Fix MapPhysicalMemory when block position is less than map position (#359)

* Fix MapPhysicalMemory when block position is less than map position

* Only try to free pages that are actually mapped

* Only unmap after freeing the memory
This commit is contained in:
gdkchan 2018-08-17 10:39:35 -03:00 committed by GitHub
parent 34100051e4
commit 056c2840b1
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23

View file

@@ -259,9 +259,9 @@ namespace Ryujinx.HLE.HOS.Kernel
InsertBlock(FreeAddr, PagesCount, MemoryState.Unmapped);
CpuMemory.Unmap(FreeAddr, DiffSize);
FreePages(FreeAddr, PagesCount);
CpuMemory.Unmap(FreeAddr, DiffSize);
}
}
}
@@ -631,7 +631,14 @@ namespace Ryujinx.HLE.HOS.Kernel
{
long CurrSize = GetSizeInRange(Info, Position, End);
CpuMemory.Map(Info.Position, PA, CurrSize);
long MapPosition = Info.Position;
if ((ulong)MapPosition < (ulong)Position)
{
MapPosition = Position;
}
CpuMemory.Map(MapPosition, PA, CurrSize);
PA += CurrSize;
}
@@ -705,10 +712,10 @@ namespace Ryujinx.HLE.HOS.Kernel
InsertBlock(Position, PagesCount, MemoryState.Unmapped);
CpuMemory.Unmap(Position, Size);
FreePages(Position, PagesCount);
CpuMemory.Unmap(Position, Size);
return 0;
}
}
@@ -737,6 +744,11 @@ namespace Ryujinx.HLE.HOS.Kernel
{
long VA = Position + Page * PageSize;
if (!CpuMemory.IsMapped(VA))
{
continue;
}
long PA = CpuMemory.GetPhysicalAddress(VA);
Allocator.Free(PA, PageSize);