Support memory aliasing (#2954)

* Back to the origins: Make memory manager take guest PA rather than host address once again

* Direct mapping with alias support on Windows

* Fixes and remove more of the emulated shared memory

* Linux support

* Make shared and transfer memory not depend on SharedMemoryStorage

* More efficient view mapping on Windows (no longer restricted to 4KB pages at a time)

* Handle potential access violations caused by partial unmap

* Implement host mapping using shared memory on Linux

* Add new GetPhysicalAddressChecked method, used to ensure the virtual address is mapped before address translation

Also align GetRef behaviour with software memory manager

* We don't need a mirrorable memory block for software memory manager mode

* Disable memory aliasing tests while we don't have shared memory support on Mac

* Shared memory & SIGBUS handler for macOS

* Fix typo + nits + re-enable memory tests

* Set MAP_JIT_DARWIN on x86 Mac too

* Add back the address space mirror

* Only set MAP_JIT_DARWIN if we are mapping as executable

* Disable aliasing tests again (still fails on Mac)

* Fix UnmapView4KB (by not casting size to int)

* Use ref counting on memory blocks to delay closing the shared memory handle until all blocks using it are disposed

* Address PR feedback

* Make RO hold a reference to the guest process memory manager to avoid early disposal

Co-authored-by: nastys <nastys@users.noreply.github.com>
gdkchan 2022-05-02 20:30:02 -03:00 committed by GitHub
parent 4a892fbdc9
commit 95017b8c66
41 changed files with 2373 additions and 2155 deletions

View file

@ -103,7 +103,7 @@ namespace ARMeilleure.Signal
// Unix siginfo struct locations.
// NOTE: These are incredibly likely to be different between kernel versions and architectures.
config.StructAddressOffset = 16; // si_addr
config.StructAddressOffset = OperatingSystem.IsMacOS() ? 24 : 16; // si_addr
config.StructWriteOffset = 8; // si_code
_signalHandlerPtr = Marshal.GetFunctionPointerForDelegate(GenerateUnixSignalHandler(_handlerConfig));

View file

@ -21,6 +21,7 @@ namespace ARMeilleure.Signal
static class UnixSignalHandlerRegistration
{
private const int SIGSEGV = 11;
private const int SIGBUS = 10;
private const int SA_SIGINFO = 0x00000004;
[DllImport("libc", SetLastError = true)]
@ -43,7 +44,17 @@ namespace ARMeilleure.Signal
if (result != 0)
{
throw new InvalidOperationException($"Could not register sigaction. Error: {result}");
throw new InvalidOperationException($"Could not register SIGSEGV sigaction. Error: {result}");
}
if (OperatingSystem.IsMacOS())
{
result = sigaction(SIGBUS, ref sig, out SigAction oldb);
if (result != 0)
{
throw new InvalidOperationException($"Could not register SIGBUS sigaction. Error: {result}");
}
}
return old;
@ -51,7 +62,7 @@ namespace ARMeilleure.Signal
public static bool RestoreExceptionHandler(SigAction oldAction)
{
return sigaction(SIGSEGV, ref oldAction, out SigAction _) == 0;
return sigaction(SIGSEGV, ref oldAction, out SigAction _) == 0 && (!OperatingSystem.IsMacOS() || sigaction(SIGBUS, ref oldAction, out SigAction _) == 0);
}
}
}
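
Context for the hunk above: on macOS, faults on memory backed by mapped files or shared memory objects are typically delivered as SIGBUS rather than SIGSEGV, which is why the same handler is additionally registered for SIGBUS there. The sigaction binding and SigAction struct that the hunk calls are declared elsewhere in this file and are not part of the diff; the sketch below only illustrates what such a libc binding generally looks like, with a deliberately simplified field layout (the real sigset_t size and flag ordering differ between platforms).

using System;
using System.Runtime.InteropServices;

static class SigactionSketch
{
    // Simplified stand-in for the SigAction struct used above; the actual layout
    // is platform specific and defined in UnixSignalHandlerRegistration itself.
    [StructLayout(LayoutKind.Sequential)]
    public struct SigAction
    {
        public IntPtr sa_handler; // becomes sa_sigaction when SA_SIGINFO is set
        public IntPtr sa_mask;    // simplified; the real sigset_t is wider on Linux
        public int sa_flags;
    }

    [DllImport("libc", SetLastError = true)]
    public static extern int sigaction(int signum, ref SigAction act, out SigAction oldact);
}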

View file

@ -8,7 +8,7 @@ namespace ARMeilleure.Translation
/// </summary>
/// <typeparam name="K">Key</typeparam>
/// <typeparam name="V">Value</typeparam>
public class IntervalTree<K, V> where K : IComparable<K>
class IntervalTree<K, V> where K : IComparable<K>
{
private const int ArrayGrowthSize = 32;
@ -53,7 +53,7 @@ namespace ARMeilleure.Translation
/// <returns>Number of intervals found</returns>
public int Get(K start, K end, ref K[] overlaps, int overlapCount = 0)
{
GetValues(_root, start, end, ref overlaps, ref overlapCount);
GetKeys(_root, start, end, ref overlaps, ref overlapCount);
return overlapCount;
}
@ -180,20 +180,20 @@ namespace ARMeilleure.Translation
}
/// <summary>
/// Retrieve all values that overlap the given start and end keys.
/// Retrieve all keys that overlap the given start and end keys.
/// </summary>
/// <param name="start">Start of the range</param>
/// <param name="end">End of the range</param>
/// <param name="overlaps">Overlaps array to place results in</param>
/// <param name="overlapCount">Overlaps count to update</param>
private void GetValues(IntervalTreeNode<K, V> node, K start, K end, ref K[] overlaps, ref int overlapCount)
private void GetKeys(IntervalTreeNode<K, V> node, K start, K end, ref K[] overlaps, ref int overlapCount)
{
if (node == null || start.CompareTo(node.Max) >= 0)
{
return;
}
GetValues(node.Left, start, end, ref overlaps, ref overlapCount);
GetKeys(node.Left, start, end, ref overlaps, ref overlapCount);
bool endsOnRight = end.CompareTo(node.Start) > 0;
if (endsOnRight)
@ -208,7 +208,7 @@ namespace ARMeilleure.Translation
overlaps[overlapCount++] = node.Start;
}
GetValues(node.Right, start, end, ref overlaps, ref overlapCount);
GetKeys(node.Right, start, end, ref overlaps, ref overlapCount);
}
}
@ -717,40 +717,40 @@ namespace ARMeilleure.Translation
/// </summary>
/// <typeparam name="K">Key type of the node</typeparam>
/// <typeparam name="V">Value type of the node</typeparam>
internal class IntervalTreeNode<K, V>
class IntervalTreeNode<K, V>
{
internal bool Color = true;
internal IntervalTreeNode<K, V> Left = null;
internal IntervalTreeNode<K, V> Right = null;
internal IntervalTreeNode<K, V> Parent = null;
public bool Color = true;
public IntervalTreeNode<K, V> Left = null;
public IntervalTreeNode<K, V> Right = null;
public IntervalTreeNode<K, V> Parent = null;
/// <summary>
/// The start of the range.
/// </summary>
internal K Start;
public K Start;
/// <summary>
/// The end of the range.
/// </summary>
internal K End;
public K End;
/// <summary>
/// The maximum end value of this node and all its children.
/// </summary>
internal K Max;
public K Max;
/// <summary>
/// Value stored on this node.
/// </summary>
internal V Value;
public V Value;
public IntervalTreeNode(K start, K end, V value, IntervalTreeNode<K, V> parent)
{
this.Start = start;
this.End = end;
this.Max = end;
this.Value = value;
this.Parent = parent;
Start = start;
End = end;
Max = end;
Value = value;
Parent = parent;
}
}
}
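
A hypothetical usage of the Get overload shown above, for readers unfamiliar with the ref-array pattern it uses. The Add call stands in for whatever insertion method the tree exposes (it is not part of this diff), and the overlaps buffer is assumed to be grown by the tree when it is too small, as the ArrayGrowthSize constant above suggests.

// Collect the start keys of all intervals overlapping [0x2000, 0x5000).
var tree = new IntervalTree<ulong, string>();
tree.Add(0x1000, 0x3000, "A"); // hypothetical insertion API
tree.Add(0x4000, 0x6000, "B");
tree.Add(0x7000, 0x8000, "C");

ulong[] overlaps = new ulong[16];
int count = tree.Get(0x2000, 0x5000, ref overlaps);
// count == 2; overlaps starts with the keys 0x1000 and 0x4000 (in-order traversal).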

View file

@ -1,7 +1,5 @@
using System;
using System.Collections;
using System.Collections.Generic;
using System.Diagnostics.CodeAnalysis;
using System.Linq;
namespace Ryujinx.Common.Collections
@ -779,37 +777,37 @@ namespace Ryujinx.Common.Collections
/// </summary>
/// <typeparam name="K">Key type of the node</typeparam>
/// <typeparam name="V">Value type of the node</typeparam>
internal class IntervalTreeNode<K, V>
class IntervalTreeNode<K, V>
{
internal bool Color = true;
internal IntervalTreeNode<K, V> Left = null;
internal IntervalTreeNode<K, V> Right = null;
internal IntervalTreeNode<K, V> Parent = null;
public bool Color = true;
public IntervalTreeNode<K, V> Left = null;
public IntervalTreeNode<K, V> Right = null;
public IntervalTreeNode<K, V> Parent = null;
/// <summary>
/// The start of the range.
/// </summary>
internal K Start;
public K Start;
/// <summary>
/// The end of the range - maximum of all in the Values list.
/// </summary>
internal K End;
public K End;
/// <summary>
/// The maximum end value of this node and all its children.
/// </summary>
internal K Max;
public K Max;
internal List<RangeNode<K, V>> Values;
public List<RangeNode<K, V>> Values;
public IntervalTreeNode(K start, K end, V value, IntervalTreeNode<K, V> parent)
{
this.Start = start;
this.End = end;
this.Max = end;
this.Values = new List<RangeNode<K, V>> { new RangeNode<K, V>(start, end, value) };
this.Parent = parent;
Start = start;
End = end;
Max = end;
Values = new List<RangeNode<K, V>> { new RangeNode<K, V>(start, end, value) };
Parent = parent;
}
}
}

View file

@ -967,20 +967,20 @@ namespace Ryujinx.Common.Collections
/// </summary>
/// <typeparam name="K">Key of the node</typeparam>
/// <typeparam name="V">Value of the node</typeparam>
internal class Node<K, V>
class Node<K, V>
{
internal bool Color = true;
internal Node<K, V> Left = null;
internal Node<K, V> Right = null;
internal Node<K, V> Parent = null;
internal K Key;
internal V Value;
public bool Color = true;
public Node<K, V> Left = null;
public Node<K, V> Right = null;
public Node<K, V> Parent = null;
public K Key;
public V Value;
public Node(K key, V value, Node<K, V> parent)
{
this.Key = key;
this.Value = value;
this.Parent = parent;
Key = key;
Value = value;
Parent = parent;
}
}
}

View file

@ -24,7 +24,7 @@ namespace Ryujinx.Cpu
_baseAddress = (ulong)_addressSpace.Pointer;
ulong endAddress = _baseAddress + addressSpace.Size;
_trackingEvent = new TrackingEventDelegate(tracking.VirtualMemoryEvent);
_trackingEvent = new TrackingEventDelegate(tracking.VirtualMemoryEventEh);
bool added = NativeSignalHandler.AddTrackedRegion((nuint)_baseAddress, (nuint)endAddress, Marshal.GetFunctionPointerForDelegate(_trackingEvent));
if (!added)

View file

@ -25,6 +25,7 @@ namespace Ryujinx.Cpu
private const int PointerTagBit = 62;
private readonly MemoryBlock _backingMemory;
private readonly InvalidAccessHandler _invalidAccessHandler;
/// <summary>
@ -50,10 +51,12 @@ namespace Ryujinx.Cpu
/// <summary>
/// Creates a new instance of the memory manager.
/// </summary>
/// <param name="backingMemory">Physical backing memory where virtual memory will be mapped to</param>
/// <param name="addressSpaceSize">Size of the address space</param>
/// <param name="invalidAccessHandler">Optional function to handle invalid memory accesses</param>
public MemoryManager(ulong addressSpaceSize, InvalidAccessHandler invalidAccessHandler = null)
public MemoryManager(MemoryBlock backingMemory, ulong addressSpaceSize, InvalidAccessHandler invalidAccessHandler = null)
{
_backingMemory = backingMemory;
_invalidAccessHandler = invalidAccessHandler;
ulong asSize = PageSize;
@ -73,18 +76,19 @@ namespace Ryujinx.Cpu
}
/// <inheritdoc/>
public void Map(ulong va, nuint hostAddress, ulong size)
public void Map(ulong va, ulong pa, ulong size)
{
AssertValidAddressAndSize(va, size);
ulong remainingSize = size;
ulong oVa = va;
ulong oPa = pa;
while (remainingSize != 0)
{
_pageTable.Write((va / PageSize) * PteSize, hostAddress);
_pageTable.Write((va / PageSize) * PteSize, PaToPte(pa));
va += PageSize;
hostAddress += PageSize;
pa += PageSize;
remainingSize -= PageSize;
}
Tracking.Map(oVa, size);
@ -107,7 +111,7 @@ namespace Ryujinx.Cpu
ulong remainingSize = size;
while (remainingSize != 0)
{
_pageTable.Write((va / PageSize) * PteSize, (nuint)0);
_pageTable.Write((va / PageSize) * PteSize, 0UL);
va += PageSize;
remainingSize -= PageSize;
@ -122,9 +126,22 @@ namespace Ryujinx.Cpu
/// <inheritdoc/>
public T ReadTracked<T>(ulong va) where T : unmanaged
{
try
{
SignalMemoryTracking(va, (ulong)Unsafe.SizeOf<T>(), false);
return MemoryMarshal.Cast<byte, T>(GetSpan(va, Unsafe.SizeOf<T>()))[0];
return Read<T>(va);
}
catch (InvalidMemoryRegionException)
{
if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
{
throw;
}
return default;
}
}
/// <inheritdoc/>
@ -177,7 +194,7 @@ namespace Ryujinx.Cpu
if (IsContiguousAndMapped(va, data.Length))
{
data.CopyTo(GetHostSpanContiguous(va, data.Length));
data.CopyTo(_backingMemory.GetSpan(GetPhysicalAddressInternal(va), data.Length));
}
else
{
@ -185,18 +202,22 @@ namespace Ryujinx.Cpu
if ((va & PageMask) != 0)
{
ulong pa = GetPhysicalAddressInternal(va);
size = Math.Min(data.Length, PageSize - (int)(va & PageMask));
data.Slice(0, size).CopyTo(GetHostSpanContiguous(va, size));
data.Slice(0, size).CopyTo(_backingMemory.GetSpan(pa, size));
offset += size;
}
for (; offset < data.Length; offset += size)
{
ulong pa = GetPhysicalAddressInternal(va + (ulong)offset);
size = Math.Min(data.Length - offset, PageSize);
data.Slice(offset, size).CopyTo(GetHostSpanContiguous(va + (ulong)offset, size));
data.Slice(offset, size).CopyTo(_backingMemory.GetSpan(pa, size));
}
}
}
@ -224,7 +245,7 @@ namespace Ryujinx.Cpu
if (IsContiguousAndMapped(va, size))
{
return GetHostSpanContiguous(va, size);
return _backingMemory.GetSpan(GetPhysicalAddressInternal(va), size);
}
else
{
@ -251,7 +272,7 @@ namespace Ryujinx.Cpu
SignalMemoryTracking(va, (ulong)size, true);
}
return new WritableRegion(null, va, new NativeMemoryManager<byte>((byte*)GetHostAddress(va), size).Memory);
return new WritableRegion(null, va, _backingMemory.GetMemory(GetPhysicalAddressInternal(va), size));
}
else
{
@ -264,7 +285,7 @@ namespace Ryujinx.Cpu
}
/// <inheritdoc/>
public unsafe ref T GetRef<T>(ulong va) where T : unmanaged
public ref T GetRef<T>(ulong va) where T : unmanaged
{
if (!IsContiguous(va, Unsafe.SizeOf<T>()))
{
@ -273,7 +294,7 @@ namespace Ryujinx.Cpu
SignalMemoryTracking(va, (ulong)Unsafe.SizeOf<T>(), true);
return ref *(T*)GetHostAddress(va);
return ref _backingMemory.GetRef<T>(GetPhysicalAddressInternal(va));
}
/// <summary>
@ -293,7 +314,7 @@ namespace Ryujinx.Cpu
return (int)(vaSpan / PageSize);
}
private void ThrowMemoryNotContiguous() => throw new MemoryNotContiguousException();
private static void ThrowMemoryNotContiguous() => throw new MemoryNotContiguousException();
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private bool IsContiguousAndMapped(ulong va, int size) => IsContiguous(va, size) && IsMapped(va);
@ -315,7 +336,7 @@ namespace Ryujinx.Cpu
return false;
}
if (GetHostAddress(va) + PageSize != GetHostAddress(va + PageSize))
if (GetPhysicalAddressInternal(va) + PageSize != GetPhysicalAddressInternal(va + PageSize))
{
return false;
}
@ -327,11 +348,11 @@ namespace Ryujinx.Cpu
}
/// <inheritdoc/>
public IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size)
public IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size)
{
if (size == 0)
{
return Enumerable.Empty<HostMemoryRange>();
return Enumerable.Empty<MemoryRange>();
}
if (!ValidateAddress(va) || !ValidateAddressAndSize(va, size))
@ -341,9 +362,9 @@ namespace Ryujinx.Cpu
int pages = GetPagesCount(va, (uint)size, out va);
var regions = new List<HostMemoryRange>();
var regions = new List<MemoryRange>();
nuint regionStart = GetHostAddress(va);
ulong regionStart = GetPhysicalAddressInternal(va);
ulong regionSize = PageSize;
for (int page = 0; page < pages - 1; page++)
@ -353,12 +374,12 @@ namespace Ryujinx.Cpu
return null;
}
nuint newHostAddress = GetHostAddress(va + PageSize);
ulong newPa = GetPhysicalAddressInternal(va + PageSize);
if (GetHostAddress(va) + PageSize != newHostAddress)
if (GetPhysicalAddressInternal(va) + PageSize != newPa)
{
regions.Add(new HostMemoryRange(regionStart, regionSize));
regionStart = newHostAddress;
regions.Add(new MemoryRange(regionStart, regionSize));
regionStart = newPa;
regionSize = 0;
}
@ -366,7 +387,7 @@ namespace Ryujinx.Cpu
regionSize += PageSize;
}
regions.Add(new HostMemoryRange(regionStart, regionSize));
regions.Add(new MemoryRange(regionStart, regionSize));
return regions;
}
@ -386,18 +407,22 @@ namespace Ryujinx.Cpu
if ((va & PageMask) != 0)
{
ulong pa = GetPhysicalAddressInternal(va);
size = Math.Min(data.Length, PageSize - (int)(va & PageMask));
GetHostSpanContiguous(va, size).CopyTo(data.Slice(0, size));
_backingMemory.GetSpan(pa, size).CopyTo(data.Slice(0, size));
offset += size;
}
for (; offset < data.Length; offset += size)
{
ulong pa = GetPhysicalAddressInternal(va + (ulong)offset);
size = Math.Min(data.Length - offset, PageSize);
GetHostSpanContiguous(va + (ulong)offset, size).CopyTo(data.Slice(offset, size));
_backingMemory.GetSpan(pa, size).CopyTo(data.Slice(offset, size));
}
}
catch (InvalidMemoryRegionException)
@ -446,7 +471,7 @@ namespace Ryujinx.Cpu
return false;
}
return _pageTable.Read<nuint>((va / PageSize) * PteSize) != 0;
return _pageTable.Read<ulong>((va / PageSize) * PteSize) != 0;
}
private bool ValidateAddress(ulong va)
@ -480,37 +505,20 @@ namespace Ryujinx.Cpu
}
}
/// <summary>
/// Get a span representing the given virtual address and size range in host memory.
/// This function assumes that the requested virtual memory region is contiguous.
/// </summary>
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range in bytes</param>
/// <returns>A span representing the given virtual range in host memory</returns>
/// <exception cref="InvalidMemoryRegionException">Throw when the base virtual address is not mapped</exception>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private unsafe Span<byte> GetHostSpanContiguous(ulong va, int size)
private ulong GetPhysicalAddress(ulong va)
{
return new Span<byte>((void*)GetHostAddress(va), size);
// We return ulong.MaxValue (all bits set, i.e. -1) if the virtual address is invalid or unmapped.
if (!ValidateAddress(va) || !IsMapped(va))
{
return ulong.MaxValue;
}
/// <summary>
/// Get the host address for a given virtual address, using the page table.
/// </summary>
/// <param name="va">Virtual address</param>
/// <returns>The corresponding host address for the given virtual address</returns>
/// <exception cref="InvalidMemoryRegionException">Throw when the virtual address is not mapped</exception>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private nuint GetHostAddress(ulong va)
{
nuint pageBase = _pageTable.Read<nuint>((va / PageSize) * PteSize) & unchecked((nuint)0xffff_ffff_ffffUL);
if (pageBase == 0)
{
ThrowInvalidMemoryRegionException($"Not mapped: va=0x{va:X16}");
return GetPhysicalAddressInternal(va);
}
return pageBase + (nuint)(va & PageMask);
private ulong GetPhysicalAddressInternal(ulong va)
{
return PteToPa(_pageTable.Read<ulong>((va / PageSize) * PteSize) & ~(0xffffUL << 48)) + (va & PageMask);
}
/// <inheritdoc/>
@ -604,6 +612,16 @@ namespace Ryujinx.Cpu
}
}
private ulong PaToPte(ulong pa)
{
return (ulong)_backingMemory.GetPointer(pa, PageSize);
}
private ulong PteToPa(ulong pte)
{
return (ulong)((long)pte - _backingMemory.Pointer.ToInt64());
}
/// <summary>
/// Disposes of resources used by the memory manager.
/// </summary>
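
The PTE encoding introduced above stores a host pointer into the backing block (backing base + guest PA) rather than the guest PA itself; the upper 16 bits are used by the tracking code as tag bits (see PointerTagBit above) and are masked off before translating back. A small worked example, with a made-up backing base address purely for illustration:

ulong hostBase = 0x7F00_0000_0000UL;                  // assumed _backingMemory.Pointer value
ulong pa       = 0x1234_5000UL;                       // example guest physical address

ulong pte = hostBase + pa;                            // what PaToPte(pa) produces
ulong paAgain = (pte & ~(0xffffUL << 48)) - hostBase; // mask tag bits, then PteToPa
// paAgain == 0x1234_5000; GetPhysicalAddressInternal then adds (va & PageMask).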

View file

@ -5,7 +5,6 @@ using Ryujinx.Memory.Range;
using Ryujinx.Memory.Tracking;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Threading;
@ -14,7 +13,7 @@ namespace Ryujinx.Cpu
/// <summary>
/// Represents a CPU memory manager which maps guest virtual memory directly onto a host virtual region.
/// </summary>
public class MemoryManagerHostMapped : MemoryManagerBase, IMemoryManager, IVirtualMemoryManagerTracked
public class MemoryManagerHostMapped : MemoryManagerBase, IMemoryManager, IVirtualMemoryManagerTracked, IWritableBlock
{
public const int PageBits = 12;
public const int PageSize = 1 << PageBits;
@ -42,9 +41,12 @@ namespace Ryujinx.Cpu
private readonly MemoryBlock _addressSpaceMirror;
private readonly ulong _addressSpaceSize;
private readonly MemoryBlock _backingMemory;
private readonly PageTable<ulong> _pageTable;
private readonly MemoryEhMeilleure _memoryEh;
private ulong[] _pageTable;
private readonly ulong[] _pageBitmap;
public int AddressSpaceBits { get; }
@ -59,11 +61,14 @@ namespace Ryujinx.Cpu
/// <summary>
/// Creates a new instance of the host mapped memory manager.
/// </summary>
/// <param name="backingMemory">Physical backing memory where virtual memory will be mapped to</param>
/// <param name="addressSpaceSize">Size of the address space</param>
/// <param name="unsafeMode">True if unmanaged access should not be masked (unsafe), false otherwise.</param>
/// <param name="invalidAccessHandler">Optional function to handle invalid memory accesses</param>
public MemoryManagerHostMapped(ulong addressSpaceSize, bool unsafeMode, InvalidAccessHandler invalidAccessHandler = null)
public MemoryManagerHostMapped(MemoryBlock backingMemory, ulong addressSpaceSize, bool unsafeMode, InvalidAccessHandler invalidAccessHandler = null)
{
_backingMemory = backingMemory;
_pageTable = new PageTable<ulong>();
_invalidAccessHandler = invalidAccessHandler;
_unsafeMode = unsafeMode;
_addressSpaceSize = addressSpaceSize;
@ -79,9 +84,13 @@ namespace Ryujinx.Cpu
AddressSpaceBits = asBits;
_pageTable = new ulong[1 << (AddressSpaceBits - (PageBits + PageToPteShift))];
_addressSpace = new MemoryBlock(asSize, MemoryAllocationFlags.Reserve | MemoryAllocationFlags.Mirrorable);
_addressSpaceMirror = _addressSpace.CreateMirror();
_pageBitmap = new ulong[1 << (AddressSpaceBits - (PageBits + PageToPteShift))];
MemoryAllocationFlags asFlags = MemoryAllocationFlags.Reserve | MemoryAllocationFlags.ViewCompatible;
_addressSpace = new MemoryBlock(asSize, asFlags);
_addressSpaceMirror = new MemoryBlock(asSize, asFlags | MemoryAllocationFlags.ForceWindows4KBViewMapping);
Tracking = new MemoryTracking(this, PageSize, invalidAccessHandler);
_memoryEh = new MemoryEhMeilleure(_addressSpace, Tracking);
}
@ -136,12 +145,14 @@ namespace Ryujinx.Cpu
}
/// <inheritdoc/>
public void Map(ulong va, nuint hostAddress, ulong size)
public void Map(ulong va, ulong pa, ulong size)
{
AssertValidAddressAndSize(va, size);
_addressSpace.Commit(va, size);
_addressSpace.MapView(_backingMemory, pa, va, size);
_addressSpaceMirror.MapView(_backingMemory, pa, va, size);
AddMapping(va, size);
PtMap(va, pa, size);
Tracking.Map(va, size);
}
@ -155,7 +166,32 @@ namespace Ryujinx.Cpu
Tracking.Unmap(va, size);
RemoveMapping(va, size);
_addressSpace.Decommit(va, size);
PtUnmap(va, size);
_addressSpace.UnmapView(_backingMemory, va, size);
_addressSpaceMirror.UnmapView(_backingMemory, va, size);
}
private void PtMap(ulong va, ulong pa, ulong size)
{
while (size != 0)
{
_pageTable.Map(va, pa);
va += PageSize;
pa += PageSize;
size -= PageSize;
}
}
private void PtUnmap(ulong va, ulong size)
{
while (size != 0)
{
_pageTable.Unmap(va);
va += PageSize;
size -= PageSize;
}
}
/// <inheritdoc/>
@ -216,6 +252,7 @@ namespace Ryujinx.Cpu
}
}
/// <inheritdoc/>
public void Write<T>(ulong va, T value) where T : unmanaged
{
@ -267,7 +304,7 @@ namespace Ryujinx.Cpu
throw;
}
}
}
}
/// <inheritdoc/>
public ReadOnlySpan<byte> GetSpan(ulong va, int size, bool tracked = false)
@ -322,7 +359,7 @@ namespace Ryujinx.Cpu
int bit = (int)((page & 31) << 1);
int pageIndex = (int)(page >> PageToPteShift);
ref ulong pageRef = ref _pageTable[pageIndex];
ref ulong pageRef = ref _pageBitmap[pageIndex];
ulong pte = Volatile.Read(ref pageRef);
@ -373,7 +410,7 @@ namespace Ryujinx.Cpu
mask &= endMask;
}
ref ulong pageRef = ref _pageTable[pageIndex++];
ref ulong pageRef = ref _pageBitmap[pageIndex++];
ulong pte = Volatile.Read(ref pageRef);
pte |= pte >> 1;
@ -389,16 +426,53 @@ namespace Ryujinx.Cpu
}
/// <inheritdoc/>
public IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size)
public IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size)
{
if (size == 0)
int pages = GetPagesCount(va, (uint)size, out va);
var regions = new List<MemoryRange>();
ulong regionStart = GetPhysicalAddressChecked(va);
ulong regionSize = PageSize;
for (int page = 0; page < pages - 1; page++)
{
return Enumerable.Empty<HostMemoryRange>();
if (!ValidateAddress(va + PageSize))
{
return null;
}
AssertMapped(va, size);
ulong newPa = GetPhysicalAddressChecked(va + PageSize);
return new HostMemoryRange[] { new HostMemoryRange(_addressSpaceMirror.GetPointer(va, size), size) };
if (GetPhysicalAddressChecked(va) + PageSize != newPa)
{
regions.Add(new MemoryRange(regionStart, regionSize));
regionStart = newPa;
regionSize = 0;
}
va += PageSize;
regionSize += PageSize;
}
regions.Add(new MemoryRange(regionStart, regionSize));
return regions;
}
private ulong GetPhysicalAddressChecked(ulong va)
{
if (!IsMapped(va))
{
ThrowInvalidMemoryRegionException($"Not mapped: va=0x{va:X16}");
}
return GetPhysicalAddressInternal(va);
}
private ulong GetPhysicalAddressInternal(ulong va)
{
return _pageTable.Read(va) + (va & PageMask);
}
/// <inheritdoc/>
@ -427,7 +501,7 @@ namespace Ryujinx.Cpu
int bit = (int)((pageStart & 31) << 1);
int pageIndex = (int)(pageStart >> PageToPteShift);
ref ulong pageRef = ref _pageTable[pageIndex];
ref ulong pageRef = ref _pageBitmap[pageIndex];
ulong pte = Volatile.Read(ref pageRef);
ulong state = ((pte >> bit) & 3);
@ -459,7 +533,7 @@ namespace Ryujinx.Cpu
mask &= endMask;
}
ref ulong pageRef = ref _pageTable[pageIndex++];
ref ulong pageRef = ref _pageBitmap[pageIndex++];
ulong pte = Volatile.Read(ref pageRef);
ulong mappedMask = mask & BlockMappedMask;
@ -530,7 +604,7 @@ namespace Ryujinx.Cpu
ulong tag = protTag << bit;
int pageIndex = (int)(pageStart >> PageToPteShift);
ref ulong pageRef = ref _pageTable[pageIndex];
ref ulong pageRef = ref _pageBitmap[pageIndex];
ulong pte;
@ -562,7 +636,7 @@ namespace Ryujinx.Cpu
mask &= endMask;
}
ref ulong pageRef = ref _pageTable[pageIndex++];
ref ulong pageRef = ref _pageBitmap[pageIndex++];
ulong pte;
ulong mappedMask;
@ -632,7 +706,7 @@ namespace Ryujinx.Cpu
mask &= endMask;
}
ref ulong pageRef = ref _pageTable[pageIndex++];
ref ulong pageRef = ref _pageBitmap[pageIndex++];
ulong pte;
ulong mappedMask;
@ -677,7 +751,7 @@ namespace Ryujinx.Cpu
mask |= endMask;
}
ref ulong pageRef = ref _pageTable[pageIndex++];
ref ulong pageRef = ref _pageBitmap[pageIndex++];
ulong pte;
do
@ -695,11 +769,11 @@ namespace Ryujinx.Cpu
/// </summary>
protected override void Destroy()
{
_addressSpaceMirror.Dispose();
_addressSpace.Dispose();
_addressSpaceMirror.Dispose();
_memoryEh.Dispose();
}
private void ThrowInvalidMemoryRegionException(string message) => throw new InvalidMemoryRegionException(message);
private static void ThrowInvalidMemoryRegionException(string message) => throw new InvalidMemoryRegionException(message);
}
}
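
Two details of the hunks above that are easy to miss: the manager now keeps two host mappings of the same guest range (_addressSpace, which tracking reprotects, and _addressSpaceMirror, which the manager uses for its own reads and writes so tracking protection does not fault them), and the per-page mapped/tracked state lives in _pageBitmap with two bits per page. A sketch of the bitmap index math used throughout, with the constants assumed from the surrounding code:

const int PageBits = 12;            // 4 KB pages
const int PageToPteShift = 5;       // 32 two-bit entries per ulong

ulong va = 0x1234_5000UL;
ulong page = va >> PageBits;                      // virtual page number
int bit = (int)((page & 31) << 1);                // bit offset of this page's 2-bit field
int wordIndex = (int)(page >> PageToPteShift);    // which ulong in _pageBitmap holds it

// ulong state = (_pageBitmap[wordIndex] >> bit) & 3;
// 0 means unmapped; the remaining values encode mapped plus the current
// read/write tracking state (the exact constants are defined by the manager).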

View file

@ -17,6 +17,7 @@ using Ryujinx.Common.Logging;
using Ryujinx.HLE.FileSystem;
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.HLE.Loaders.Executables;
using Ryujinx.Memory;
using System;
using System.Collections.Generic;
using System.Globalization;
@ -561,7 +562,14 @@ namespace Ryujinx.HLE.HOS
Graphics.Gpu.GraphicsConfig.TitleId = TitleIdText;
_device.Gpu.HostInitalized.Set();
Ptc.Initialize(TitleIdText, DisplayVersion, usePtc, _device.Configuration.MemoryManagerMode);
MemoryManagerMode memoryManagerMode = _device.Configuration.MemoryManagerMode;
if (!MemoryBlock.SupportsFlags(MemoryAllocationFlags.ViewCompatible))
{
memoryManagerMode = MemoryManagerMode.SoftwarePageTable;
}
Ptc.Initialize(TitleIdText, DisplayVersion, usePtc, memoryManagerMode);
metaData.GetNpdm(out Npdm npdm).ThrowIfFailure();
ProgramLoader.LoadNsos(_device.System.KernelContext, out ProcessTamperInfo tamperInfo, metaData, new ProgramInfo(in npdm), executables: programs);

View file

@ -21,15 +21,20 @@ namespace Ryujinx.HLE.HOS
{
MemoryManagerMode mode = context.Device.Configuration.MemoryManagerMode;
if (!MemoryBlock.SupportsFlags(MemoryAllocationFlags.ViewCompatible))
{
mode = MemoryManagerMode.SoftwarePageTable;
}
switch (mode)
{
case MemoryManagerMode.SoftwarePageTable:
return new ArmProcessContext<MemoryManager>(pid, _gpu, new MemoryManager(addressSpaceSize, invalidAccessHandler), for64Bit);
return new ArmProcessContext<MemoryManager>(pid, _gpu, new MemoryManager(context.Memory, addressSpaceSize, invalidAccessHandler), for64Bit);
case MemoryManagerMode.HostMapped:
case MemoryManagerMode.HostMappedUnsafe:
bool unsafeMode = mode == MemoryManagerMode.HostMappedUnsafe;
return new ArmProcessContext<MemoryManagerHostMapped>(pid, _gpu, new MemoryManagerHostMapped(addressSpaceSize, unsafeMode, invalidAccessHandler), for64Bit);
return new ArmProcessContext<MemoryManagerHostMapped>(pid, _gpu, new MemoryManagerHostMapped(context.Memory, addressSpaceSize, unsafeMode, invalidAccessHandler), for64Bit);
default:
throw new ArgumentOutOfRangeException();

View file

@ -1,10 +1,7 @@
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.Memory;
using Ryujinx.Memory.Range;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
@ -12,17 +9,19 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
private readonly IVirtualMemoryManager _cpuMemory;
public override bool SupportsMemoryAliasing => true;
public KPageTable(KernelContext context, IVirtualMemoryManager cpuMemory) : base(context)
{
_cpuMemory = cpuMemory;
}
/// <inheritdoc/>
protected override IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size)
protected override void GetPhysicalRegions(ulong va, ulong size, KPageList pageList)
{
return _cpuMemory.GetPhysicalRegions(va, size);
var ranges = _cpuMemory.GetPhysicalRegions(va, size);
foreach (var range in ranges)
{
pageList.AddRange(range.Address + DramMemoryMap.DramBase, range.Size / PageSize);
}
}
/// <inheritdoc/>
@ -34,7 +33,8 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
/// <inheritdoc/>
protected override KernelResult MapMemory(ulong src, ulong dst, ulong pagesCount, KMemoryPermission oldSrcPermission, KMemoryPermission newDstPermission)
{
var srcRanges = GetPhysicalRegions(src, pagesCount * PageSize);
KPageList pageList = new KPageList();
GetPhysicalRegions(src, pagesCount * PageSize, pageList);
KernelResult result = Reprotect(src, pagesCount, KMemoryPermission.None);
@ -43,7 +43,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
return result;
}
result = MapPages(dst, srcRanges, newDstPermission);
result = MapPages(dst, pageList, newDstPermission, false, 0);
if (result != KernelResult.Success)
{
@ -59,10 +59,13 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
ulong size = pagesCount * PageSize;
var srcRanges = GetPhysicalRegions(src, size);
var dstRanges = GetPhysicalRegions(dst, size);
KPageList srcPageList = new KPageList();
KPageList dstPageList = new KPageList();
if (!dstRanges.SequenceEqual(srcRanges))
GetPhysicalRegions(src, size, srcPageList);
GetPhysicalRegions(dst, size, dstPageList);
if (!dstPageList.IsEqual(srcPageList))
{
return KernelResult.InvalidMemRange;
}
@ -78,7 +81,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
if (result != KernelResult.Success)
{
KernelResult mapResult = MapPages(dst, dstRanges, oldDstPermission);
KernelResult mapResult = MapPages(dst, dstPageList, oldDstPermission, false, 0);
Debug.Assert(mapResult == KernelResult.Success);
}
@ -92,7 +95,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
Context.Memory.Commit(srcPa - DramMemoryMap.DramBase, size);
_cpuMemory.Map(dstVa, Context.Memory.GetPointer(srcPa - DramMemoryMap.DramBase, size), size);
_cpuMemory.Map(dstVa, srcPa - DramMemoryMap.DramBase, size);
if (DramMemoryMap.IsHeapPhysicalAddress(srcPa))
{
@ -121,7 +124,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
Context.Memory.Commit(addr, size);
_cpuMemory.Map(currentVa, Context.Memory.GetPointer(addr, size), size);
_cpuMemory.Map(currentVa, addr, size);
if (shouldFillPages)
{
@ -136,33 +139,6 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult MapPages(ulong address, IEnumerable<HostMemoryRange> ranges, KMemoryPermission permission)
{
ulong currentVa = address;
foreach (var range in ranges)
{
ulong size = range.Size;
ulong pa = GetDramAddressFromHostAddress(range.Address);
if (pa != ulong.MaxValue)
{
pa += DramMemoryMap.DramBase;
if (DramMemoryMap.IsHeapPhysicalAddress(pa))
{
Context.MemoryManager.IncrementPagesReferenceCount(pa, size / PageSize);
}
}
_cpuMemory.Map(currentVa, range.Address, size);
currentVa += size;
}
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult Unmap(ulong address, ulong pagesCount)
{
@ -172,13 +148,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
foreach (var region in regions)
{
ulong pa = GetDramAddressFromHostAddress(region.Address);
if (pa == ulong.MaxValue)
{
continue;
}
pa += DramMemoryMap.DramBase;
ulong pa = region.Address + DramMemoryMap.DramBase;
if (DramMemoryMap.IsHeapPhysicalAddress(pa))
{
pagesToClose.AddRange(pa, region.Size / PageSize);
@ -217,15 +187,5 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
_cpuMemory.Write(va, data);
}
private ulong GetDramAddressFromHostAddress(nuint hostAddress)
{
if (hostAddress < (nuint)(ulong)Context.Memory.Pointer || hostAddress >= (nuint)((ulong)Context.Memory.Pointer + Context.Memory.Size))
{
return ulong.MaxValue;
}
return hostAddress - (ulong)Context.Memory.Pointer;
}
}
}
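
The conversions above hinge on the distinction between an offset into the backing DRAM block and a guest physical address: the CPU memory manager now reports backing offsets, and KPageTable rebases them by DramMemoryMap.DramBase before storing them in a KPageList (and subtracts it again before calling _cpuMemory.Map). A tiny worked example, assuming the usual Switch DRAM base of 0x8000_0000:

ulong dramBase = 0x8000_0000UL;       // assumed DramMemoryMap.DramBase
ulong backingOffset = 0x0123_4000UL;  // range.Address reported by GetPhysicalRegions

ulong guestPa = backingOffset + dramBase;   // what gets stored in the KPageList
ulong mapArg  = guestPa - dramBase;         // what _cpuMemory.Map receives back
// mapArg == backingOffset, so the CPU memory manager always works in backing offsets.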

View file

@ -1,11 +1,9 @@
using Ryujinx.Common;
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.Memory.Range;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
@ -73,8 +71,6 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
private MersenneTwister _randomNumberGenerator;
public abstract bool SupportsMemoryAliasing { get; }
private MemoryFillValue _heapFillValue;
private MemoryFillValue _ipcFillValue;
@ -380,8 +376,9 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
}
}
public KernelResult UnmapPages(ulong address, ulong pagesCount, IEnumerable<HostMemoryRange> ranges, MemoryState stateExpected)
public KernelResult UnmapPages(ulong address, KPageList pageList, MemoryState stateExpected)
{
ulong pagesCount = pageList.GetPagesCount();
ulong size = pagesCount * PageSize;
ulong endAddr = address + size;
@ -405,9 +402,11 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
lock (_blockManager)
{
var currentRanges = GetPhysicalRegions(address, size);
KPageList currentPageList = new KPageList();
if (!currentRanges.SequenceEqual(ranges))
GetPhysicalRegions(address, size, currentPageList);
if (!currentPageList.IsEqual(pageList))
{
return KernelResult.InvalidMemRange;
}
@ -1690,11 +1689,6 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
bool send,
out ulong dst)
{
if (!SupportsMemoryAliasing)
{
throw new NotSupportedException("Memory aliasing not supported, can't map IPC buffers.");
}
dst = 0;
if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
@ -1828,7 +1822,10 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
ulong alignedSize = endAddrTruncated - addressRounded;
KernelResult result = MapPages(currentVa, srcPageTable.GetPhysicalRegions(addressRounded, alignedSize), permission);
KPageList pageList = new KPageList();
srcPageTable.GetPhysicalRegions(addressRounded, alignedSize, pageList);
KernelResult result = MapPages(currentVa, pageList, permission);
if (result != KernelResult.Success)
{
@ -2041,7 +2038,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
MemoryAttribute.Borrowed);
}
public KernelResult BorrowTransferMemory(List<HostMemoryRange> ranges, ulong address, ulong size, KMemoryPermission permission)
public KernelResult BorrowTransferMemory(KPageList pageList, ulong address, ulong size, KMemoryPermission permission)
{
return SetAttributesAndChangePermission(
address,
@ -2054,7 +2051,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
MemoryAttribute.None,
permission,
MemoryAttribute.Borrowed,
ranges);
pageList);
}
private KernelResult SetAttributesAndChangePermission(
@ -2068,7 +2065,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
MemoryAttribute attributeExpected,
KMemoryPermission newPermission,
MemoryAttribute attributeSetMask,
List<HostMemoryRange> ranges = null)
KPageList pageList = null)
{
if (address + size <= address || !InsideAddrSpace(address, size))
{
@ -2093,7 +2090,10 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
ulong pagesCount = size / PageSize;
ranges?.AddRange(GetPhysicalRegions(address, size));
if (pageList != null)
{
GetPhysicalRegions(address, pagesCount * PageSize, pageList);
}
if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
{
@ -2143,7 +2143,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
MemoryAttribute.Borrowed);
}
public KernelResult UnborrowTransferMemory(ulong address, ulong size, List<HostMemoryRange> ranges)
public KernelResult UnborrowTransferMemory(ulong address, ulong size, KPageList pageList)
{
return ClearAttributesAndChangePermission(
address,
@ -2156,7 +2156,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
MemoryAttribute.Borrowed,
KMemoryPermission.ReadAndWrite,
MemoryAttribute.Borrowed,
ranges);
pageList);
}
private KernelResult ClearAttributesAndChangePermission(
@ -2170,7 +2170,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
MemoryAttribute attributeExpected,
KMemoryPermission newPermission,
MemoryAttribute attributeClearMask,
List<HostMemoryRange> ranges = null)
KPageList pageList = null)
{
if (address + size <= address || !InsideAddrSpace(address, size))
{
@ -2195,11 +2195,13 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
ulong pagesCount = size / PageSize;
if (ranges != null)
if (pageList != null)
{
var currentRanges = GetPhysicalRegions(address, size);
KPageList currentPageList = new KPageList();
if (!currentRanges.SequenceEqual(ranges))
GetPhysicalRegions(address, pagesCount * PageSize, currentPageList);
if (!currentPageList.IsEqual(pageList))
{
return KernelResult.InvalidMemRange;
}
@ -2741,8 +2743,8 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
/// </summary>
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range</param>
/// <returns>Array of physical regions</returns>
protected abstract IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size);
/// <param name="pageList">Page list where the ranges will be added</param>
protected abstract void GetPhysicalRegions(ulong va, ulong size, KPageList pageList);
/// <summary>
/// Gets a read-only span of data from CPU mapped memory.
@ -2803,16 +2805,6 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
/// <returns>Result of the mapping operation</returns>
protected abstract KernelResult MapPages(ulong address, KPageList pageList, KMemoryPermission permission, bool shouldFillPages = false, byte fillValue = 0);
/// <summary>
/// Maps a region of memory into the specified host memory ranges.
/// </summary>
/// <param name="address">Destination virtual address that should be mapped</param>
/// <param name="ranges">Ranges of host memory that should be mapped</param>
/// <param name="permission">Permission of the region to be mapped</param>
/// <returns>Result of the mapping operation</returns>
/// <exception cref="NotSupportedException">The implementation does not support memory aliasing</exception>
protected abstract KernelResult MapPages(ulong address, IEnumerable<HostMemoryRange> ranges, KMemoryPermission permission);
/// <summary>
/// Unmaps a region of memory that was previously mapped with one of the page mapping methods.
/// </summary>
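
UnmapPages and the transfer-memory paths above validate the caller-supplied KPageList against a fresh walk of the page table before unmapping. A hypothetical illustration of that comparison, using only the KPageList methods visible in this diff (AddRange, GetPagesCount, IsEqual) and made-up addresses:

KPageList expected = new KPageList();
expected.AddRange(0x8012_3000UL, 4);   // 4 pages starting at guest PA 0x80123000

KPageList current = new KPageList();
current.AddRange(0x8012_3000UL, 4);    // rebuilt from GetPhysicalRegions at unmap time

bool sameBacking = current.IsEqual(expected);   // true: both describe the same pages
ulong pagesCount = current.GetPagesCount();     // 4, used to compute the unmapped size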

View file

@ -1,139 +0,0 @@
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.Memory;
using Ryujinx.Memory.Range;
using System;
using System.Collections.Generic;
using System.Linq;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class KPageTableHostMapped : KPageTableBase
{
private const int CopyChunckSize = 0x100000;
private readonly IVirtualMemoryManager _cpuMemory;
public override bool SupportsMemoryAliasing => false;
public KPageTableHostMapped(KernelContext context, IVirtualMemoryManager cpuMemory) : base(context)
{
_cpuMemory = cpuMemory;
}
/// <inheritdoc/>
protected override IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size)
{
return _cpuMemory.GetPhysicalRegions(va, size);
}
/// <inheritdoc/>
protected override ReadOnlySpan<byte> GetSpan(ulong va, int size)
{
return _cpuMemory.GetSpan(va, size);
}
/// <inheritdoc/>
protected override KernelResult MapMemory(ulong src, ulong dst, ulong pagesCount, KMemoryPermission oldSrcPermission, KMemoryPermission newDstPermission)
{
ulong size = pagesCount * PageSize;
_cpuMemory.Map(dst, 0, size);
ulong currentSize = size;
while (currentSize > 0)
{
ulong copySize = Math.Min(currentSize, CopyChunckSize);
_cpuMemory.Write(dst, _cpuMemory.GetSpan(src, (int)copySize));
currentSize -= copySize;
}
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult UnmapMemory(ulong dst, ulong src, ulong pagesCount, KMemoryPermission oldDstPermission, KMemoryPermission newSrcPermission)
{
ulong size = pagesCount * PageSize;
// TODO: Validation.
ulong currentSize = size;
while (currentSize > 0)
{
ulong copySize = Math.Min(currentSize, CopyChunckSize);
_cpuMemory.Write(src, _cpuMemory.GetSpan(dst, (int)copySize));
currentSize -= copySize;
}
_cpuMemory.Unmap(dst, size);
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult MapPages(ulong dstVa, ulong pagesCount, ulong srcPa, KMemoryPermission permission, bool shouldFillPages, byte fillValue)
{
_cpuMemory.Map(dstVa, 0, pagesCount * PageSize);
if (shouldFillPages)
{
_cpuMemory.Fill(dstVa, pagesCount * PageSize, fillValue);
}
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult MapPages(ulong address, KPageList pageList, KMemoryPermission permission, bool shouldFillPages, byte fillValue)
{
ulong pagesCount = pageList.GetPagesCount();
_cpuMemory.Map(address, 0, pagesCount * PageSize);
if (shouldFillPages)
{
_cpuMemory.Fill(address, pagesCount * PageSize, fillValue);
}
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult MapPages(ulong address, IEnumerable<HostMemoryRange> ranges, KMemoryPermission permission)
{
throw new NotSupportedException();
}
/// <inheritdoc/>
protected override KernelResult Unmap(ulong address, ulong pagesCount)
{
_cpuMemory.Unmap(address, pagesCount * PageSize);
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult Reprotect(ulong address, ulong pagesCount, KMemoryPermission permission)
{
// TODO.
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult ReprotectWithAttributes(ulong address, ulong pagesCount, KMemoryPermission permission)
{
// TODO.
return KernelResult.Success;
}
/// <inheritdoc/>
protected override void SignalMemoryTracking(ulong va, ulong size, bool write)
{
_cpuMemory.SignalMemoryTracking(va, size, write);
}
/// <inheritdoc/>
protected override void Write(ulong va, ReadOnlySpan<byte> data)
{
_cpuMemory.Write(va, data);
}
}
}

View file

@ -6,7 +6,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class KSharedMemory : KAutoObject
{
private readonly SharedMemoryStorage _storage;
private readonly KPageList _pageList;
private readonly ulong _ownerPid;
@ -20,7 +20,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
KMemoryPermission ownerPermission,
KMemoryPermission userPermission) : base(context)
{
_storage = storage;
_pageList = storage.GetPageList();
_ownerPid = ownerPid;
_ownerPermission = ownerPermission;
_userPermission = userPermission;
@ -33,10 +33,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
KProcess process,
KMemoryPermission permission)
{
ulong pagesCountRounded = BitUtils.DivRoundUp(size, KPageTableBase.PageSize);
var pageList = _storage.GetPageList();
if (pageList.GetPagesCount() != pagesCountRounded)
if (_pageList.GetPagesCount() != BitUtils.DivRoundUp(size, KPageTableBase.PageSize))
{
return KernelResult.InvalidSize;
}
@ -50,35 +47,17 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
return KernelResult.InvalidPermission;
}
KernelResult result = memoryManager.MapPages(address, pageList, MemoryState.SharedMemory, permission);
if (result == KernelResult.Success && !memoryManager.SupportsMemoryAliasing)
{
_storage.Borrow(process, address);
return memoryManager.MapPages(address, _pageList, MemoryState.SharedMemory, permission);
}
return result;
}
public KernelResult UnmapFromProcess(
KPageTableBase memoryManager,
ulong address,
ulong size,
KProcess process)
public KernelResult UnmapFromProcess(KPageTableBase memoryManager, ulong address, ulong size, KProcess process)
{
ulong pagesCountRounded = BitUtils.DivRoundUp(size, KPageTableBase.PageSize);
var pageList = _storage.GetPageList();
ulong pagesCount = pageList.GetPagesCount();
if (pagesCount != pagesCountRounded)
if (_pageList.GetPagesCount() != BitUtils.DivRoundUp(size, KPageTableBase.PageSize))
{
return KernelResult.InvalidSize;
}
var ranges = _storage.GetRanges();
return memoryManager.UnmapPages(address, pagesCount, ranges, MemoryState.SharedMemory);
return memoryManager.UnmapPages(address, _pageList, MemoryState.SharedMemory);
}
}
}

View file

@ -1,9 +1,7 @@
using Ryujinx.Common;
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.Memory.Range;
using System;
using System.Collections.Generic;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
@ -14,9 +12,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
// TODO: Remove when we no longer need to read it from the owner directly.
public KProcess Creator => _creator;
private readonly List<HostMemoryRange> _ranges;
private readonly SharedMemoryStorage _storage;
private readonly KPageList _pageList;
public ulong Address { get; private set; }
public ulong Size { get; private set; }
@ -28,12 +24,12 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
public KTransferMemory(KernelContext context) : base(context)
{
_ranges = new List<HostMemoryRange>();
_pageList = new KPageList();
}
public KTransferMemory(KernelContext context, SharedMemoryStorage storage) : base(context)
{
_storage = storage;
_pageList = storage.GetPageList();
Permission = KMemoryPermission.ReadAndWrite;
_hasBeenInitialized = true;
@ -46,7 +42,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
_creator = creator;
KernelResult result = creator.MemoryManager.BorrowTransferMemory(_ranges, address, size, permission);
KernelResult result = creator.MemoryManager.BorrowTransferMemory(_pageList, address, size, permission);
if (result != KernelResult.Success)
{
@ -71,15 +67,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
KProcess process,
KMemoryPermission permission)
{
if (_storage == null)
{
throw new NotImplementedException();
}
ulong pagesCountRounded = BitUtils.DivRoundUp(size, KPageTableBase.PageSize);
var pageList = _storage.GetPageList();
if (pageList.GetPagesCount() != pagesCountRounded)
if (_pageList.GetPagesCount() != BitUtils.DivRoundUp(size, KPageTableBase.PageSize))
{
return KernelResult.InvalidSize;
}
@ -91,16 +79,11 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
MemoryState state = Permission == KMemoryPermission.None ? MemoryState.TransferMemoryIsolated : MemoryState.TransferMemory;
KernelResult result = memoryManager.MapPages(address, pageList, state, KMemoryPermission.ReadAndWrite);
KernelResult result = memoryManager.MapPages(address, _pageList, state, KMemoryPermission.ReadAndWrite);
if (result == KernelResult.Success)
{
_isMapped = true;
if (!memoryManager.SupportsMemoryAliasing)
{
_storage.Borrow(process, address);
}
}
return result;
@ -112,26 +95,14 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
ulong size,
KProcess process)
{
if (_storage == null)
{
throw new NotImplementedException();
}
ulong pagesCountRounded = BitUtils.DivRoundUp(size, KPageTableBase.PageSize);
var pageList = _storage.GetPageList();
ulong pagesCount = pageList.GetPagesCount();
if (pagesCount != pagesCountRounded)
if (_pageList.GetPagesCount() != BitUtils.DivRoundUp(size, KPageTableBase.PageSize))
{
return KernelResult.InvalidSize;
}
var ranges = _storage.GetRanges();
MemoryState state = Permission == KMemoryPermission.None ? MemoryState.TransferMemoryIsolated : MemoryState.TransferMemory;
KernelResult result = memoryManager.UnmapPages(address, pagesCount, ranges, state);
KernelResult result = memoryManager.UnmapPages(address, _pageList, state);
if (result == KernelResult.Success)
{
@ -145,7 +116,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
if (_hasBeenInitialized)
{
if (!_isMapped && _creator.MemoryManager.UnborrowTransferMemory(Address, Size, _ranges) != KernelResult.Success)
if (!_isMapped && _creator.MemoryManager.UnborrowTransferMemory(Address, Size, _pageList) != KernelResult.Success)
{
throw new InvalidOperationException("Unexpected failure restoring transfer memory attributes.");
}

View file

@ -1,8 +1,4 @@
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.Memory;
using Ryujinx.Memory.Range;
using System;
using System.Collections.Generic;
using System;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
@ -12,9 +8,6 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
private readonly KPageList _pageList;
private readonly ulong _size;
private IVirtualMemoryManager _borrowerMemory;
private ulong _borrowerVa;
public SharedMemoryStorage(KernelContext context, KPageList pageList)
{
_context = context;
@ -29,24 +22,6 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
}
}
public void Borrow(KProcess dstProcess, ulong va)
{
ulong currentOffset = 0;
foreach (KPageNode pageNode in _pageList)
{
ulong address = pageNode.Address - DramMemoryMap.DramBase;
ulong size = pageNode.PagesCount * KPageTableBase.PageSize;
dstProcess.CpuMemory.Write(va + currentOffset, _context.Memory.GetSpan(address + currentOffset, (int)size));
currentOffset += size;
}
_borrowerMemory = dstProcess.CpuMemory;
_borrowerVa = va;
}
public void ZeroFill()
{
for (ulong offset = 0; offset < _size; offset += sizeof(ulong))
@ -56,8 +31,6 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
}
public ref T GetRef<T>(ulong offset) where T : unmanaged
{
if (_borrowerMemory == null)
{
if (_pageList.Nodes.Count == 1)
{
@ -67,33 +40,6 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
throw new NotImplementedException("Non-contiguous shared memory is not yet supported.");
}
else
{
return ref _borrowerMemory.GetRef<T>(_borrowerVa + offset);
}
}
public IEnumerable<HostMemoryRange> GetRanges()
{
if (_borrowerMemory == null)
{
var ranges = new List<HostMemoryRange>();
foreach (KPageNode pageNode in _pageList)
{
ulong address = pageNode.Address - DramMemoryMap.DramBase;
ulong size = pageNode.PagesCount * KPageTableBase.PageSize;
ranges.Add(new HostMemoryRange(_context.Memory.GetPointer(address, size), size));
}
return ranges;
}
else
{
return _borrowerMemory.GetPhysicalRegions(_borrowerVa, _size);
}
}
public KPageList GetPageList()
{

View file

@ -1076,15 +1076,8 @@ namespace Ryujinx.HLE.HOS.Kernel.Process
Context = _contextFactory.Create(KernelContext, Pid, 1UL << addrSpaceBits, InvalidAccessHandler, for64Bit);
if (Context.AddressSpace is MemoryManagerHostMapped)
{
MemoryManager = new KPageTableHostMapped(KernelContext, CpuMemory);
}
else
{
MemoryManager = new KPageTable(KernelContext, CpuMemory);
}
}
private bool InvalidAccessHandler(ulong va)
{

View file

@ -6,7 +6,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process
{
public IProcessContext Create(KernelContext context, ulong pid, ulong addressSpaceSize, InvalidAccessHandler invalidAccessHandler, bool for64Bit)
{
return new ProcessContext(new AddressSpaceManager(addressSpaceSize));
return new ProcessContext(new AddressSpaceManager(context.Memory, addressSpaceSize));
}
}
}

View file

@ -5,6 +5,7 @@ using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Memory;
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.HLE.Loaders.Executables;
using Ryujinx.Memory;
using System;
using System.Collections.Generic;
using System.IO;
@ -566,6 +567,11 @@ namespace Ryujinx.HLE.HOS.Services.Ro
_owner = context.Process.HandleTable.GetKProcess(context.Request.HandleDesc.ToCopy[0]);
context.Device.System.KernelContext.Syscall.CloseHandle(context.Request.HandleDesc.ToCopy[0]);
if (_owner?.CpuMemory is IRefCounted rc)
{
rc.IncrementReferenceCount();
}
return ResultCode.Success;
}
@ -579,6 +585,11 @@ namespace Ryujinx.HLE.HOS.Services.Ro
}
_nroInfos.Clear();
if (_owner?.CpuMemory is IRefCounted rc)
{
rc.DecrementReferenceCount();
}
}
}
}
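
The hunk above takes and releases a reference on the owner process's CpuMemory via IRefCounted, and the commit message notes that memory blocks likewise use reference counting so the underlying shared memory handle is only closed once every block using it has been disposed. That MemoryBlock-side code is not in the diff context; the sketch below only illustrates the general pattern, with all names invented for the example.

using System;
using System.Threading;

// Illustrative only; not the actual Ryujinx MemoryBlock implementation.
class SharedHandleOwner : IDisposable
{
    private readonly IntPtr _sharedMemoryHandle;
    private int _referenceCount = 1;            // the creator holds the first reference

    public SharedHandleOwner(IntPtr handle) => _sharedMemoryHandle = handle;

    public void IncrementReferenceCount() => Interlocked.Increment(ref _referenceCount);

    public void DecrementReferenceCount()
    {
        // The handle is closed only when the last user lets go of it.
        if (Interlocked.Decrement(ref _referenceCount) == 0)
        {
            CloseHandle(_sharedMemoryHandle);
        }
    }

    public void Dispose() => DecrementReferenceCount();

    private static void CloseHandle(IntPtr handle) { /* platform-specific close */ }
}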

View file

@ -1,5 +1,6 @@
using Ryujinx.Audio.Backends.CompatLayer;
using Ryujinx.Audio.Integration;
using Ryujinx.Common.Configuration;
using Ryujinx.Graphics.Gpu;
using Ryujinx.HLE.FileSystem;
using Ryujinx.HLE.HOS;
@ -48,8 +49,12 @@ namespace Ryujinx.HLE
FileSystem = Configuration.VirtualFileSystem;
UiHandler = Configuration.HostUiHandler;
MemoryAllocationFlags memoryAllocationFlags = configuration.MemoryManagerMode == MemoryManagerMode.SoftwarePageTable
? MemoryAllocationFlags.Reserve
: MemoryAllocationFlags.Reserve | MemoryAllocationFlags.Mirrorable;
AudioDeviceDriver = new CompatLayerHardwareDeviceDriver(Configuration.AudioDeviceDriver);
Memory = new MemoryBlock(Configuration.MemoryConfiguration.ToDramSize(), MemoryAllocationFlags.Reserve);
Memory = new MemoryBlock(Configuration.MemoryConfiguration.ToDramSize(), memoryAllocationFlags);
Gpu = new GpuContext(Configuration.GpuRenderer);
System = new Horizon(this);
Statistics = new PerformanceStatistics();

View file

@ -14,7 +14,7 @@ namespace Ryujinx.Memory.Tests
{
}
public void Map(ulong va, nuint hostAddress, ulong size)
public void Map(ulong va, ulong pa, ulong size)
{
throw new NotImplementedException();
}
@ -59,9 +59,9 @@ namespace Ryujinx.Memory.Tests
throw new NotImplementedException();
}
IEnumerable<HostMemoryRange> IVirtualMemoryManager.GetPhysicalRegions(ulong va, ulong size)
IEnumerable<MemoryRange> IVirtualMemoryManager.GetPhysicalRegions(ulong va, ulong size)
{
return NoMappings ? new HostMemoryRange[0] : new HostMemoryRange[] { new HostMemoryRange((nuint)va, size) };
return NoMappings ? new MemoryRange[0] : new MemoryRange[] { new MemoryRange(va, size) };
}
public bool IsMapped(ulong va)

View file

@ -1,5 +1,4 @@
using NUnit.Framework;
using Ryujinx.Memory;
using System;
using System.Runtime.InteropServices;
@ -38,5 +37,48 @@ namespace Ryujinx.Memory.Tests
Assert.AreEqual(Marshal.ReadInt32(_memoryBlock.Pointer, 0x2040), 0xbadc0de);
}
[Test, Explicit]
public void Test_Alias()
{
using MemoryBlock backing = new MemoryBlock(0x10000, MemoryAllocationFlags.Mirrorable);
using MemoryBlock toAlias = new MemoryBlock(0x10000, MemoryAllocationFlags.Reserve | MemoryAllocationFlags.ViewCompatible);
toAlias.MapView(backing, 0x1000, 0, 0x4000);
toAlias.UnmapView(backing, 0x3000, 0x1000);
toAlias.Write(0, 0xbadc0de);
Assert.AreEqual(Marshal.ReadInt32(backing.Pointer, 0x1000), 0xbadc0de);
}
[Test, Explicit]
public void Test_AliasRandom()
{
using MemoryBlock backing = new MemoryBlock(0x80000, MemoryAllocationFlags.Mirrorable);
using MemoryBlock toAlias = new MemoryBlock(0x80000, MemoryAllocationFlags.Reserve | MemoryAllocationFlags.ViewCompatible);
Random rng = new Random(123);
for (int i = 0; i < 20000; i++)
{
int srcPage = rng.Next(0, 64);
int dstPage = rng.Next(0, 64);
int pages = rng.Next(1, 65);
if ((rng.Next() & 1) != 0)
{
toAlias.MapView(backing, (ulong)srcPage << 12, (ulong)dstPage << 12, (ulong)pages << 12);
int offset = rng.Next(0, 0x1000 - sizeof(int));
toAlias.Write((ulong)((dstPage << 12) + offset), 0xbadc0de);
Assert.AreEqual(Marshal.ReadInt32(backing.Pointer, (srcPage << 12) + offset), 0xbadc0de);
}
else
{
toAlias.UnmapView(backing, (ulong)dstPage << 12, (ulong)pages << 12);
}
}
}
}
}

View file

@ -13,9 +13,9 @@ namespace Ryujinx.Memory
/// </summary>
public sealed class AddressSpaceManager : IVirtualMemoryManager, IWritableBlock
{
public const int PageBits = PageTable<nuint>.PageBits;
public const int PageSize = PageTable<nuint>.PageSize;
public const int PageMask = PageTable<nuint>.PageMask;
public const int PageBits = PageTable<ulong>.PageBits;
public const int PageSize = PageTable<ulong>.PageSize;
public const int PageMask = PageTable<ulong>.PageMask;
/// <summary>
/// Address space width in bits.
@ -24,14 +24,15 @@ namespace Ryujinx.Memory
private readonly ulong _addressSpaceSize;
private readonly PageTable<nuint> _pageTable;
private readonly MemoryBlock _backingMemory;
private readonly PageTable<ulong> _pageTable;
/// <summary>
/// Creates a new instance of the memory manager.
/// </summary>
/// <param name="backingMemory">Physical backing memory where virtual memory will be mapped to</param>
/// <param name="addressSpaceSize">Size of the address space</param>
public AddressSpaceManager(ulong addressSpaceSize)
public AddressSpaceManager(MemoryBlock backingMemory, ulong addressSpaceSize)
{
ulong asSize = PageSize;
int asBits = PageBits;
@ -44,37 +45,26 @@ namespace Ryujinx.Memory
AddressSpaceBits = asBits;
_addressSpaceSize = asSize;
_pageTable = new PageTable<nuint>();
_backingMemory = backingMemory;
_pageTable = new PageTable<ulong>();
}
/// <summary>
/// Maps a virtual memory range into a physical memory range.
/// </summary>
/// <remarks>
/// Addresses and size must be page aligned.
/// </remarks>
/// <param name="va">Virtual memory address</param>
/// <param name="hostAddress">Physical memory address</param>
/// <param name="size">Size to be mapped</param>
public void Map(ulong va, nuint hostAddress, ulong size)
/// <inheritdoc/>
public void Map(ulong va, ulong pa, ulong size)
{
AssertValidAddressAndSize(va, size);
while (size != 0)
{
_pageTable.Map(va, hostAddress);
_pageTable.Map(va, pa);
va += PageSize;
hostAddress += PageSize;
pa += PageSize;
size -= PageSize;
}
}
/// <summary>
/// Unmaps a previously mapped range of virtual memory.
/// </summary>
/// <param name="va">Virtual address of the range to be unmapped</param>
/// <param name="size">Size of the range to be unmapped</param>
/// <inheritdoc/>
public void Unmap(ulong va, ulong size)
{
AssertValidAddressAndSize(va, size);
@ -88,47 +78,25 @@ namespace Ryujinx.Memory
}
}
/// <summary>
/// Reads data from mapped memory.
/// </summary>
/// <typeparam name="T">Type of the data being read</typeparam>
/// <param name="va">Virtual address of the data in memory</param>
/// <returns>The data</returns>
/// <exception cref="InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
/// <inheritdoc/>
public T Read<T>(ulong va) where T : unmanaged
{
return MemoryMarshal.Cast<byte, T>(GetSpan(va, Unsafe.SizeOf<T>()))[0];
}
/// <summary>
/// Reads data from mapped memory.
/// </summary>
/// <param name="va">Virtual address of the data in memory</param>
/// <param name="data">Span to store the data being read into</param>
/// <exception cref="InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
/// <inheritdoc/>
public void Read(ulong va, Span<byte> data)
{
ReadImpl(va, data);
}
/// <summary>
/// Writes data to mapped memory.
/// </summary>
/// <typeparam name="T">Type of the data being written</typeparam>
/// <param name="va">Virtual address to write the data into</param>
/// <param name="value">Data to be written</param>
/// <exception cref="InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
/// <inheritdoc/>
public void Write<T>(ulong va, T value) where T : unmanaged
{
Write(va, MemoryMarshal.Cast<T, byte>(MemoryMarshal.CreateSpan(ref value, 1)));
}
/// <summary>
/// Writes data to mapped memory.
/// </summary>
/// <param name="va">Virtual address to write the data into</param>
/// <param name="data">Data to be written</param>
/// <exception cref="InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
/// <inheritdoc/>
public void Write(ulong va, ReadOnlySpan<byte> data)
{
if (data.Length == 0)
@ -140,7 +108,7 @@ namespace Ryujinx.Memory
if (IsContiguousAndMapped(va, data.Length))
{
data.CopyTo(GetHostSpanContiguous(va, data.Length));
data.CopyTo(_backingMemory.GetSpan(GetPhysicalAddressInternal(va), data.Length));
}
else
{
@ -148,34 +116,27 @@ namespace Ryujinx.Memory
if ((va & PageMask) != 0)
{
ulong pa = GetPhysicalAddressInternal(va);
size = Math.Min(data.Length, PageSize - (int)(va & PageMask));
data.Slice(0, size).CopyTo(GetHostSpanContiguous(va, size));
data.Slice(0, size).CopyTo(_backingMemory.GetSpan(pa, size));
offset += size;
}
for (; offset < data.Length; offset += size)
{
ulong pa = GetPhysicalAddressInternal(va + (ulong)offset);
size = Math.Min(data.Length - offset, PageSize);
data.Slice(offset, size).CopyTo(GetHostSpanContiguous(va + (ulong)offset, size));
data.Slice(offset, size).CopyTo(_backingMemory.GetSpan(pa, size));
}
}
}
/// <summary>
/// Gets a read-only span of data from mapped memory.
/// </summary>
/// <remarks>
/// This may perform a allocation if the data is not contiguous in memory.
/// For this reason, the span is read-only, you can't modify the data.
/// </remarks>
/// <param name="va">Virtual address of the data</param>
/// <param name="size">Size of the data</param>
/// <param name="tracked">True if read tracking is triggered on the span</param>
/// <returns>A read-only span of the data</returns>
/// <exception cref="InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
/// <inheritdoc/>
public ReadOnlySpan<byte> GetSpan(ulong va, int size, bool tracked = false)
{
if (size == 0)
@ -185,7 +146,7 @@ namespace Ryujinx.Memory
if (IsContiguousAndMapped(va, size))
{
return GetHostSpanContiguous(va, size);
return _backingMemory.GetSpan(GetPhysicalAddressInternal(va), size);
}
else
{
@ -197,19 +158,7 @@ namespace Ryujinx.Memory
}
}
/// <summary>
/// Gets a region of memory that can be written to.
/// </summary>
/// <remarks>
/// If the requested region is not contiguous in physical memory,
/// this will perform an allocation, and flush the data (writing it
/// back to the backing memory) on disposal.
/// </remarks>
/// <param name="va">Virtual address of the data</param>
/// <param name="size">Size of the data</param>
/// <param name="tracked">True if write tracking is triggered on the span</param>
/// <returns>A writable region of memory containing the data</returns>
/// <exception cref="InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
/// <inheritdoc/>
public unsafe WritableRegion GetWritableRegion(ulong va, int size, bool tracked = false)
{
if (size == 0)
@ -219,7 +168,7 @@ namespace Ryujinx.Memory
if (IsContiguousAndMapped(va, size))
{
return new WritableRegion(null, va, new NativeMemoryManager<byte>((byte*)GetHostAddress(va), size).Memory);
return new WritableRegion(null, va, _backingMemory.GetMemory(GetPhysicalAddressInternal(va), size));
}
else
{
@ -231,33 +180,18 @@ namespace Ryujinx.Memory
}
}
/// <summary>
/// Gets a reference for the given type at the specified virtual memory address.
/// </summary>
/// <remarks>
/// The data must be located at a contiguous memory region.
/// </remarks>
/// <typeparam name="T">Type of the data to get the reference</typeparam>
/// <param name="va">Virtual address of the data</param>
/// <returns>A reference to the data in memory</returns>
/// <exception cref="MemoryNotContiguousException">Throw if the specified memory region is not contiguous in physical memory</exception>
public unsafe ref T GetRef<T>(ulong va) where T : unmanaged
/// <inheritdoc/>
public ref T GetRef<T>(ulong va) where T : unmanaged
{
if (!IsContiguous(va, Unsafe.SizeOf<T>()))
{
ThrowMemoryNotContiguous();
}
return ref *(T*)GetHostAddress(va);
return ref _backingMemory.GetRef<T>(GetPhysicalAddressInternal(va));
}
/// <summary>
/// Computes the number of pages in a virtual address range.
/// </summary>
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range</param>
/// <param name="startVa">The virtual address of the beginning of the first page</param>
/// <remarks>This function does not differentiate between allocated and unallocated pages.</remarks>
/// <inheritdoc/>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private int GetPagesCount(ulong va, uint size, out ulong startVa)
{
@ -268,7 +202,7 @@ namespace Ryujinx.Memory
return (int)(vaSpan / PageSize);
}
private void ThrowMemoryNotContiguous() => throw new MemoryNotContiguousException();
private static void ThrowMemoryNotContiguous() => throw new MemoryNotContiguousException();
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private bool IsContiguousAndMapped(ulong va, int size) => IsContiguous(va, size) && IsMapped(va);
@ -290,7 +224,7 @@ namespace Ryujinx.Memory
return false;
}
if (GetHostAddress(va) + PageSize != GetHostAddress(va + PageSize))
if (GetPhysicalAddressInternal(va) + PageSize != GetPhysicalAddressInternal(va + PageSize))
{
return false;
}
@ -301,18 +235,12 @@ namespace Ryujinx.Memory
return true;
}
/// <summary>
/// Gets the physical regions that make up the given virtual address region.
/// If any part of the virtual region is unmapped, null is returned.
/// </summary>
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range</param>
/// <returns>Array of physical regions</returns>
public IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size)
/// <inheritdoc/>
public IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size)
{
if (size == 0)
{
return Enumerable.Empty<HostMemoryRange>();
return Enumerable.Empty<MemoryRange>();
}
if (!ValidateAddress(va) || !ValidateAddressAndSize(va, size))
@ -322,9 +250,9 @@ namespace Ryujinx.Memory
int pages = GetPagesCount(va, (uint)size, out va);
var regions = new List<HostMemoryRange>();
var regions = new List<MemoryRange>();
nuint regionStart = GetHostAddress(va);
ulong regionStart = GetPhysicalAddressInternal(va);
ulong regionSize = PageSize;
for (int page = 0; page < pages - 1; page++)
@ -334,12 +262,12 @@ namespace Ryujinx.Memory
return null;
}
nuint newHostAddress = GetHostAddress(va + PageSize);
ulong newPa = GetPhysicalAddressInternal(va + PageSize);
if (GetHostAddress(va) + PageSize != newHostAddress)
if (GetPhysicalAddressInternal(va) + PageSize != newPa)
{
regions.Add(new HostMemoryRange(regionStart, regionSize));
regionStart = newHostAddress;
regions.Add(new MemoryRange(regionStart, regionSize));
regionStart = newPa;
regionSize = 0;
}
@ -347,7 +275,7 @@ namespace Ryujinx.Memory
regionSize += PageSize;
}
regions.Add(new HostMemoryRange(regionStart, regionSize));
regions.Add(new MemoryRange(regionStart, regionSize));
return regions;
}
@ -365,26 +293,26 @@ namespace Ryujinx.Memory
if ((va & PageMask) != 0)
{
ulong pa = GetPhysicalAddressInternal(va);
size = Math.Min(data.Length, PageSize - (int)(va & PageMask));
GetHostSpanContiguous(va, size).CopyTo(data.Slice(0, size));
_backingMemory.GetSpan(pa, size).CopyTo(data.Slice(0, size));
offset += size;
}
for (; offset < data.Length; offset += size)
{
ulong pa = GetPhysicalAddressInternal(va + (ulong)offset);
size = Math.Min(data.Length - offset, PageSize);
GetHostSpanContiguous(va + (ulong)offset, size).CopyTo(data.Slice(offset, size));
_backingMemory.GetSpan(pa, size).CopyTo(data.Slice(offset, size));
}
}
/// <summary>
/// Checks if the page at a given virtual address is mapped.
/// </summary>
/// <param name="va">Virtual address to check</param>
/// <returns>True if the address is mapped, false otherwise</returns>
/// <inheritdoc/>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public bool IsMapped(ulong va)
{
@ -396,12 +324,7 @@ namespace Ryujinx.Memory
return _pageTable.Read(va) != 0;
}
/// <summary>
/// Checks if a memory range is mapped.
/// </summary>
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range in bytes</param>
/// <returns>True if the entire range is mapped, false otherwise</returns>
/// <inheritdoc/>
public bool IsRangeMapped(ulong va, ulong size)
{
if (size == 0UL)
@ -460,14 +383,9 @@ namespace Ryujinx.Memory
}
}
private unsafe Span<byte> GetHostSpanContiguous(ulong va, int size)
private ulong GetPhysicalAddressInternal(ulong va)
{
return new Span<byte>((void*)GetHostAddress(va), size);
}
private nuint GetHostAddress(ulong va)
{
return _pageTable.Read(va) + (nuint)(va & PageMask);
return _pageTable.Read(va) + (va & PageMask);
}
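A short usage sketch of the reworked manager (sizes and addresses are illustrative, and the backing block is assumed to use the default allocation flags): guest reads and writes now resolve to physical addresses inside the backing MemoryBlock instead of raw host pointers.

var backingMemory = new MemoryBlock(0x100000);
var addressSpace = new AddressSpaceManager(backingMemory, addressSpaceSize: 1UL << 32);

// Map two guest pages at VA 0x10000 onto backing PA 0x4000 (all page aligned).
addressSpace.Map(va: 0x10000, pa: 0x4000, size: 0x2000);

addressSpace.Write<uint>(0x10008, 0xcafe);
uint value = addressSpace.Read<uint>(0x10008); // 0xcafe, read back through the backing block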
/// <summary>

View file

@ -13,9 +13,9 @@ namespace Ryujinx.Memory
/// Addresses and size must be page aligned.
/// </remarks>
/// <param name="va">Virtual memory address</param>
/// <param name="hostAddress">Pointer where the region should be mapped to</param>
/// <param name="pa">Physical memory address where the region should be mapped to</param>
/// <param name="size">Size to be mapped</param>
void Map(ulong va, nuint hostAddress, ulong size);
void Map(ulong va, ulong pa, ulong size);
/// <summary>
/// Unmaps a previously mapped range of virtual memory.
@ -111,7 +111,7 @@ namespace Ryujinx.Memory
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range</param>
/// <returns>Array of physical regions</returns>
IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size);
IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size);
/// <summary>
/// Checks if the page at a given CPU virtual address is mapped.

View file

@ -29,6 +29,18 @@ namespace Ryujinx.Memory
/// Enables mirroring of the memory block through aliasing of memory pages.
/// When enabled, this allows creating more memory blocks sharing the same backing storage.
/// </summary>
Mirrorable = 1 << 2
Mirrorable = 1 << 2,
/// <summary>
/// Indicates that the memory block should support mapping views of a mirrorable memory block.
/// The source block whose views will be mapped must be created with the <see cref="Mirrorable"/> flag.
/// </summary>
ViewCompatible = 1 << 3,
/// <summary>
/// Forces views to be mapped page by page on Windows. This avoids the need to unmap the full range and remap
/// sub-ranges during partial unmaps, which would otherwise create a time window with incorrectly unmapped memory.
/// </summary>
ForceWindows4KBViewMapping = 1 << 4
}
}
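As a hedged sketch of how these flags are meant to be combined (sizes are illustrative): the backing block opts into Mirrorable so it is backed by shared memory that views can be created from, while the block acting as the address space opts into Reserve | ViewCompatible so parts of its reservation can be replaced by views of that backing.

var backing = new MemoryBlock(0x10000, MemoryAllocationFlags.Mirrorable);
var addressSpace = new MemoryBlock(0x80000, MemoryAllocationFlags.Reserve | MemoryAllocationFlags.ViewCompatible);

// Alias one page of the backing block into the reserved address space block.
addressSpace.MapView(backing, srcOffset: 0, dstOffset: 0x2000, size: 0x1000);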

View file

@ -1,4 +1,5 @@
using System;
using System.Collections.Concurrent;
using System.Runtime.CompilerServices;
using System.Threading;
@ -11,8 +12,12 @@ namespace Ryujinx.Memory
{
private readonly bool _usesSharedMemory;
private readonly bool _isMirror;
private readonly bool _viewCompatible;
private readonly bool _forceWindows4KBView;
private IntPtr _sharedMemory;
private IntPtr _pointer;
private ConcurrentDictionary<MemoryBlock, byte> _viewStorages;
private int _viewCount;
/// <summary>
/// Pointer to the memory block data.
@ -36,12 +41,14 @@ namespace Ryujinx.Memory
if (flags.HasFlag(MemoryAllocationFlags.Mirrorable))
{
_sharedMemory = MemoryManagement.CreateSharedMemory(size, flags.HasFlag(MemoryAllocationFlags.Reserve));
_pointer = MemoryManagement.MapSharedMemory(_sharedMemory);
_pointer = MemoryManagement.MapSharedMemory(_sharedMemory, size);
_usesSharedMemory = true;
}
else if (flags.HasFlag(MemoryAllocationFlags.Reserve))
{
_pointer = MemoryManagement.Reserve(size);
_viewCompatible = flags.HasFlag(MemoryAllocationFlags.ViewCompatible);
_forceWindows4KBView = flags.HasFlag(MemoryAllocationFlags.ForceWindows4KBViewMapping);
_pointer = MemoryManagement.Reserve(size, _viewCompatible);
}
else
{
@ -49,6 +56,10 @@ namespace Ryujinx.Memory
}
Size = size;
_viewStorages = new ConcurrentDictionary<MemoryBlock, byte>();
_viewStorages.TryAdd(this, 0);
_viewCount = 1;
}
/// <summary>
@ -60,7 +71,7 @@ namespace Ryujinx.Memory
/// <exception cref="PlatformNotSupportedException">Throw when the current platform is not supported</exception>
private MemoryBlock(ulong size, IntPtr sharedMemory)
{
_pointer = MemoryManagement.MapSharedMemory(sharedMemory);
_pointer = MemoryManagement.MapSharedMemory(sharedMemory, size);
Size = size;
_usesSharedMemory = true;
_isMirror = true;
@ -112,6 +123,42 @@ namespace Ryujinx.Memory
return MemoryManagement.Decommit(GetPointerInternal(offset, size), size);
}
/// <summary>
/// Maps a view of memory from another memory block.
/// </summary>
/// <param name="srcBlock">Memory block from where the backing memory will be taken</param>
/// <param name="srcOffset">Offset on <paramref name="srcBlock"/> of the region that should be mapped</param>
/// <param name="dstOffset">Offset to map the view into on this block</param>
/// <param name="size">Size of the range to be mapped</param>
/// <exception cref="NotSupportedException">Throw when the source memory block does not support mirroring</exception>
/// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
/// <exception cref="InvalidMemoryRegionException">Throw when either <paramref name="offset"/> or <paramref name="size"/> are out of range</exception>
public void MapView(MemoryBlock srcBlock, ulong srcOffset, ulong dstOffset, ulong size)
{
if (srcBlock._sharedMemory == IntPtr.Zero)
{
throw new ArgumentException("The source memory block is not mirrorable, and thus cannot be mapped on the current block.");
}
if (_viewStorages.TryAdd(srcBlock, 0))
{
srcBlock.IncrementViewCount();
}
MemoryManagement.MapView(srcBlock._sharedMemory, srcOffset, GetPointerInternal(dstOffset, size), size, _forceWindows4KBView);
}
/// <summary>
/// Unmaps a view of memory from another memory block.
/// </summary>
/// <param name="srcBlock">Memory block from where the backing memory was taken during map</param>
/// <param name="offset">Offset of the view previously mapped with <see cref="MapView"/></param>
/// <param name="size">Size of the range to be unmapped</param>
public void UnmapView(MemoryBlock srcBlock, ulong offset, ulong size)
{
MemoryManagement.UnmapView(srcBlock._sharedMemory, GetPointerInternal(offset, size), size, _forceWindows4KBView);
}
/// <summary>
/// Reprotects a region of memory.
/// </summary>
@ -124,21 +171,7 @@ namespace Ryujinx.Memory
/// <exception cref="MemoryProtectionException">Throw when <paramref name="permission"/> is invalid</exception>
public void Reprotect(ulong offset, ulong size, MemoryPermission permission, bool throwOnFail = true)
{
MemoryManagement.Reprotect(GetPointerInternal(offset, size), size, permission, throwOnFail);
}
/// <summary>
/// Remaps a region of memory into this memory block.
/// </summary>
/// <param name="offset">Starting offset of the range to be remapped into</param>
/// <param name="sourceAddress">Starting offset of the range to be remapped from</param>
/// <param name="size">Size of the range to be remapped</param>
/// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
/// <exception cref="InvalidMemoryRegionException">Throw when either <paramref name="offset"/> or <paramref name="size"/> are out of range</exception>
/// <exception cref="MemoryProtectionException">Throw when <paramref name="permission"/> is invalid</exception>
public void Remap(ulong offset, IntPtr sourceAddress, ulong size)
{
MemoryManagement.Remap(GetPointerInternal(offset, size), sourceAddress, size);
MemoryManagement.Reprotect(GetPointerInternal(offset, size), size, permission, _viewCompatible, _forceWindows4KBView, throwOnFail);
}
/// <summary>
@ -274,7 +307,7 @@ namespace Ryujinx.Memory
/// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
/// <exception cref="InvalidMemoryRegionException">Throw when either <paramref name="offset"/> or <paramref name="size"/> are out of range</exception>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public nuint GetPointer(ulong offset, ulong size) => (nuint)(ulong)GetPointerInternal(offset, size);
public IntPtr GetPointer(ulong offset, ulong size) => GetPointerInternal(offset, size);
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private IntPtr GetPointerInternal(ulong offset, ulong size)
@ -367,22 +400,63 @@ namespace Ryujinx.Memory
{
if (_usesSharedMemory)
{
MemoryManagement.UnmapSharedMemory(ptr);
if (_sharedMemory != IntPtr.Zero && !_isMirror)
{
MemoryManagement.DestroySharedMemory(_sharedMemory);
_sharedMemory = IntPtr.Zero;
}
MemoryManagement.UnmapSharedMemory(ptr, Size);
}
else
{
MemoryManagement.Free(ptr);
}
foreach (MemoryBlock viewStorage in _viewStorages.Keys)
{
viewStorage.DecrementViewCount();
}
_viewStorages.Clear();
}
}
private void ThrowObjectDisposed() => throw new ObjectDisposedException(nameof(MemoryBlock));
private void ThrowInvalidMemoryRegionException() => throw new InvalidMemoryRegionException();
/// <summary>
/// Increments the number of views that use this memory block as storage.
/// </summary>
private void IncrementViewCount()
{
Interlocked.Increment(ref _viewCount);
}
/// <summary>
/// Decrements the number of views that use this memory block as storage.
/// </summary>
private void DecrementViewCount()
{
if (Interlocked.Decrement(ref _viewCount) == 0 && _sharedMemory != IntPtr.Zero && !_isMirror)
{
MemoryManagement.DestroySharedMemory(_sharedMemory);
_sharedMemory = IntPtr.Zero;
}
}
/// <summary>
/// Checks if the specified memory allocation flags are supported on the current platform.
/// </summary>
/// <param name="flags">Flags to be checked</param>
/// <returns>True if the platform supports all the flags, false otherwise</returns>
public static bool SupportsFlags(MemoryAllocationFlags flags)
{
if (flags.HasFlag(MemoryAllocationFlags.ViewCompatible))
{
if (OperatingSystem.IsWindows())
{
return OperatingSystem.IsWindowsVersionAtLeast(10, 0, 17134);
}
return OperatingSystem.IsLinux() || OperatingSystem.IsMacOS();
}
return true;
}
private static void ThrowObjectDisposed() => throw new ObjectDisposedException(nameof(MemoryBlock));
private static void ThrowInvalidMemoryRegionException() => throw new InvalidMemoryRegionException();
}
}
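A small sketch of the intended capability check (the 4 GiB size is illustrative): callers are expected to probe SupportsFlags before requesting a ViewCompatible reservation and fall back to a mode that does not rely on host view mapping otherwise. Note also that disposing a block that mapped views decrements the reference count of every source block it used, so the shared memory handle is only destroyed once the last block referencing it is disposed.

bool canAlias = MemoryBlock.SupportsFlags(MemoryAllocationFlags.ViewCompatible);

// View compatible reservations need Windows 10 1803 (build 17134) or newer, Linux, or macOS.
var addressSpace = canAlias
    ? new MemoryBlock(1UL << 32, MemoryAllocationFlags.Reserve | MemoryAllocationFlags.ViewCompatible)
    : new MemoryBlock(1UL << 32, MemoryAllocationFlags.Reserve);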

View file

@ -12,8 +12,7 @@ namespace Ryujinx.Memory
return MemoryManagementWindows.Allocate(sizeNint);
}
else if (OperatingSystem.IsLinux() ||
OperatingSystem.IsMacOS())
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
return MemoryManagementUnix.Allocate(size);
}
@ -23,16 +22,15 @@ namespace Ryujinx.Memory
}
}
public static IntPtr Reserve(ulong size)
public static IntPtr Reserve(ulong size, bool viewCompatible)
{
if (OperatingSystem.IsWindows())
{
IntPtr sizeNint = new IntPtr((long)size);
return MemoryManagementWindows.Reserve(sizeNint);
return MemoryManagementWindows.Reserve(sizeNint, viewCompatible);
}
else if (OperatingSystem.IsLinux() ||
OperatingSystem.IsMacOS())
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
return MemoryManagementUnix.Reserve(size);
}
@ -50,8 +48,7 @@ namespace Ryujinx.Memory
return MemoryManagementWindows.Commit(address, sizeNint);
}
else if (OperatingSystem.IsLinux() ||
OperatingSystem.IsMacOS())
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
return MemoryManagementUnix.Commit(address, size);
}
@ -69,8 +66,7 @@ namespace Ryujinx.Memory
return MemoryManagementWindows.Decommit(address, sizeNint);
}
else if (OperatingSystem.IsLinux() ||
OperatingSystem.IsMacOS())
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
return MemoryManagementUnix.Decommit(address, size);
}
@ -80,7 +76,57 @@ namespace Ryujinx.Memory
}
}
public static void Reprotect(IntPtr address, ulong size, MemoryPermission permission, bool throwOnFail)
public static void MapView(IntPtr sharedMemory, ulong srcOffset, IntPtr address, ulong size, bool force4KBMap)
{
if (OperatingSystem.IsWindows())
{
IntPtr sizeNint = new IntPtr((long)size);
if (force4KBMap)
{
MemoryManagementWindows.MapView4KB(sharedMemory, srcOffset, address, sizeNint);
}
else
{
MemoryManagementWindows.MapView(sharedMemory, srcOffset, address, sizeNint);
}
}
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
MemoryManagementUnix.MapView(sharedMemory, srcOffset, address, size);
}
else
{
throw new PlatformNotSupportedException();
}
}
public static void UnmapView(IntPtr sharedMemory, IntPtr address, ulong size, bool force4KBMap)
{
if (OperatingSystem.IsWindows())
{
IntPtr sizeNint = new IntPtr((long)size);
if (force4KBMap)
{
MemoryManagementWindows.UnmapView4KB(address, sizeNint);
}
else
{
MemoryManagementWindows.UnmapView(sharedMemory, address, sizeNint);
}
}
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
MemoryManagementUnix.UnmapView(address, size);
}
else
{
throw new PlatformNotSupportedException();
}
}
public static void Reprotect(IntPtr address, ulong size, MemoryPermission permission, bool forView, bool force4KBMap, bool throwOnFail)
{
bool result;
@ -88,10 +134,16 @@ namespace Ryujinx.Memory
{
IntPtr sizeNint = new IntPtr((long)size);
result = MemoryManagementWindows.Reprotect(address, sizeNint, permission);
if (forView && force4KBMap)
{
result = MemoryManagementWindows.Reprotect4KB(address, sizeNint, permission, forView);
}
else if (OperatingSystem.IsLinux() ||
OperatingSystem.IsMacOS())
else
{
result = MemoryManagementWindows.Reprotect(address, sizeNint, permission, forView);
}
}
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
result = MemoryManagementUnix.Reprotect(address, size, permission);
}
@ -112,8 +164,7 @@ namespace Ryujinx.Memory
{
return MemoryManagementWindows.Free(address);
}
else if (OperatingSystem.IsLinux() ||
OperatingSystem.IsMacOS())
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
return MemoryManagementUnix.Free(address);
}
@ -131,8 +182,7 @@ namespace Ryujinx.Memory
return MemoryManagementWindows.CreateSharedMemory(sizeNint, reserve);
}
else if (OperatingSystem.IsLinux() ||
OperatingSystem.IsMacOS())
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
return MemoryManagementUnix.CreateSharedMemory(size, reserve);
}
@ -148,8 +198,7 @@ namespace Ryujinx.Memory
{
MemoryManagementWindows.DestroySharedMemory(handle);
}
else if (OperatingSystem.IsLinux() ||
OperatingSystem.IsMacOS())
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
MemoryManagementUnix.DestroySharedMemory(handle);
}
@ -159,16 +208,15 @@ namespace Ryujinx.Memory
}
}
public static IntPtr MapSharedMemory(IntPtr handle)
public static IntPtr MapSharedMemory(IntPtr handle, ulong size)
{
if (OperatingSystem.IsWindows())
{
return MemoryManagementWindows.MapSharedMemory(handle);
}
else if (OperatingSystem.IsLinux() ||
OperatingSystem.IsMacOS())
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
return MemoryManagementUnix.MapSharedMemory(handle);
return MemoryManagementUnix.MapSharedMemory(handle, size);
}
else
{
@ -176,29 +224,15 @@ namespace Ryujinx.Memory
}
}
public static void UnmapSharedMemory(IntPtr address)
public static void UnmapSharedMemory(IntPtr address, ulong size)
{
if (OperatingSystem.IsWindows())
{
MemoryManagementWindows.UnmapSharedMemory(address);
}
else if (OperatingSystem.IsLinux() ||
OperatingSystem.IsMacOS())
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
MemoryManagementUnix.UnmapSharedMemory(address);
}
else
{
throw new PlatformNotSupportedException();
}
}
public static IntPtr Remap(IntPtr target, IntPtr source, ulong size)
{
if (OperatingSystem.IsLinux() ||
OperatingSystem.IsMacOS())
{
return MemoryManagementUnix.Remap(target, source, size);
MemoryManagementUnix.UnmapSharedMemory(address, size);
}
else
{

View file

@ -1,8 +1,7 @@
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Runtime.InteropServices;
using System.Runtime.Versioning;
using System.Text;
using static Ryujinx.Memory.MemoryManagerUnixHelper;
@ -12,15 +11,6 @@ namespace Ryujinx.Memory
[SupportedOSPlatform("macos")]
static class MemoryManagementUnix
{
private struct UnixSharedMemory
{
public IntPtr Pointer;
public ulong Size;
public IntPtr SourcePointer;
}
private static readonly List<UnixSharedMemory> _sharedMemory = new List<UnixSharedMemory>();
private static readonly ConcurrentDictionary<IntPtr, ulong> _sharedMemorySource = new ConcurrentDictionary<IntPtr, ulong>();
private static readonly ConcurrentDictionary<IntPtr, ulong> _allocations = new ConcurrentDictionary<IntPtr, ulong>();
public static IntPtr Allocate(ulong size)
@ -69,40 +59,15 @@ namespace Ryujinx.Memory
public static bool Commit(IntPtr address, ulong size)
{
bool success = mprotect(address, size, MmapProts.PROT_READ | MmapProts.PROT_WRITE) == 0;
if (success)
{
foreach (var shared in _sharedMemory)
{
if ((ulong)address + size > (ulong)shared.SourcePointer && (ulong)address < (ulong)shared.SourcePointer + shared.Size)
{
ulong sharedAddress = ((ulong)address - (ulong)shared.SourcePointer) + (ulong)shared.Pointer;
if (mprotect((IntPtr)sharedAddress, size, MmapProts.PROT_READ | MmapProts.PROT_WRITE) != 0)
{
return false;
}
}
}
}
return success;
return mprotect(address, size, MmapProts.PROT_READ | MmapProts.PROT_WRITE) == 0;
}
public static bool Decommit(IntPtr address, ulong size)
{
bool isShared;
lock (_sharedMemory)
{
isShared = _sharedMemory.Exists(x => (ulong)address >= (ulong)x.Pointer && (ulong)address + size <= (ulong)x.Pointer + x.Size);
}
// Must be writable for madvise to work properly.
mprotect(address, size, MmapProts.PROT_READ | MmapProts.PROT_WRITE);
madvise(address, size, isShared ? MADV_REMOVE : MADV_DONTNEED);
madvise(address, size, MADV_REMOVE);
return mprotect(address, size, MmapProts.PROT_NONE) == 0;
}
@ -136,139 +101,83 @@ namespace Ryujinx.Memory
return false;
}
public static IntPtr Remap(IntPtr target, IntPtr source, ulong size)
public static bool Unmap(IntPtr address, ulong size)
{
int flags = 1;
if (target != IntPtr.Zero)
{
flags |= 2;
return munmap(address, size) == 0;
}
IntPtr result = mremap(source, 0, size, flags, target);
if (result == IntPtr.Zero)
public unsafe static IntPtr CreateSharedMemory(ulong size, bool reserve)
{
throw new InvalidOperationException();
}
int fd;
return result;
}
public static IntPtr CreateSharedMemory(ulong size, bool reserve)
if (OperatingSystem.IsMacOS())
{
IntPtr result = AllocateInternal(
size,
reserve ? MmapProts.PROT_NONE : MmapProts.PROT_READ | MmapProts.PROT_WRITE,
true);
byte[] memName = Encoding.ASCII.GetBytes("Ryujinx-XXXXXX");
if (result == IntPtr.Zero)
fixed (byte* pMemName = memName)
{
fd = shm_open((IntPtr)pMemName, 0x2 | 0x200 | 0x800 | 0x400, 384); // O_RDWR | O_CREAT | O_EXCL | O_TRUNC, 0600
if (fd == -1)
{
throw new OutOfMemoryException();
}
_sharedMemorySource[result] = (ulong)size;
if (shm_unlink((IntPtr)pMemName) != 0)
{
throw new OutOfMemoryException();
}
}
}
else
{
byte[] fileName = Encoding.ASCII.GetBytes("/dev/shm/Ryujinx-XXXXXX");
return result;
fixed (byte* pFileName = fileName)
{
fd = mkstemp((IntPtr)pFileName);
if (fd == -1)
{
throw new OutOfMemoryException();
}
if (unlink((IntPtr)pFileName) != 0)
{
throw new OutOfMemoryException();
}
}
}
if (ftruncate(fd, (IntPtr)size) != 0)
{
throw new OutOfMemoryException();
}
return (IntPtr)fd;
}
public static void DestroySharedMemory(IntPtr handle)
{
lock (_sharedMemory)
{
foreach (var memory in _sharedMemory)
{
if (memory.SourcePointer == handle)
{
throw new InvalidOperationException("Shared memory cannot be destroyed unless fully unmapped.");
}
}
close((int)handle);
}
_sharedMemorySource.Remove(handle, out ulong _);
public static IntPtr MapSharedMemory(IntPtr handle, ulong size)
{
return mmap(IntPtr.Zero, size, MmapProts.PROT_READ | MmapProts.PROT_WRITE, MmapFlags.MAP_SHARED, (int)handle, 0);
}
public static IntPtr MapSharedMemory(IntPtr handle)
public static void UnmapSharedMemory(IntPtr address, ulong size)
{
// Try find the handle for this shared memory. If it is mapped, then we want to map
// it a second time in another location.
// If it is not mapped, then its handle is the mapping.
ulong size = _sharedMemorySource[handle];
if (size == 0)
{
throw new InvalidOperationException("Shared memory cannot be mapped after its source is unmapped.");
munmap(address, size);
}
lock (_sharedMemory)
public static void MapView(IntPtr sharedMemory, ulong srcOffset, IntPtr location, ulong size)
{
foreach (var memory in _sharedMemory)
{
if (memory.Pointer == handle)
{
IntPtr result = AllocateInternal(
memory.Size,
MmapProts.PROT_NONE
);
if (result == IntPtr.Zero)
{
throw new OutOfMemoryException();
mmap(location, size, MmapProts.PROT_READ | MmapProts.PROT_WRITE, MmapFlags.MAP_FIXED | MmapFlags.MAP_SHARED, (int)sharedMemory, (long)srcOffset);
}
Remap(result, handle, memory.Size);
_sharedMemory.Add(new UnixSharedMemory
public static void UnmapView(IntPtr location, ulong size)
{
Pointer = result,
Size = memory.Size,
SourcePointer = handle
});
return result;
}
}
_sharedMemory.Add(new UnixSharedMemory
{
Pointer = handle,
Size = size,
SourcePointer = handle
});
}
return handle;
}
public static void UnmapSharedMemory(IntPtr address)
{
lock (_sharedMemory)
{
int removed = _sharedMemory.RemoveAll(memory =>
{
if (memory.Pointer == address)
{
if (memory.Pointer == memory.SourcePointer)
{
// After removing the original mapping, it cannot be mapped again.
_sharedMemorySource[memory.SourcePointer] = 0;
}
Free(address);
return true;
}
return false;
});
if (removed == 0)
{
throw new InvalidOperationException("Shared memory mapping could not be found.");
}
}
mmap(location, size, MmapProts.PROT_NONE, MmapFlags.MAP_FIXED, -1, 0);
}
}
}
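Roughly, the Unix path now reduces to the following sequence (a sketch only; in practice MemoryBlock drives these calls through MemoryManagement, and the sizes are illustrative):

IntPtr fd = MemoryManagementUnix.CreateSharedMemory(0x10000, reserve: true); // shm_open (macOS) or mkstemp (Linux), then ftruncate
IntPtr backing = MemoryManagementUnix.MapSharedMemory(fd, 0x10000);          // MAP_SHARED mapping of the whole file
IntPtr reserved = MemoryManagementUnix.Reserve(0x10000);                     // PROT_NONE reservation for the address space

MemoryManagementUnix.MapView(fd, 0, reserved, 0x1000); // MAP_FIXED | MAP_SHARED view over part of the reservation
MemoryManagementUnix.UnmapView(reserved, 0x1000);      // back to an anonymous PROT_NONE page

MemoryManagementUnix.UnmapSharedMemory(backing, 0x10000);
MemoryManagementUnix.DestroySharedMemory(fd);           // close(fd) once nothing references it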

View file

@ -1,7 +1,5 @@
using Ryujinx.Memory.WindowsShared;
using System;
using System.Collections.Generic;
using System.Runtime.InteropServices;
using System.Runtime.Versioning;
namespace Ryujinx.Memory
@ -9,74 +7,42 @@ namespace Ryujinx.Memory
[SupportedOSPlatform("windows")]
static class MemoryManagementWindows
{
private static readonly IntPtr InvalidHandleValue = new IntPtr(-1);
private static bool UseWin10Placeholders;
private const int PageSize = 0x1000;
private static object _emulatedHandleLock = new object();
private static EmulatedSharedMemoryWindows[] _emulatedShared = new EmulatedSharedMemoryWindows[64];
private static List<EmulatedSharedMemoryWindows> _emulatedSharedList = new List<EmulatedSharedMemoryWindows>();
[DllImport("kernel32.dll", SetLastError = true)]
private static extern IntPtr VirtualAlloc(
IntPtr lpAddress,
IntPtr dwSize,
AllocationType flAllocationType,
MemoryProtection flProtect);
[DllImport("kernel32.dll", SetLastError = true)]
private static extern bool VirtualProtect(
IntPtr lpAddress,
IntPtr dwSize,
MemoryProtection flNewProtect,
out MemoryProtection lpflOldProtect);
[DllImport("kernel32.dll", SetLastError = true)]
private static extern bool VirtualFree(IntPtr lpAddress, IntPtr dwSize, AllocationType dwFreeType);
[DllImport("kernel32.dll", SetLastError = true)]
private static extern IntPtr CreateFileMapping(
IntPtr hFile,
IntPtr lpFileMappingAttributes,
FileMapProtection flProtect,
uint dwMaximumSizeHigh,
uint dwMaximumSizeLow,
[MarshalAs(UnmanagedType.LPWStr)] string lpName);
[DllImport("kernel32.dll", SetLastError = true)]
private static extern bool CloseHandle(IntPtr hObject);
[DllImport("kernel32.dll", SetLastError = true)]
private static extern IntPtr MapViewOfFile(
IntPtr hFileMappingObject,
uint dwDesiredAccess,
uint dwFileOffsetHigh,
uint dwFileOffsetLow,
IntPtr dwNumberOfBytesToMap);
[DllImport("kernel32.dll", SetLastError = true)]
private static extern bool UnmapViewOfFile(IntPtr lpBaseAddress);
[DllImport("kernel32.dll", SetLastError = true)]
private static extern uint GetLastError();
static MemoryManagementWindows()
{
UseWin10Placeholders = OperatingSystem.IsWindowsVersionAtLeast(10, 0, 17134);
}
private static readonly PlaceholderManager _placeholders = new PlaceholderManager();
public static IntPtr Allocate(IntPtr size)
{
return AllocateInternal(size, AllocationType.Reserve | AllocationType.Commit);
}
public static IntPtr Reserve(IntPtr size)
public static IntPtr Reserve(IntPtr size, bool viewCompatible)
{
if (viewCompatible)
{
IntPtr baseAddress = AllocateInternal2(size, AllocationType.Reserve | AllocationType.ReservePlaceholder);
_placeholders.ReserveRange((ulong)baseAddress, (ulong)size);
return baseAddress;
}
return AllocateInternal(size, AllocationType.Reserve);
}
private static IntPtr AllocateInternal(IntPtr size, AllocationType flags = 0)
{
IntPtr ptr = VirtualAlloc(IntPtr.Zero, size, flags, MemoryProtection.ReadWrite);
IntPtr ptr = WindowsApi.VirtualAlloc(IntPtr.Zero, size, flags, MemoryProtection.ReadWrite);
if (ptr == IntPtr.Zero)
{
throw new OutOfMemoryException();
}
return ptr;
}
private static IntPtr AllocateInternal2(IntPtr size, AllocationType flags = 0)
{
IntPtr ptr = WindowsApi.VirtualAlloc2(WindowsApi.CurrentProcessHandle, IntPtr.Zero, size, flags, MemoryProtection.NoAccess, IntPtr.Zero, 0);
if (ptr == IntPtr.Zero)
{
@ -88,129 +54,114 @@ namespace Ryujinx.Memory
public static bool Commit(IntPtr location, IntPtr size)
{
if (UseWin10Placeholders)
{
lock (_emulatedSharedList)
{
foreach (var shared in _emulatedSharedList)
{
if (shared.CommitMap(location, size))
{
return true;
}
}
}
}
return VirtualAlloc(location, size, AllocationType.Commit, MemoryProtection.ReadWrite) != IntPtr.Zero;
return WindowsApi.VirtualAlloc(location, size, AllocationType.Commit, MemoryProtection.ReadWrite) != IntPtr.Zero;
}
public static bool Decommit(IntPtr location, IntPtr size)
{
if (UseWin10Placeholders)
{
lock (_emulatedSharedList)
{
foreach (var shared in _emulatedSharedList)
{
if (shared.DecommitMap(location, size))
{
return true;
return WindowsApi.VirtualFree(location, size, AllocationType.Decommit);
}
public static void MapView(IntPtr sharedMemory, ulong srcOffset, IntPtr location, IntPtr size)
{
_placeholders.MapView(sharedMemory, srcOffset, location, size);
}
public static void MapView4KB(IntPtr sharedMemory, ulong srcOffset, IntPtr location, IntPtr size)
{
ulong uaddress = (ulong)location;
ulong usize = (ulong)size;
IntPtr endLocation = (IntPtr)(uaddress + usize);
while (location != endLocation)
{
WindowsApi.VirtualFree(location, (IntPtr)PageSize, AllocationType.Release | AllocationType.PreservePlaceholder);
var ptr = WindowsApi.MapViewOfFile3(
sharedMemory,
WindowsApi.CurrentProcessHandle,
location,
srcOffset,
(IntPtr)PageSize,
0x4000,
MemoryProtection.ReadWrite,
IntPtr.Zero,
0);
if (ptr == IntPtr.Zero)
{
throw new WindowsApiException("MapViewOfFile3");
}
location += PageSize;
srcOffset += PageSize;
}
}
return VirtualFree(location, size, AllocationType.Decommit);
public static void UnmapView(IntPtr sharedMemory, IntPtr location, IntPtr size)
{
_placeholders.UnmapView(sharedMemory, location, size);
}
public static bool Reprotect(IntPtr address, IntPtr size, MemoryPermission permission)
public static void UnmapView4KB(IntPtr location, IntPtr size)
{
if (UseWin10Placeholders)
ulong uaddress = (ulong)location;
ulong usize = (ulong)size;
IntPtr endLocation = (IntPtr)(uaddress + usize);
while (location != endLocation)
{
bool result = WindowsApi.UnmapViewOfFile2(WindowsApi.CurrentProcessHandle, location, 2);
if (!result)
{
throw new WindowsApiException("UnmapViewOfFile2");
}
location += PageSize;
}
}
public static bool Reprotect(IntPtr address, IntPtr size, MemoryPermission permission, bool forView)
{
if (forView)
{
return _placeholders.ReprotectView(address, size, permission);
}
else
{
return WindowsApi.VirtualProtect(address, size, WindowsApi.GetProtection(permission), out _);
}
}
public static bool Reprotect4KB(IntPtr address, IntPtr size, MemoryPermission permission, bool forView)
{
ulong uaddress = (ulong)address;
ulong usize = (ulong)size;
while (usize > 0)
{
ulong nextGranular = (uaddress & ~EmulatedSharedMemoryWindows.MappingMask) + EmulatedSharedMemoryWindows.MappingGranularity;
ulong mapSize = Math.Min(usize, nextGranular - uaddress);
if (!VirtualProtect((IntPtr)uaddress, (IntPtr)mapSize, GetProtection(permission), out _))
if (!WindowsApi.VirtualProtect((IntPtr)uaddress, (IntPtr)PageSize, WindowsApi.GetProtection(permission), out _))
{
return false;
}
uaddress = nextGranular;
usize -= mapSize;
uaddress += PageSize;
usize -= PageSize;
}
return true;
}
else
{
return VirtualProtect(address, size, GetProtection(permission), out _);
}
}
private static MemoryProtection GetProtection(MemoryPermission permission)
{
return permission switch
{
MemoryPermission.None => MemoryProtection.NoAccess,
MemoryPermission.Read => MemoryProtection.ReadOnly,
MemoryPermission.ReadAndWrite => MemoryProtection.ReadWrite,
MemoryPermission.ReadAndExecute => MemoryProtection.ExecuteRead,
MemoryPermission.ReadWriteExecute => MemoryProtection.ExecuteReadWrite,
MemoryPermission.Execute => MemoryProtection.Execute,
_ => throw new MemoryProtectionException(permission)
};
}
public static bool Free(IntPtr address)
{
return VirtualFree(address, IntPtr.Zero, AllocationType.Release);
}
private static int GetEmulatedHandle()
{
// Assumes we have the handle lock.
for (int i = 0; i < _emulatedShared.Length; i++)
{
if (_emulatedShared[i] == null)
{
return i + 1;
}
}
throw new InvalidProgramException("Too many shared memory handles were created.");
}
public static bool EmulatedHandleValid(ref int handle)
{
handle--;
return handle >= 0 && handle < _emulatedShared.Length && _emulatedShared[handle] != null;
return WindowsApi.VirtualFree(address, IntPtr.Zero, AllocationType.Release);
}
public static IntPtr CreateSharedMemory(IntPtr size, bool reserve)
{
if (UseWin10Placeholders && reserve)
{
lock (_emulatedHandleLock)
{
int handle = GetEmulatedHandle();
_emulatedShared[handle - 1] = new EmulatedSharedMemoryWindows((ulong)size);
_emulatedSharedList.Add(_emulatedShared[handle - 1]);
return (IntPtr)handle;
}
}
else
{
var prot = reserve ? FileMapProtection.SectionReserve : FileMapProtection.SectionCommit;
IntPtr handle = CreateFileMapping(
InvalidHandleValue,
IntPtr handle = WindowsApi.CreateFileMapping(
WindowsApi.InvalidHandleValue,
IntPtr.Zero,
FileMapProtection.PageReadWrite | prot,
(uint)(size.ToInt64() >> 32),
@ -224,28 +175,10 @@ namespace Ryujinx.Memory
return handle;
}
}
public static void DestroySharedMemory(IntPtr handle)
{
if (UseWin10Placeholders)
{
lock (_emulatedHandleLock)
{
int iHandle = (int)(ulong)handle;
if (EmulatedHandleValid(ref iHandle))
{
_emulatedSharedList.Remove(_emulatedShared[iHandle]);
_emulatedShared[iHandle].Dispose();
_emulatedShared[iHandle] = null;
return;
}
}
}
if (!CloseHandle(handle))
if (!WindowsApi.CloseHandle(handle))
{
throw new ArgumentException("Invalid handle.", nameof(handle));
}
@ -253,20 +186,7 @@ namespace Ryujinx.Memory
public static IntPtr MapSharedMemory(IntPtr handle)
{
if (UseWin10Placeholders)
{
lock (_emulatedHandleLock)
{
int iHandle = (int)(ulong)handle;
if (EmulatedHandleValid(ref iHandle))
{
return _emulatedShared[iHandle].Map();
}
}
}
IntPtr ptr = MapViewOfFile(handle, 4 | 2, 0, 0, IntPtr.Zero);
IntPtr ptr = WindowsApi.MapViewOfFile(handle, 4 | 2, 0, 0, IntPtr.Zero);
if (ptr == IntPtr.Zero)
{
@ -278,24 +198,15 @@ namespace Ryujinx.Memory
public static void UnmapSharedMemory(IntPtr address)
{
if (UseWin10Placeholders)
{
lock (_emulatedHandleLock)
{
foreach (EmulatedSharedMemoryWindows shared in _emulatedSharedList)
{
if (shared.Unmap((ulong)address))
{
return;
}
}
}
}
if (!UnmapViewOfFile(address))
if (!WindowsApi.UnmapViewOfFile(address))
{
throw new ArgumentException("Invalid address.", nameof(address));
}
}
public static bool RetryFromAccessViolation()
{
return _placeholders.RetryFromAccessViolation();
}
}
}
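The Windows path has the same shape, but is built on placeholders: the address space is reserved as a placeholder region, and mapping a view splits a placeholder and replaces it with a view of the shared section (a sketch only, driven in practice by MemoryBlock through MemoryManagement; the splitting and tracking details live in PlaceholderManager, with MapView4KB/UnmapView4KB above showing the per-page variant):

IntPtr section = MemoryManagementWindows.CreateSharedMemory((IntPtr)0x10000, reserve: true); // CreateFileMapping with SectionReserve
IntPtr reserved = MemoryManagementWindows.Reserve((IntPtr)0x10000, viewCompatible: true);    // VirtualAlloc2 placeholder reservation

MemoryManagementWindows.MapView(section, 0, reserved, (IntPtr)0x1000);  // replace a placeholder with a view of the section
MemoryManagementWindows.UnmapView(section, reserved, (IntPtr)0x1000);   // turn the range back into a placeholder

MemoryManagementWindows.DestroySharedMemory(section);                   // CloseHandle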

View file

@ -21,7 +21,23 @@ namespace Ryujinx.Memory
MAP_PRIVATE = 2,
MAP_ANONYMOUS = 4,
MAP_NORESERVE = 8,
MAP_UNLOCKED = 16
MAP_FIXED = 16,
MAP_UNLOCKED = 32
}
[Flags]
public enum OpenFlags : uint
{
O_RDONLY = 0,
O_WRONLY = 1,
O_RDWR = 2,
O_CREAT = 4,
O_EXCL = 8,
O_NOCTTY = 16,
O_TRUNC = 32,
O_APPEND = 64,
O_NONBLOCK = 128,
O_SYNC = 256,
}
private const int MAP_ANONYMOUS_LINUX_GENERIC = 0x20;
@ -50,6 +66,24 @@ namespace Ryujinx.Memory
[DllImport("libc", SetLastError = true)]
public static extern int madvise(IntPtr address, ulong size, int advice);
[DllImport("libc", SetLastError = true)]
public static extern int mkstemp(IntPtr template);
[DllImport("libc", SetLastError = true)]
public static extern int unlink(IntPtr pathname);
[DllImport("libc", SetLastError = true)]
public static extern int ftruncate(int fildes, IntPtr length);
[DllImport("libc", SetLastError = true)]
public static extern int close(int fd);
[DllImport("libc", SetLastError = true)]
public static extern int shm_open(IntPtr name, int oflag, uint mode);
[DllImport("libc", SetLastError = true)]
public static extern int shm_unlink(IntPtr name);
private static int MmapFlagsToSystemFlags(MmapFlags flags)
{
int result = 0;
@ -64,6 +98,11 @@ namespace Ryujinx.Memory
result |= (int)MmapFlags.MAP_PRIVATE;
}
if (flags.HasFlag(MmapFlags.MAP_FIXED))
{
result |= (int)MmapFlags.MAP_FIXED;
}
if (flags.HasFlag(MmapFlags.MAP_ANONYMOUS))
{
if (OperatingSystem.IsLinux())

View file

@ -1,6 +1,6 @@
namespace Ryujinx.Memory
{
class PageTable<T> where T : unmanaged
public class PageTable<T> where T : unmanaged
{
public const int PageBits = 12;
public const int PageSize = 1 << PageBits;

View file

@ -1,71 +0,0 @@
using System;
namespace Ryujinx.Memory.Range
{
/// <summary>
/// Range of memory composed of an address and size.
/// </summary>
public struct HostMemoryRange : IEquatable<HostMemoryRange>
{
/// <summary>
/// An empty memory range, with a null address and zero size.
/// </summary>
public static HostMemoryRange Empty => new HostMemoryRange(0, 0);
/// <summary>
/// Start address of the range.
/// </summary>
public nuint Address { get; }
/// <summary>
/// Size of the range in bytes.
/// </summary>
public ulong Size { get; }
/// <summary>
/// Address where the range ends (exclusive).
/// </summary>
public nuint EndAddress => Address + (nuint)Size;
/// <summary>
/// Creates a new memory range with the specified address and size.
/// </summary>
/// <param name="address">Start address</param>
/// <param name="size">Size in bytes</param>
public HostMemoryRange(nuint address, ulong size)
{
Address = address;
Size = size;
}
/// <summary>
/// Checks if the range overlaps with another.
/// </summary>
/// <param name="other">The other range to check for overlap</param>
/// <returns>True if the ranges overlap, false otherwise</returns>
public bool OverlapsWith(HostMemoryRange other)
{
nuint thisAddress = Address;
nuint thisEndAddress = EndAddress;
nuint otherAddress = other.Address;
nuint otherEndAddress = other.EndAddress;
return thisAddress < otherEndAddress && otherAddress < thisEndAddress;
}
public override bool Equals(object obj)
{
return obj is HostMemoryRange other && Equals(other);
}
public bool Equals(HostMemoryRange other)
{
return Address == other.Address && Size == other.Size;
}
public override int GetHashCode()
{
return HashCode.Combine(Address, Size);
}
}
}

View file

@ -188,6 +188,30 @@ namespace Ryujinx.Memory.Tracking
return VirtualMemoryEvent(address, 1, write);
}
/// <summary>
/// Signal that a virtual memory event happened at the given location.
/// This is similar to VirtualMemoryEvent, but on Windows it might also return true after a partial unmap.
/// This should only be called from the exception handler.
/// </summary>
/// <param name="address">Virtual address accessed</param>
/// <param name="size">Size of the region affected in bytes</param>
/// <param name="write">Whether the region was written to or read</param>
/// <param name="precise">True if the access is precise, false otherwise</param>
/// <returns>True if the event triggered any tracking regions, false otherwise</returns>
public bool VirtualMemoryEventEh(ulong address, ulong size, bool write, bool precise = false)
{
// Windows has a limitation: it can't do partial unmaps.
// For this reason, we need to unmap the whole range and then remap the sub-ranges.
// When this happens, we might have caused an undesirable access violation during the window where the range was unmapped.
// In this case, try again as the memory might be mapped now.
if (OperatingSystem.IsWindows() && MemoryManagementWindows.RetryFromAccessViolation())
{
return true;
}
return VirtualMemoryEvent(address, size, write, precise);
}
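Illustrative only (the real callers are the native signal and vectored exception handlers, and the helper name below is made up): a fault handler is expected to funnel the faulting guest address into VirtualMemoryEventEh and retry the access when it returns true, which covers both ordinary tracking hits and the Windows partial unmap race described above.

bool HandleGuestAccessFault(MemoryTracking tracking, ulong guestVa, bool isWrite)
{
    // True either because a tracking region handled the access, or because a
    // Windows partial unmap was in flight and the access can simply be retried.
    return tracking.VirtualMemoryEventEh(guestVa, 1, isWrite);
}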
/// <summary>
/// Signal that a virtual memory event happened at the given location.
/// This can be flagged as a precise event, which will avoid reprotection and call special handlers if possible.

View file

@ -1,703 +0,0 @@
using Ryujinx.Memory.Range;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.InteropServices;
namespace Ryujinx.Memory.WindowsShared
{
class EmulatedSharedMemoryWindows : IDisposable
{
private static readonly IntPtr InvalidHandleValue = new IntPtr(-1);
private static readonly IntPtr CurrentProcessHandle = new IntPtr(-1);
public const int MappingBits = 16; // Windows 64kb granularity.
public const ulong MappingGranularity = 1 << MappingBits;
public const ulong MappingMask = MappingGranularity - 1;
public const ulong BackingSize32GB = 32UL * 1024UL * 1024UL * 1024UL; // Reasonable max size of 32GB.
private class SharedMemoryMapping : INonOverlappingRange
{
public ulong Address { get; }
public ulong Size { get; private set; }
public ulong EndAddress { get; private set; }
public List<int> Blocks;
public SharedMemoryMapping(ulong address, ulong size, List<int> blocks = null)
{
Address = address;
Size = size;
EndAddress = address + size;
Blocks = blocks ?? new List<int>();
}
public bool OverlapsWith(ulong address, ulong size)
{
return Address < address + size && address < EndAddress;
}
public void ExtendTo(ulong endAddress, RangeList<SharedMemoryMapping> list)
{
EndAddress = endAddress;
Size = endAddress - Address;
list.UpdateEndAddress(this);
}
public void AddBlocks(IEnumerable<int> blocks)
{
if (Blocks.Count > 0 && blocks.Count() > 0 && Blocks.Last() == blocks.First())
{
Blocks.AddRange(blocks.Skip(1));
}
else
{
Blocks.AddRange(blocks);
}
}
public INonOverlappingRange Split(ulong splitAddress)
{
SharedMemoryMapping newRegion = new SharedMemoryMapping(splitAddress, EndAddress - splitAddress);
int end = (int)((EndAddress + MappingMask) >> MappingBits);
int start = (int)(Address >> MappingBits);
Size = splitAddress - Address;
EndAddress = splitAddress;
int splitEndBlock = (int)((splitAddress + MappingMask) >> MappingBits);
int splitStartBlock = (int)(splitAddress >> MappingBits);
newRegion.AddBlocks(Blocks.Skip(splitStartBlock - start));
Blocks.RemoveRange(splitEndBlock - start, end - splitEndBlock);
return newRegion;
}
}
[DllImport("kernel32.dll", SetLastError = true)]
private static extern IntPtr CreateFileMapping(
IntPtr hFile,
IntPtr lpFileMappingAttributes,
FileMapProtection flProtect,
uint dwMaximumSizeHigh,
uint dwMaximumSizeLow,
[MarshalAs(UnmanagedType.LPWStr)] string lpName);
[DllImport("kernel32.dll", SetLastError = true)]
private static extern bool CloseHandle(IntPtr hObject);
[DllImport("KernelBase.dll", SetLastError = true)]
private static extern IntPtr VirtualAlloc2(
IntPtr process,
IntPtr lpAddress,
IntPtr dwSize,
AllocationType flAllocationType,
MemoryProtection flProtect,
IntPtr extendedParameters,
ulong parameterCount);
[DllImport("kernel32.dll", SetLastError = true)]
private static extern bool VirtualFree(IntPtr lpAddress, IntPtr dwSize, AllocationType dwFreeType);
[DllImport("KernelBase.dll", SetLastError = true)]
private static extern IntPtr MapViewOfFile3(
IntPtr hFileMappingObject,
IntPtr process,
IntPtr baseAddress,
ulong offset,
IntPtr dwNumberOfBytesToMap,
ulong allocationType,
MemoryProtection dwDesiredAccess,
IntPtr extendedParameters,
ulong parameterCount);
[DllImport("KernelBase.dll", SetLastError = true)]
private static extern bool UnmapViewOfFile2(IntPtr process, IntPtr lpBaseAddress, ulong unmapFlags);
private ulong _size;
private object _lock = new object();
private ulong _backingSize;
private IntPtr _backingMemHandle;
private int _backingEnd;
private int _backingAllocated;
private Queue<int> _backingFreeList;
private List<ulong> _mappedBases;
private RangeList<SharedMemoryMapping> _mappings;
private SharedMemoryMapping[] _foundMappings = new SharedMemoryMapping[32];
private PlaceholderList _placeholders;
public EmulatedSharedMemoryWindows(ulong size)
{
ulong backingSize = BackingSize32GB;
_size = size;
_backingSize = backingSize;
_backingMemHandle = CreateFileMapping(
InvalidHandleValue,
IntPtr.Zero,
FileMapProtection.PageReadWrite | FileMapProtection.SectionReserve,
(uint)(backingSize >> 32),
(uint)backingSize,
null);
if (_backingMemHandle == IntPtr.Zero)
{
throw new OutOfMemoryException();
}
_backingFreeList = new Queue<int>();
_mappings = new RangeList<SharedMemoryMapping>();
_mappedBases = new List<ulong>();
_placeholders = new PlaceholderList(size >> MappingBits);
}
private (ulong granularStart, ulong granularEnd) GetAlignedRange(ulong address, ulong size)
{
return (address & (~MappingMask), (address + size + MappingMask) & (~MappingMask));
}
private void Commit(ulong address, ulong size)
{
(ulong granularStart, ulong granularEnd) = GetAlignedRange(address, size);
ulong endAddress = address + size;
lock (_lock)
{
// Search a bit before and after the new mapping.
// When adding our new mapping, we may need to join an existing mapping into our new mapping (or in some cases, to the other side!)
ulong searchStart = granularStart == 0 ? 0 : (granularStart - 1);
int mappingCount = _mappings.FindOverlapsNonOverlapping(searchStart, (granularEnd - searchStart) + 1, ref _foundMappings);
int first = -1;
int last = -1;
SharedMemoryMapping startOverlap = null;
SharedMemoryMapping endOverlap = null;
int lastIndex = (int)(address >> MappingBits);
int endIndex = (int)((endAddress + MappingMask) >> MappingBits);
int firstBlock = -1;
int endBlock = -1;
for (int i = 0; i < mappingCount; i++)
{
SharedMemoryMapping mapping = _foundMappings[i];
if (mapping.Address < address)
{
if (mapping.EndAddress >= address)
{
startOverlap = mapping;
}
if ((int)((mapping.EndAddress - 1) >> MappingBits) == lastIndex)
{
lastIndex = (int)((mapping.EndAddress + MappingMask) >> MappingBits);
firstBlock = mapping.Blocks.Last();
}
}
if (mapping.EndAddress > endAddress)
{
if (mapping.Address <= endAddress)
{
endOverlap = mapping;
}
if ((int)((mapping.Address) >> MappingBits) + 1 == endIndex)
{
endIndex = (int)((mapping.Address) >> MappingBits);
endBlock = mapping.Blocks.First();
}
}
if (mapping.OverlapsWith(address, size))
{
if (first == -1)
{
first = i;
}
last = i;
}
}
if (startOverlap == endOverlap && startOverlap != null)
{
// Already fully committed.
return;
}
var blocks = new List<int>();
int lastBlock = -1;
if (firstBlock != -1)
{
blocks.Add(firstBlock);
lastBlock = firstBlock;
}
bool hasMapped = false;
Action map = () =>
{
if (!hasMapped)
{
_placeholders.EnsurePlaceholders(address >> MappingBits, (granularEnd - granularStart) >> MappingBits, SplitPlaceholder);
hasMapped = true;
}
// There's a gap between this index and the last. Allocate blocks to fill it.
blocks.Add(MapBackingBlock(MappingGranularity * (ulong)lastIndex++));
};
if (first != -1)
{
for (int i = first; i <= last; i++)
{
SharedMemoryMapping mapping = _foundMappings[i];
int mapIndex = (int)(mapping.Address >> MappingBits);
while (lastIndex < mapIndex)
{
map();
}
if (lastBlock == mapping.Blocks[0])
{
blocks.AddRange(mapping.Blocks.Skip(1));
}
else
{
blocks.AddRange(mapping.Blocks);
}
lastIndex = (int)((mapping.EndAddress - 1) >> MappingBits) + 1;
}
}
while (lastIndex < endIndex)
{
map();
}
if (endBlock != -1 && endBlock != lastBlock)
{
blocks.Add(endBlock);
}
if (startOverlap != null && endOverlap != null)
{
// Both sides should be coalesced. Extend the start overlap to contain the end overlap, and add together their blocks.
_mappings.Remove(endOverlap);
startOverlap.ExtendTo(endOverlap.EndAddress, _mappings);
startOverlap.AddBlocks(blocks);
startOverlap.AddBlocks(endOverlap.Blocks);
}
else if (startOverlap != null)
{
startOverlap.ExtendTo(endAddress, _mappings);
startOverlap.AddBlocks(blocks);
}
else
{
var mapping = new SharedMemoryMapping(address, size, blocks);
if (endOverlap != null)
{
mapping.ExtendTo(endOverlap.EndAddress, _mappings);
mapping.AddBlocks(endOverlap.Blocks);
_mappings.Remove(endOverlap);
}
_mappings.Add(mapping);
}
}
}
private void Decommit(ulong address, ulong size)
{
(ulong granularStart, ulong granularEnd) = GetAlignedRange(address, size);
ulong endAddress = address + size;
lock (_lock)
{
int mappingCount = _mappings.FindOverlapsNonOverlapping(granularStart, granularEnd - granularStart, ref _foundMappings);
int first = -1;
int last = -1;
for (int i = 0; i < mappingCount; i++)
{
SharedMemoryMapping mapping = _foundMappings[i];
if (mapping.OverlapsWith(address, size))
{
if (first == -1)
{
first = i;
}
last = i;
}
}
if (first == -1)
{
return; // Could not find any regions to decommit.
}
int lastReleasedBlock = -1;
bool releasedFirst = false;
bool releasedLast = false;
for (int i = last; i >= first; i--)
{
SharedMemoryMapping mapping = _foundMappings[i];
bool releaseEnd = true;
bool releaseStart = true;
if (i == last)
{
// If this is the last region, do not release the block if there is a page ahead of us, or the block continues after us. (it is keeping the block alive)
releaseEnd = last == mappingCount - 1;
// If the end region starts after the decommit end address, split and readd it after modifying its base address.
if (mapping.EndAddress > endAddress)
{
var newMapping = (SharedMemoryMapping)mapping.Split(endAddress);
_mappings.UpdateEndAddress(mapping);
_mappings.Add(newMapping);
if ((endAddress & MappingMask) != 0)
{
releaseEnd = false;
}
}
releasedLast = releaseEnd;
}
if (i == first)
{
// If this is the first region, do not release the block if there is a region behind us. (it is keeping the block alive)
releaseStart = first == 0;
// If the first region starts before the decommit address, split it by modifying its end address.
if (mapping.Address < address)
{
var oldMapping = mapping;
mapping = (SharedMemoryMapping)mapping.Split(address);
_mappings.UpdateEndAddress(oldMapping);
if ((address & MappingMask) != 0)
{
releaseStart = false;
}
}
releasedFirst = releaseStart;
}
_mappings.Remove(mapping);
ulong releasePointer = (mapping.EndAddress + MappingMask) & (~MappingMask);
for (int j = mapping.Blocks.Count - 1; j >= 0; j--)
{
int blockId = mapping.Blocks[j];
releasePointer -= MappingGranularity;
if (lastReleasedBlock == blockId)
{
// When committed regions are fragmented, multiple will have the same block id for their start/end granular block.
// Avoid releasing these blocks twice.
continue;
}
if ((j != 0 || releaseStart) && (j != mapping.Blocks.Count - 1 || releaseEnd))
{
ReleaseBackingBlock(releasePointer, blockId);
}
lastReleasedBlock = blockId;
}
}
ulong placeholderStart = (granularStart >> MappingBits) + (releasedFirst ? 0UL : 1UL);
ulong placeholderEnd = (granularEnd >> MappingBits) - (releasedLast ? 0UL : 1UL);
if (placeholderEnd > placeholderStart)
{
_placeholders.RemovePlaceholders(placeholderStart, placeholderEnd - placeholderStart, CoalescePlaceholder);
}
}
}
public bool CommitMap(IntPtr address, IntPtr size)
{
lock (_lock)
{
foreach (ulong mapping in _mappedBases)
{
ulong offset = (ulong)address - mapping;
if (offset < _size)
{
Commit(offset, (ulong)size);
return true;
}
}
}
return false;
}
public bool DecommitMap(IntPtr address, IntPtr size)
{
lock (_lock)
{
foreach (ulong mapping in _mappedBases)
{
ulong offset = (ulong)address - mapping;
if (offset < _size)
{
Decommit(offset, (ulong)size);
return true;
}
}
}
return false;
}
private int MapBackingBlock(ulong offset)
{
bool allocate = false;
int backing;
if (_backingFreeList.Count > 0)
{
backing = _backingFreeList.Dequeue();
}
else
{
if (_backingAllocated == _backingEnd)
{
// Allocate the backing.
_backingAllocated++;
allocate = true;
}
backing = _backingEnd++;
}
ulong backingOffset = MappingGranularity * (ulong)backing;
foreach (ulong baseAddress in _mappedBases)
{
CommitToMap(baseAddress, offset, MappingGranularity, backingOffset, allocate);
allocate = false;
}
return backing;
}
private void ReleaseBackingBlock(ulong offset, int id)
{
foreach (ulong baseAddress in _mappedBases)
{
DecommitFromMap(baseAddress, offset);
}
if (_backingEnd - 1 == id)
{
_backingEnd = id;
}
else
{
_backingFreeList.Enqueue(id);
}
}
public IntPtr Map()
{
IntPtr newMapping = VirtualAlloc2(
CurrentProcessHandle,
IntPtr.Zero,
(IntPtr)_size,
AllocationType.Reserve | AllocationType.ReservePlaceholder,
MemoryProtection.NoAccess,
IntPtr.Zero,
0);
if (newMapping == IntPtr.Zero)
{
throw new OutOfMemoryException();
}
// Apply all existing mappings to the new mapping
lock (_lock)
{
int lastBlock = -1;
foreach (SharedMemoryMapping mapping in _mappings)
{
ulong blockAddress = mapping.Address & (~MappingMask);
foreach (int block in mapping.Blocks)
{
if (block != lastBlock)
{
ulong backingOffset = MappingGranularity * (ulong)block;
CommitToMap((ulong)newMapping, blockAddress, MappingGranularity, backingOffset, false);
lastBlock = block;
}
blockAddress += MappingGranularity;
}
}
_mappedBases.Add((ulong)newMapping);
}
return newMapping;
}
private void SplitPlaceholder(ulong address, ulong size)
{
ulong byteAddress = address << MappingBits;
IntPtr byteSize = (IntPtr)(size << MappingBits);
foreach (ulong mapAddress in _mappedBases)
{
bool result = VirtualFree((IntPtr)(mapAddress + byteAddress), byteSize, AllocationType.PreservePlaceholder | AllocationType.Release);
if (!result)
{
throw new InvalidOperationException("Placeholder could not be split.");
}
}
}
private void CoalescePlaceholder(ulong address, ulong size)
{
ulong byteAddress = address << MappingBits;
IntPtr byteSize = (IntPtr)(size << MappingBits);
foreach (ulong mapAddress in _mappedBases)
{
bool result = VirtualFree((IntPtr)(mapAddress + byteAddress), byteSize, AllocationType.CoalescePlaceholders | AllocationType.Release);
if (!result)
{
throw new InvalidOperationException("Placeholder could not be coalesced.");
}
}
}
private void CommitToMap(ulong mapAddress, ulong address, ulong size, ulong backingOffset, bool allocate)
{
IntPtr targetAddress = (IntPtr)(mapAddress + address);
// Assume the placeholder worked (or already exists)
// Map the backing memory into the mapped location.
IntPtr mapped = MapViewOfFile3(
_backingMemHandle,
CurrentProcessHandle,
targetAddress,
backingOffset,
(IntPtr)MappingGranularity,
0x4000, // REPLACE_PLACEHOLDER
MemoryProtection.ReadWrite,
IntPtr.Zero,
0);
if (mapped == IntPtr.Zero)
{
throw new InvalidOperationException($"Could not map view of backing memory. (va=0x{address:X16} size=0x{size:X16}, error code {Marshal.GetLastWin32Error()})");
}
if (allocate)
{
// Commit this part of the shared memory.
VirtualAlloc2(CurrentProcessHandle, targetAddress, (IntPtr)MappingGranularity, AllocationType.Commit, MemoryProtection.ReadWrite, IntPtr.Zero, 0);
}
}
private void DecommitFromMap(ulong baseAddress, ulong address)
{
UnmapViewOfFile2(CurrentProcessHandle, (IntPtr)(baseAddress + address), 2); // 2 = MEM_PRESERVE_PLACEHOLDER
}
public bool Unmap(ulong baseAddress)
{
lock (_lock)
{
if (_mappedBases.Remove(baseAddress))
{
int lastBlock = -1;
foreach (SharedMemoryMapping mapping in _mappings)
{
ulong blockAddress = mapping.Address & (~MappingMask);
foreach (int block in mapping.Blocks)
{
if (block != lastBlock)
{
DecommitFromMap(baseAddress, blockAddress);
lastBlock = block;
}
blockAddress += MappingGranularity;
}
}
if (!VirtualFree((IntPtr)baseAddress, (IntPtr)0, AllocationType.Release))
{
throw new InvalidOperationException("Couldn't free mapping placeholder.");
}
return true;
}
return false;
}
}
public void Dispose()
{
// Remove all file mappings
lock (_lock)
{
foreach (ulong baseAddress in _mappedBases.ToArray())
{
Unmap(baseAddress);
}
}
// Finally, delete the file mapping.
CloseHandle(_backingMemHandle);
}
}
}
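MapBackingBlock and ReleaseBackingBlock above amount to a small free-list allocator over fixed-size backing blocks: indices grow from an end pointer, and a released index either shrinks that pointer (when it was the last block) or is queued for reuse, with the index translating to a byte offset as index * MappingGranularity. A minimal standalone sketch of the same pattern; BlockIndexAllocator is an illustrative name, not part of this commit.
using System.Collections.Generic;
class BlockIndexAllocator
{
    private readonly Queue<int> _freeList = new Queue<int>();
    private int _end; // One past the highest index currently in use.
    public int Rent()
    {
        // Prefer recycling a previously released index before growing the range.
        return _freeList.Count > 0 ? _freeList.Dequeue() : _end++;
    }
    public void Return(int id)
    {
        if (id == _end - 1)
        {
            _end = id; // The last block was released, shrink the range.
        }
        else
        {
            _freeList.Enqueue(id); // Remember the hole for later reuse.
        }
    }
}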

View file

@ -0,0 +1,740 @@
using System;
using System.Collections.Generic;
namespace Ryujinx.Memory.WindowsShared
{
/// <summary>
/// An Augmented Interval Tree based off of the "TreeDictionary"'s Red-Black Tree. Allows fast overlap checking of ranges.
/// </summary>
/// <typeparam name="K">Key</typeparam>
/// <typeparam name="V">Value</typeparam>
class IntervalTree<K, V> where K : IComparable<K>
{
private const int ArrayGrowthSize = 32;
private const bool Black = true;
private const bool Red = false;
private IntervalTreeNode<K, V> _root = null;
private int _count = 0;
public int Count => _count;
public IntervalTree() { }
#region Public Methods
/// <summary>
/// Gets the value of the interval whose start key is <paramref name="key"/>.
/// </summary>
/// <param name="key">Key of the node value to get</param>
/// <param name="value">Value with the given <paramref name="key"/></param>
/// <returns>True if the key is in the tree, false otherwise</returns>
public bool TryGet(K key, out V value)
{
IntervalTreeNode<K, V> node = GetNode(key);
if (node == null)
{
value = default;
return false;
}
value = node.Value;
return true;
}
/// <summary>
/// Gets all intervals that overlap the given range, writing their nodes into <paramref name="overlaps"/>.
/// </summary>
/// <param name="start">Start of the range</param>
/// <param name="end">End of the range</param>
/// <param name="overlaps">Overlaps array to place results in</param>
/// <param name="overlapCount">Index to start writing results into the array. Defaults to 0</param>
/// <returns>Number of intervals found</returns>
public int Get(K start, K end, ref IntervalTreeNode<K, V>[] overlaps, int overlapCount = 0)
{
GetNodes(_root, start, end, ref overlaps, ref overlapCount);
return overlapCount;
}
/// <summary>
/// Adds a new interval into the tree whose start is <paramref name="start"/>, end is <paramref name="end"/> and value is <paramref name="value"/>.
/// </summary>
/// <param name="start">Start of the range to add</param>
/// <param name="end">End of the range to insert</param>
/// <param name="value">Value to add</param>
/// <exception cref="ArgumentNullException"><paramref name="value"/> is null</exception>
public void Add(K start, K end, V value)
{
if (value == null)
{
throw new ArgumentNullException(nameof(value));
}
BSTInsert(start, end, value, null, out _);
}
/// <summary>
/// Removes a value from the tree, searching for it with <paramref name="key"/>.
/// </summary>
/// <param name="key">Key of the node to remove</param>
/// <returns>Number of deleted values</returns>
public int Remove(K key)
{
return Remove(GetNode(key));
}
/// <summary>
/// Removes a value from the tree, given the node that holds it.
/// </summary>
/// <param name="nodeToDelete">Node to be removed</param>
/// <returns>Number of deleted values</returns>
public int Remove(IntervalTreeNode<K, V> nodeToDelete)
{
if (nodeToDelete == null)
{
return 0;
}
Delete(nodeToDelete);
_count--;
return 1;
}
/// <summary>
/// Collects all values in the tree, in key order.
/// </summary>
/// <returns>A list of all values sorted by Key Order</returns>
public List<V> AsList()
{
List<V> list = new List<V>();
AddToList(_root, list);
return list;
}
#endregion
#region Private Methods (BST)
/// <summary>
/// Adds all values that are children of or contained within <paramref name="node"/> into <paramref name="list"/>, in Key Order.
/// </summary>
/// <param name="node">The node to search for values within</param>
/// <param name="list">The list to add values to</param>
private void AddToList(IntervalTreeNode<K, V> node, List<V> list)
{
if (node == null)
{
return;
}
AddToList(node.Left, list);
list.Add(node.Value);
AddToList(node.Right, list);
}
/// <summary>
/// Retrieve the node reference whose key is <paramref name="key"/>, or null if no such node exists.
/// </summary>
/// <param name="key">Key of the node to get</param>
/// <exception cref="ArgumentNullException"><paramref name="key"/> is null</exception>
/// <returns>Node reference in the tree</returns>
private IntervalTreeNode<K, V> GetNode(K key)
{
if (key == null)
{
throw new ArgumentNullException(nameof(key));
}
IntervalTreeNode<K, V> node = _root;
while (node != null)
{
int cmp = key.CompareTo(node.Start);
if (cmp < 0)
{
node = node.Left;
}
else if (cmp > 0)
{
node = node.Right;
}
else
{
return node;
}
}
return null;
}
/// <summary>
/// Retrieve all nodes that overlap the given start and end keys.
/// </summary>
/// <param name="start">Start of the range</param>
/// <param name="end">End of the range</param>
/// <param name="overlaps">Overlaps array to place results in</param>
/// <param name="overlapCount">Overlaps count to update</param>
private void GetNodes(IntervalTreeNode<K, V> node, K start, K end, ref IntervalTreeNode<K, V>[] overlaps, ref int overlapCount)
{
if (node == null || start.CompareTo(node.Max) >= 0)
{
return;
}
GetNodes(node.Left, start, end, ref overlaps, ref overlapCount);
bool endsOnRight = end.CompareTo(node.Start) > 0;
if (endsOnRight)
{
if (start.CompareTo(node.End) < 0)
{
if (overlaps.Length >= overlapCount)
{
Array.Resize(ref overlaps, overlapCount + ArrayGrowthSize);
}
overlaps[overlapCount++] = node;
}
GetNodes(node.Right, start, end, ref overlaps, ref overlapCount);
}
}
/// <summary>
/// Propagate an increase in max value starting at the given node, heading up the tree.
/// This should only be called if the max increases - not for rebalancing or removals.
/// </summary>
/// <param name="node">The node to start propagating from</param>
private void PropagateIncrease(IntervalTreeNode<K, V> node)
{
K max = node.Max;
IntervalTreeNode<K, V> ptr = node;
while ((ptr = ptr.Parent) != null)
{
if (max.CompareTo(ptr.Max) > 0)
{
ptr.Max = max;
}
else
{
break;
}
}
}
/// <summary>
/// Propagate recalculating max value starting at the given node, heading up the tree.
/// This fully recalculates the max value from all children when there is potential for it to decrease.
/// </summary>
/// <param name="node">The node to start propagating from</param>
private void PropagateFull(IntervalTreeNode<K, V> node)
{
IntervalTreeNode<K, V> ptr = node;
do
{
K max = ptr.End;
if (ptr.Left != null && ptr.Left.Max.CompareTo(max) > 0)
{
max = ptr.Left.Max;
}
if (ptr.Right != null && ptr.Right.Max.CompareTo(max) > 0)
{
max = ptr.Right.Max;
}
ptr.Max = max;
} while ((ptr = ptr.Parent) != null);
}
/// <summary>
/// Insertion Mechanism for the interval tree. Similar to a BST insert, with the start of the range as the key.
/// Iterates the tree starting from the root and inserts a new node where all children in the left subtree are less than <paramref name="start"/>, and all children in the right subtree are greater than <paramref name="start"/>.
/// Each node can contain multiple values, and has an end address which is the maximum of all those values.
/// Post insertion, the "max" value of the node and all parents are updated.
/// </summary>
/// <param name="start">Start of the range to insert</param>
/// <param name="end">End of the range to insert</param>
/// <param name="value">Value to insert</param>
/// <param name="updateFactoryCallback">Optional factory used to create a new value if <paramref name="start"/> is already on the tree</param>
/// <param name="outNode">Node that was inserted or modified</param>
/// <returns>True if <paramref name="start"/> was not yet on the tree, false otherwise</returns>
private bool BSTInsert(K start, K end, V value, Func<K, V, V> updateFactoryCallback, out IntervalTreeNode<K, V> outNode)
{
IntervalTreeNode<K, V> parent = null;
IntervalTreeNode<K, V> node = _root;
while (node != null)
{
parent = node;
int cmp = start.CompareTo(node.Start);
if (cmp < 0)
{
node = node.Left;
}
else if (cmp > 0)
{
node = node.Right;
}
else
{
outNode = node;
if (updateFactoryCallback != null)
{
// Replace
node.Value = updateFactoryCallback(start, node.Value);
int endCmp = end.CompareTo(node.End);
if (endCmp > 0)
{
node.End = end;
if (end.CompareTo(node.Max) > 0)
{
node.Max = end;
PropagateIncrease(node);
RestoreBalanceAfterInsertion(node);
}
}
else if (endCmp < 0)
{
node.End = end;
PropagateFull(node);
}
}
return false;
}
}
IntervalTreeNode<K, V> newNode = new IntervalTreeNode<K, V>(start, end, value, parent);
if (newNode.Parent == null)
{
_root = newNode;
}
else if (start.CompareTo(parent.Start) < 0)
{
parent.Left = newNode;
}
else
{
parent.Right = newNode;
}
PropagateIncrease(newNode);
_count++;
RestoreBalanceAfterInsertion(newNode);
outNode = newNode;
return true;
}
/// <summary>
/// Removes the given node from the tree, rebalancing as needed.
/// </summary>
/// <param name="nodeToDelete">Tree node to be removed</param>
private void Delete(IntervalTreeNode<K, V> nodeToDelete)
{
IntervalTreeNode<K, V> replacementNode;
if (LeftOf(nodeToDelete) == null || RightOf(nodeToDelete) == null)
{
replacementNode = nodeToDelete;
}
else
{
replacementNode = PredecessorOf(nodeToDelete);
}
IntervalTreeNode<K, V> tmp = LeftOf(replacementNode) ?? RightOf(replacementNode);
if (tmp != null)
{
tmp.Parent = ParentOf(replacementNode);
}
if (ParentOf(replacementNode) == null)
{
_root = tmp;
}
else if (replacementNode == LeftOf(ParentOf(replacementNode)))
{
ParentOf(replacementNode).Left = tmp;
}
else
{
ParentOf(replacementNode).Right = tmp;
}
if (replacementNode != nodeToDelete)
{
nodeToDelete.Start = replacementNode.Start;
nodeToDelete.Value = replacementNode.Value;
nodeToDelete.End = replacementNode.End;
nodeToDelete.Max = replacementNode.Max;
}
PropagateFull(replacementNode);
if (tmp != null && ColorOf(replacementNode) == Black)
{
RestoreBalanceAfterRemoval(tmp);
}
}
/// <summary>
/// Returns the node with the largest key where <paramref name="node"/> is considered the root node.
/// </summary>
/// <param name="node">Root Node</param>
/// <returns>Node with the maximum key in the tree of <paramref name="node"/></returns>
private static IntervalTreeNode<K, V> Maximum(IntervalTreeNode<K, V> node)
{
IntervalTreeNode<K, V> tmp = node;
while (tmp.Right != null)
{
tmp = tmp.Right;
}
return tmp;
}
/// <summary>
/// Finds the node whose key is immediately less than <paramref name="node"/>.
/// </summary>
/// <param name="node">Node to find the predecessor of</param>
/// <returns>Predecessor of <paramref name="node"/></returns>
private static IntervalTreeNode<K, V> PredecessorOf(IntervalTreeNode<K, V> node)
{
if (node.Left != null)
{
return Maximum(node.Left);
}
IntervalTreeNode<K, V> parent = node.Parent;
while (parent != null && node == parent.Left)
{
node = parent;
parent = parent.Parent;
}
return parent;
}
#endregion
#region Private Methods (RBL)
private void RestoreBalanceAfterRemoval(IntervalTreeNode<K, V> balanceNode)
{
IntervalTreeNode<K, V> ptr = balanceNode;
while (ptr != _root && ColorOf(ptr) == Black)
{
if (ptr == LeftOf(ParentOf(ptr)))
{
IntervalTreeNode<K, V> sibling = RightOf(ParentOf(ptr));
if (ColorOf(sibling) == Red)
{
SetColor(sibling, Black);
SetColor(ParentOf(ptr), Red);
RotateLeft(ParentOf(ptr));
sibling = RightOf(ParentOf(ptr));
}
if (ColorOf(LeftOf(sibling)) == Black && ColorOf(RightOf(sibling)) == Black)
{
SetColor(sibling, Red);
ptr = ParentOf(ptr);
}
else
{
if (ColorOf(RightOf(sibling)) == Black)
{
SetColor(LeftOf(sibling), Black);
SetColor(sibling, Red);
RotateRight(sibling);
sibling = RightOf(ParentOf(ptr));
}
SetColor(sibling, ColorOf(ParentOf(ptr)));
SetColor(ParentOf(ptr), Black);
SetColor(RightOf(sibling), Black);
RotateLeft(ParentOf(ptr));
ptr = _root;
}
}
else
{
IntervalTreeNode<K, V> sibling = LeftOf(ParentOf(ptr));
if (ColorOf(sibling) == Red)
{
SetColor(sibling, Black);
SetColor(ParentOf(ptr), Red);
RotateRight(ParentOf(ptr));
sibling = LeftOf(ParentOf(ptr));
}
if (ColorOf(RightOf(sibling)) == Black && ColorOf(LeftOf(sibling)) == Black)
{
SetColor(sibling, Red);
ptr = ParentOf(ptr);
}
else
{
if (ColorOf(LeftOf(sibling)) == Black)
{
SetColor(RightOf(sibling), Black);
SetColor(sibling, Red);
RotateLeft(sibling);
sibling = LeftOf(ParentOf(ptr));
}
SetColor(sibling, ColorOf(ParentOf(ptr)));
SetColor(ParentOf(ptr), Black);
SetColor(LeftOf(sibling), Black);
RotateRight(ParentOf(ptr));
ptr = _root;
}
}
}
SetColor(ptr, Black);
}
private void RestoreBalanceAfterInsertion(IntervalTreeNode<K, V> balanceNode)
{
SetColor(balanceNode, Red);
while (balanceNode != null && balanceNode != _root && ColorOf(ParentOf(balanceNode)) == Red)
{
if (ParentOf(balanceNode) == LeftOf(ParentOf(ParentOf(balanceNode))))
{
IntervalTreeNode<K, V> sibling = RightOf(ParentOf(ParentOf(balanceNode)));
if (ColorOf(sibling) == Red)
{
SetColor(ParentOf(balanceNode), Black);
SetColor(sibling, Black);
SetColor(ParentOf(ParentOf(balanceNode)), Red);
balanceNode = ParentOf(ParentOf(balanceNode));
}
else
{
if (balanceNode == RightOf(ParentOf(balanceNode)))
{
balanceNode = ParentOf(balanceNode);
RotateLeft(balanceNode);
}
SetColor(ParentOf(balanceNode), Black);
SetColor(ParentOf(ParentOf(balanceNode)), Red);
RotateRight(ParentOf(ParentOf(balanceNode)));
}
}
else
{
IntervalTreeNode<K, V> sibling = LeftOf(ParentOf(ParentOf(balanceNode)));
if (ColorOf(sibling) == Red)
{
SetColor(ParentOf(balanceNode), Black);
SetColor(sibling, Black);
SetColor(ParentOf(ParentOf(balanceNode)), Red);
balanceNode = ParentOf(ParentOf(balanceNode));
}
else
{
if (balanceNode == LeftOf(ParentOf(balanceNode)))
{
balanceNode = ParentOf(balanceNode);
RotateRight(balanceNode);
}
SetColor(ParentOf(balanceNode), Black);
SetColor(ParentOf(ParentOf(balanceNode)), Red);
RotateLeft(ParentOf(ParentOf(balanceNode)));
}
}
}
SetColor(_root, Black);
}
private void RotateLeft(IntervalTreeNode<K, V> node)
{
if (node != null)
{
IntervalTreeNode<K, V> right = RightOf(node);
node.Right = LeftOf(right);
if (node.Right != null)
{
node.Right.Parent = node;
}
IntervalTreeNode<K, V> nodeParent = ParentOf(node);
right.Parent = nodeParent;
if (nodeParent == null)
{
_root = right;
}
else if (node == LeftOf(nodeParent))
{
nodeParent.Left = right;
}
else
{
nodeParent.Right = right;
}
right.Left = node;
node.Parent = right;
PropagateFull(node);
}
}
private void RotateRight(IntervalTreeNode<K, V> node)
{
if (node != null)
{
IntervalTreeNode<K, V> left = LeftOf(node);
node.Left = RightOf(left);
if (node.Left != null)
{
node.Left.Parent = node;
}
IntervalTreeNode<K, V> nodeParent = ParentOf(node);
left.Parent = nodeParent;
if (nodeParent == null)
{
_root = left;
}
else if (node == RightOf(nodeParent))
{
nodeParent.Right = left;
}
else
{
nodeParent.Left = left;
}
left.Right = node;
node.Parent = left;
PropagateFull(node);
}
}
#endregion
#region Safety-Methods
// These methods save memory by allowing us to forego sentinel nil nodes, as well as serve as protection against NullReferenceExceptions.
/// <summary>
/// Returns the color of <paramref name="node"/>, or Black if it is null.
/// </summary>
/// <param name="node">Node</param>
/// <returns>The boolean color of <paramref name="node"/>, or black if null</returns>
private static bool ColorOf(IntervalTreeNode<K, V> node)
{
return node == null || node.Color;
}
/// <summary>
/// Sets the color of <paramref name="node"/> node to <paramref name="color"/>.
/// <br></br>
/// This method does nothing if <paramref name="node"/> is null.
/// </summary>
/// <param name="node">Node to set the color of</param>
/// <param name="color">Color (Boolean)</param>
private static void SetColor(IntervalTreeNode<K, V> node, bool color)
{
if (node != null)
{
node.Color = color;
}
}
/// <summary>
/// This method returns the left node of <paramref name="node"/>, or null if <paramref name="node"/> is null.
/// </summary>
/// <param name="node">Node to retrieve the left child from</param>
/// <returns>Left child of <paramref name="node"/></returns>
private static IntervalTreeNode<K, V> LeftOf(IntervalTreeNode<K, V> node)
{
return node?.Left;
}
/// <summary>
/// This method returns the right node of <paramref name="node"/>, or null if <paramref name="node"/> is null.
/// </summary>
/// <param name="node">Node to retrieve the right child from</param>
/// <returns>Right child of <paramref name="node"/></returns>
private static IntervalTreeNode<K, V> RightOf(IntervalTreeNode<K, V> node)
{
return node?.Right;
}
/// <summary>
/// Returns the parent node of <paramref name="node"/>, or null if <paramref name="node"/> is null.
/// </summary>
/// <param name="node">Node to retrieve the parent from</param>
/// <returns>Parent of <paramref name="node"/></returns>
private static IntervalTreeNode<K, V> ParentOf(IntervalTreeNode<K, V> node)
{
return node?.Parent;
}
#endregion
public bool ContainsKey(K key)
{
return GetNode(key) != null;
}
public void Clear()
{
_root = null;
_count = 0;
}
}
/// <summary>
/// Represents a node in the IntervalTree which contains start and end keys of type K, and a value of generic type V.
/// </summary>
/// <typeparam name="K">Key type of the node</typeparam>
/// <typeparam name="V">Value type of the node</typeparam>
class IntervalTreeNode<K, V>
{
public bool Color = true;
public IntervalTreeNode<K, V> Left = null;
public IntervalTreeNode<K, V> Right = null;
public IntervalTreeNode<K, V> Parent = null;
/// <summary>
/// The start of the range.
/// </summary>
public K Start;
/// <summary>
/// The end of the range.
/// </summary>
public K End;
/// <summary>
/// The maximum end value of this node and all its children.
/// </summary>
public K Max;
/// <summary>
/// Value stored on this node.
/// </summary>
public V Value;
public IntervalTreeNode(K start, K end, V value, IntervalTreeNode<K, V> parent)
{
Start = start;
End = end;
Max = end;
Value = value;
Parent = parent;
}
}
}
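The tree keys each node by interval start and augments it with the maximum end value of its subtree, which is what lets the recursive Get skip whole subtrees that cannot overlap the query. A small usage sketch with ulong keys and values, the way PlaceholderManager uses it; these types are internal to Ryujinx.Memory, so this is illustrative rather than something another assembly can call directly.
using System;
using Ryujinx.Memory.WindowsShared;
var tree = new IntervalTree<ulong, ulong>();
// A reserved region marked with the special "unmapped" value, plus one mapped sub-range.
tree.Add(0x1000UL, 0x9000UL, ulong.MaxValue);
tree.Add(0x2000UL, 0x3000UL, 0UL); // Backing offset 0.
// Collect everything overlapping [0x2800, 0x4000).
var overlaps = Array.Empty<IntervalTreeNode<ulong, ulong>>();
int count = tree.Get(0x2800UL, 0x4000UL, ref overlaps);
for (int i = 0; i < count; i++)
{
    Console.WriteLine($"[0x{overlaps[i].Start:X}, 0x{overlaps[i].End:X}) -> 0x{overlaps[i].Value:X}");
}
tree.Remove(0x2000UL); // Removal is by start key.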

View file

@ -1,293 +0,0 @@
using Ryujinx.Memory.Range;
using System;
using System.Diagnostics;
namespace Ryujinx.Memory.WindowsShared
{
/// <summary>
/// A specialized list used for keeping track of Windows 10's memory placeholders.
/// This is used to make splitting a large placeholder into equally small
/// granular chunks much easier, while avoiding slowdown due to a large number of
/// placeholders by coalescing adjacent granular placeholders after they are unused.
/// </summary>
class PlaceholderList
{
private class PlaceholderBlock : IRange
{
public ulong Address { get; }
public ulong Size { get; private set; }
public ulong EndAddress { get; private set; }
public bool IsGranular { get; set; }
public PlaceholderBlock(ulong id, ulong size, bool isGranular)
{
Address = id;
Size = size;
EndAddress = id + size;
IsGranular = isGranular;
}
public bool OverlapsWith(ulong address, ulong size)
{
return Address < address + size && address < EndAddress;
}
public void ExtendTo(ulong end, RangeList<PlaceholderBlock> list)
{
EndAddress = end;
Size = end - Address;
list.UpdateEndAddress(this);
}
}
private RangeList<PlaceholderBlock> _placeholders;
private PlaceholderBlock[] _foundBlocks = new PlaceholderBlock[32];
/// <summary>
/// Create a new list to manage placeholders.
/// Note that a size is measured in granular placeholders.
/// If the placeholder granularity is 65536 bytes, then a 65536-byte region corresponds to a size of 1.
/// </summary>
/// <param name="size">Size measured in granular placeholders</param>
public PlaceholderList(ulong size)
{
_placeholders = new RangeList<PlaceholderBlock>();
_placeholders.Add(new PlaceholderBlock(0, size, false));
}
/// <summary>
/// Ensure that the given range of placeholders is granular.
/// </summary>
/// <param name="id">Start of the range, measured in granular placeholders</param>
/// <param name="size">Size of the range, measured in granular placeholders</param>
/// <param name="splitPlaceholderCallback">Callback function to run when splitting placeholders, calls with (start, middle)</param>
public void EnsurePlaceholders(ulong id, ulong size, Action<ulong, ulong> splitPlaceholderCallback)
{
// Search 1 before and after the placeholders, as we may need to expand/join granular regions surrounding the requested area.
ulong endId = id + size;
ulong searchStartId = id == 0 ? 0 : (id - 1);
int blockCount = _placeholders.FindOverlapsNonOverlapping(searchStartId, (endId - searchStartId) + 1, ref _foundBlocks);
PlaceholderBlock first = _foundBlocks[0];
PlaceholderBlock last = _foundBlocks[blockCount - 1];
bool overlapStart = first.EndAddress >= id && id != 0;
bool overlapEnd = last.Address <= endId;
for (int i = 0; i < blockCount; i++)
{
// Go through all non-granular blocks in the range and create placeholders.
PlaceholderBlock block = _foundBlocks[i];
if (block.Address <= id && block.EndAddress >= endId && block.IsGranular)
{
return; // The region we're searching for is already granular.
}
if (!block.IsGranular)
{
ulong placeholderStart = Math.Max(block.Address, id);
ulong placeholderEnd = Math.Min(block.EndAddress - 1, endId);
if (placeholderStart != block.Address && placeholderStart != block.EndAddress)
{
splitPlaceholderCallback(block.Address, placeholderStart - block.Address);
}
for (ulong j = placeholderStart; j < placeholderEnd; j++)
{
splitPlaceholderCallback(j, 1);
}
}
if (!((block == first && overlapStart) || (block == last && overlapEnd)))
{
// Remove blocks that will be replaced
_placeholders.Remove(block);
}
}
if (overlapEnd)
{
if (!(first == last && overlapStart))
{
_placeholders.Remove(last);
}
if (last.IsGranular)
{
endId = last.EndAddress;
}
else if (last.EndAddress != endId)
{
_placeholders.Add(new PlaceholderBlock(endId, last.EndAddress - endId, false));
}
}
if (overlapStart && first.IsGranular)
{
first.ExtendTo(endId, _placeholders);
}
else
{
if (overlapStart)
{
first.ExtendTo(id, _placeholders);
}
_placeholders.Add(new PlaceholderBlock(id, endId - id, true));
}
ValidateList();
}
/// <summary>
/// Coalesces placeholders in a given region, as they are not being used.
/// This assumes that the region only contains placeholders - all views and allocations must have been replaced with placeholders.
/// </summary>
/// <param name="id">Start of the range, measured in granular placeholders</param>
/// <param name="size">Size of the range, measured in granular placeholders</param>
/// <param name="coalescePlaceholderCallback">Callback function to run when coalescing two placeholders, calls with (start, end)</param>
public void RemovePlaceholders(ulong id, ulong size, Action<ulong, ulong> coalescePlaceholderCallback)
{
ulong endId = id + size;
int blockCount = _placeholders.FindOverlapsNonOverlapping(id, size, ref _foundBlocks);
PlaceholderBlock first = _foundBlocks[0];
PlaceholderBlock last = _foundBlocks[blockCount - 1];
// All granular blocks must have non-granular blocks surrounding them, unless they start at 0.
// We must extend the non-granular blocks into the granular ones. This does mean that we need to search twice.
if (first.IsGranular || last.IsGranular)
{
ulong surroundStart = Math.Max(0, (first.IsGranular && first.Address != 0) ? first.Address - 1 : id);
blockCount = _placeholders.FindOverlapsNonOverlapping(
surroundStart,
(last.IsGranular ? last.EndAddress + 1 : endId) - surroundStart,
ref _foundBlocks);
first = _foundBlocks[0];
last = _foundBlocks[blockCount - 1];
}
if (first == last)
{
return; // Already coalesced.
}
PlaceholderBlock extendBlock = id == 0 ? null : first;
bool newBlock = false;
for (int i = extendBlock == null ? 0 : 1; i < blockCount; i++)
{
// Go through all granular blocks in the range and extend placeholders.
PlaceholderBlock block = _foundBlocks[i];
ulong blockEnd = block.EndAddress;
ulong extendFrom;
ulong extent = Math.Min(blockEnd, endId);
if (block.Address < id && blockEnd > id)
{
block.ExtendTo(id, _placeholders);
extendBlock = null;
}
else
{
_placeholders.Remove(block);
}
if (extendBlock == null)
{
extendFrom = id;
extendBlock = new PlaceholderBlock(id, extent - id, false);
_placeholders.Add(extendBlock);
if (blockEnd > extent)
{
_placeholders.Add(new PlaceholderBlock(extent, blockEnd - extent, true));
// Skip the next non-granular block, and extend from that into the granular block afterwards.
// (assuming that one is still in the requested range)
if (i + 1 < blockCount)
{
extendBlock = _foundBlocks[i + 1];
}
i++;
}
newBlock = true;
}
else
{
extendFrom = extendBlock.Address;
extendBlock.ExtendTo(block.IsGranular ? extent : block.EndAddress, _placeholders);
}
if (block.IsGranular)
{
ulong placeholderStart = Math.Max(block.Address, id);
ulong placeholderEnd = extent;
if (newBlock)
{
placeholderStart++;
newBlock = false;
}
for (ulong j = placeholderStart; j < placeholderEnd; j++)
{
coalescePlaceholderCallback(extendFrom, (j + 1) - extendFrom);
}
if (extent < block.EndAddress)
{
_placeholders.Add(new PlaceholderBlock(placeholderEnd, block.EndAddress - placeholderEnd, true));
ValidateList();
return;
}
}
else
{
coalescePlaceholderCallback(extendFrom, block.EndAddress - extendFrom);
}
}
ValidateList();
}
/// <summary>
/// Ensure that the placeholder list is valid.
/// A valid list should not have any gaps between the placeholders,
/// and no two adjacent placeholders may have the same IsGranular value.
/// </summary>
[Conditional("DEBUG")]
private void ValidateList()
{
bool isGranular = false;
bool first = true;
ulong lastAddress = 0;
foreach (var placeholder in _placeholders)
{
if (placeholder.Address != lastAddress)
{
throw new InvalidOperationException("Gap in placeholder list.");
}
if (isGranular == placeholder.IsGranular && !first)
{
throw new InvalidOperationException("Placeholder list not alternating.");
}
first = false;
isGranular = placeholder.IsGranular;
lastAddress = placeholder.EndAddress;
}
}
}
}
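For reference, the PlaceholderList deleted here worked in units of granular placeholders rather than bytes and reported the required OS-level work through callbacks; both callbacks receive (start, size) in those units. A hedged sketch of how it was driven, with printing callbacks standing in for the real SplitPlaceholder/CoalescePlaceholder calls:
using System;
using Ryujinx.Memory.WindowsShared;
// 256 granular units of 64 KiB each, i.e. a 16 MiB reserved region.
var placeholders = new PlaceholderList(256);
// Make units [4, 8) individually mappable before views are placed into them.
placeholders.EnsurePlaceholders(4, 4, (start, size) =>
    Console.WriteLine($"split placeholder at unit {start}, size {size}"));
// After the views are gone again, merge the granular units back together.
placeholders.RemovePlaceholders(4, 4, (start, size) =>
    Console.WriteLine($"coalesce placeholders [{start}, {start + size})"));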

View file

@ -0,0 +1,633 @@
using System;
using System.Diagnostics;
using System.Threading;
namespace Ryujinx.Memory.WindowsShared
{
/// <summary>
/// Windows memory placeholder manager.
/// </summary>
class PlaceholderManager
{
private const ulong MinimumPageSize = 0x1000;
[ThreadStatic]
private static int _threadLocalPartialUnmapsCount;
private readonly IntervalTree<ulong, ulong> _mappings;
private readonly IntervalTree<ulong, MemoryPermission> _protections;
private readonly ReaderWriterLock _partialUnmapLock;
private int _partialUnmapsCount;
/// <summary>
/// Creates a new instance of the Windows memory placeholder manager.
/// </summary>
public PlaceholderManager()
{
_mappings = new IntervalTree<ulong, ulong>();
_protections = new IntervalTree<ulong, MemoryPermission>();
_partialUnmapLock = new ReaderWriterLock();
}
/// <summary>
/// Reserves a range of the address space to be later mapped as shared memory views.
/// </summary>
/// <param name="address">Start address of the region to reserve</param>
/// <param name="size">Size in bytes of the region to reserve</param>
public void ReserveRange(ulong address, ulong size)
{
lock (_mappings)
{
_mappings.Add(address, address + size, ulong.MaxValue);
}
}
/// <summary>
/// Maps a shared memory view on a previously reserved memory region.
/// </summary>
/// <param name="sharedMemory">Shared memory that will be the backing storage for the view</param>
/// <param name="srcOffset">Offset in the shared memory to map</param>
/// <param name="location">Address to map the view into</param>
/// <param name="size">Size of the view in bytes</param>
public void MapView(IntPtr sharedMemory, ulong srcOffset, IntPtr location, IntPtr size)
{
_partialUnmapLock.AcquireReaderLock(Timeout.Infinite);
try
{
UnmapViewInternal(sharedMemory, location, size);
MapViewInternal(sharedMemory, srcOffset, location, size);
}
finally
{
_partialUnmapLock.ReleaseReaderLock();
}
}
/// <summary>
/// Maps a shared memory view on a previously reserved memory region.
/// </summary>
/// <param name="sharedMemory">Shared memory that will be the backing storage for the view</param>
/// <param name="srcOffset">Offset in the shared memory to map</param>
/// <param name="location">Address to map the view into</param>
/// <param name="size">Size of the view in bytes</param>
/// <exception cref="WindowsApiException">Thrown when the Windows API returns an error mapping the memory</exception>
private void MapViewInternal(IntPtr sharedMemory, ulong srcOffset, IntPtr location, IntPtr size)
{
SplitForMap((ulong)location, (ulong)size, srcOffset);
var ptr = WindowsApi.MapViewOfFile3(
sharedMemory,
WindowsApi.CurrentProcessHandle,
location,
srcOffset,
size,
0x4000, // REPLACE_PLACEHOLDER
MemoryProtection.ReadWrite,
IntPtr.Zero,
0);
if (ptr == IntPtr.Zero)
{
throw new WindowsApiException("MapViewOfFile3");
}
}
/// <summary>
/// Splits a larger placeholder, slicing at the start and end address, for a new memory mapping.
/// </summary>
/// <param name="address">Address to split</param>
/// <param name="size">Size of the new region</param>
/// <param name="backingOffset">Offset in the shared memory that will be mapped</param>
private void SplitForMap(ulong address, ulong size, ulong backingOffset)
{
ulong endAddress = address + size;
var overlaps = Array.Empty<IntervalTreeNode<ulong, ulong>>();
lock (_mappings)
{
int count = _mappings.Get(address, endAddress, ref overlaps);
Debug.Assert(count == 1);
Debug.Assert(!IsMapped(overlaps[0].Value));
var overlap = overlaps[0];
// Tree operations might modify the node start/end values, so save a copy before we modify the tree.
ulong overlapStart = overlap.Start;
ulong overlapEnd = overlap.End;
ulong overlapValue = overlap.Value;
_mappings.Remove(overlap);
bool overlapStartsBefore = overlapStart < address;
bool overlapEndsAfter = overlapEnd > endAddress;
if (overlapStartsBefore && overlapEndsAfter)
{
CheckFreeResult(WindowsApi.VirtualFree(
(IntPtr)address,
(IntPtr)size,
AllocationType.Release | AllocationType.PreservePlaceholder));
_mappings.Add(overlapStart, address, overlapValue);
_mappings.Add(endAddress, overlapEnd, AddBackingOffset(overlapValue, endAddress - overlapStart));
}
else if (overlapStartsBefore)
{
ulong overlappedSize = overlapEnd - address;
CheckFreeResult(WindowsApi.VirtualFree(
(IntPtr)address,
(IntPtr)overlappedSize,
AllocationType.Release | AllocationType.PreservePlaceholder));
_mappings.Add(overlapStart, address, overlapValue);
}
else if (overlapEndsAfter)
{
ulong overlappedSize = endAddress - overlapStart;
CheckFreeResult(WindowsApi.VirtualFree(
(IntPtr)overlapStart,
(IntPtr)overlappedSize,
AllocationType.Release | AllocationType.PreservePlaceholder));
_mappings.Add(endAddress, overlapEnd, AddBackingOffset(overlapValue, overlappedSize));
}
_mappings.Add(address, endAddress, backingOffset);
}
}
/// <summary>
/// Unmaps a view that has been previously mapped with <see cref="MapView"/>.
/// </summary>
/// <remarks>
/// For "partial unmaps" (when not the entire mapped range is being unmapped), it might be
/// necessary to unmap the whole range and then remap the sub-ranges that should remain mapped.
/// </remarks>
/// <param name="sharedMemory">Shared memory that the view being unmapped belongs to</param>
/// <param name="location">Address to unmap</param>
/// <param name="size">Size of the region to unmap in bytes</param>
public void UnmapView(IntPtr sharedMemory, IntPtr location, IntPtr size)
{
_partialUnmapLock.AcquireReaderLock(Timeout.Infinite);
try
{
UnmapViewInternal(sharedMemory, location, size);
}
finally
{
_partialUnmapLock.ReleaseReaderLock();
}
}
/// <summary>
/// Unmaps a view that has been previously mapped with <see cref="MapView"/>.
/// </summary>
/// <remarks>
/// For "partial unmaps" (when not the entire mapped range is being unmapped), it might be
/// necessary to unmap the whole range and then remap the sub-ranges that should remain mapped.
/// </remarks>
/// <param name="sharedMemory">Shared memory that the view being unmapped belongs to</param>
/// <param name="location">Address to unmap</param>
/// <param name="size">Size of the region to unmap in bytes</param>
/// <exception cref="WindowsApiException">Thrown when the Windows API returns an error unmapping or remapping the memory</exception>
private void UnmapViewInternal(IntPtr sharedMemory, IntPtr location, IntPtr size)
{
ulong startAddress = (ulong)location;
ulong unmapSize = (ulong)size;
ulong endAddress = startAddress + unmapSize;
var overlaps = Array.Empty<IntervalTreeNode<ulong, ulong>>();
int count = 0;
lock (_mappings)
{
count = _mappings.Get(startAddress, endAddress, ref overlaps);
}
for (int index = 0; index < count; index++)
{
var overlap = overlaps[index];
if (IsMapped(overlap.Value))
{
if (!WindowsApi.UnmapViewOfFile2(WindowsApi.CurrentProcessHandle, (IntPtr)overlap.Start, 2)) // 2 = MEM_PRESERVE_PLACEHOLDER
{
throw new WindowsApiException("UnmapViewOfFile2");
}
// Tree operations might modify the node start/end values, so save a copy before we modify the tree.
ulong overlapStart = overlap.Start;
ulong overlapEnd = overlap.End;
ulong overlapValue = overlap.Value;
_mappings.Remove(overlap);
_mappings.Add(overlapStart, overlapEnd, ulong.MaxValue);
bool overlapStartsBefore = overlapStart < startAddress;
bool overlapEndsAfter = overlapEnd > endAddress;
if (overlapStartsBefore || overlapEndsAfter)
{
// If the overlap extends beyond the region we are unmapping,
// then we need to re-map the regions that are supposed to remain mapped.
// This is necessary because Windows does not support partial view unmaps.
// That is, you can only fully unmap a view that was previously mapped, you can't just unmap a chunk of it.
LockCookie lockCookie = _partialUnmapLock.UpgradeToWriterLock(Timeout.Infinite);
_partialUnmapsCount++;
if (overlapStartsBefore)
{
ulong remapSize = startAddress - overlapStart;
MapViewInternal(sharedMemory, overlapValue, (IntPtr)overlapStart, (IntPtr)remapSize);
RestoreRangeProtection(overlapStart, remapSize);
}
if (overlapEndsAfter)
{
ulong overlappedSize = endAddress - overlapStart;
ulong remapBackingOffset = overlapValue + overlappedSize;
ulong remapAddress = overlapStart + overlappedSize;
ulong remapSize = overlapEnd - endAddress;
MapViewInternal(sharedMemory, remapBackingOffset, (IntPtr)remapAddress, (IntPtr)remapSize);
RestoreRangeProtection(remapAddress, remapSize);
}
_partialUnmapLock.DowngradeFromWriterLock(ref lockCookie);
}
}
}
CoalesceForUnmap(startAddress, unmapSize);
RemoveProtection(startAddress, unmapSize);
}
/// <summary>
/// Coalesces adjacent placeholders after unmap.
/// </summary>
/// <param name="address">Address of the region that was unmapped</param>
/// <param name="size">Size of the region that was unmapped in bytes</param>
private void CoalesceForUnmap(ulong address, ulong size)
{
ulong endAddress = address + size;
var overlaps = Array.Empty<IntervalTreeNode<ulong, ulong>>();
int unmappedCount = 0;
lock (_mappings)
{
int count = _mappings.Get(address - MinimumPageSize, endAddress + MinimumPageSize, ref overlaps);
if (count < 2)
{
// Nothing to coalesce if we only have 1 or no overlaps.
return;
}
for (int index = 0; index < count; index++)
{
var overlap = overlaps[index];
if (!IsMapped(overlap.Value))
{
if (address > overlap.Start)
{
address = overlap.Start;
}
if (endAddress < overlap.End)
{
endAddress = overlap.End;
}
_mappings.Remove(overlap);
unmappedCount++;
}
}
_mappings.Add(address, endAddress, ulong.MaxValue);
}
if (unmappedCount > 1)
{
size = endAddress - address;
CheckFreeResult(WindowsApi.VirtualFree(
(IntPtr)address,
(IntPtr)size,
AllocationType.Release | AllocationType.CoalescePlaceholders));
}
}
/// <summary>
/// Reprotects a region of memory that has been mapped.
/// </summary>
/// <param name="address">Address of the region to reprotect</param>
/// <param name="size">Size of the region to reprotect in bytes</param>
/// <param name="permission">New permissions</param>
/// <returns>True if the reprotection was successful, false otherwise</returns>
public bool ReprotectView(IntPtr address, IntPtr size, MemoryPermission permission)
{
_partialUnmapLock.AcquireReaderLock(Timeout.Infinite);
try
{
return ReprotectViewInternal(address, size, permission, false);
}
finally
{
_partialUnmapLock.ReleaseReaderLock();
}
}
/// <summary>
/// Reprotects a region of memory that has been mapped.
/// </summary>
/// <param name="address">Address of the region to reprotect</param>
/// <param name="size">Size of the region to reprotect in bytes</param>
/// <param name="permission">New permissions</param>
/// <param name="throwOnError">Throw an exception instead of returning an error if the operation fails</param>
/// <returns>True if the reprotection was successful or if <paramref name="throwOnError"/> is true, false otherwise</returns>
/// <exception cref="WindowsApiException">If <paramref name="throwOnError"/> is true, it is thrown when the Windows API returns an error reprotecting the memory</exception>
private bool ReprotectViewInternal(IntPtr address, IntPtr size, MemoryPermission permission, bool throwOnError)
{
ulong reprotectAddress = (ulong)address;
ulong reprotectSize = (ulong)size;
ulong endAddress = reprotectAddress + reprotectSize;
var overlaps = Array.Empty<IntervalTreeNode<ulong, ulong>>();
int count = 0;
lock (_mappings)
{
count = _mappings.Get(reprotectAddress, endAddress, ref overlaps);
}
bool success = true;
for (int index = 0; index < count; index++)
{
var overlap = overlaps[index];
ulong mappedAddress = overlap.Start;
ulong mappedSize = overlap.End - overlap.Start;
if (mappedAddress < reprotectAddress)
{
ulong delta = reprotectAddress - mappedAddress;
mappedAddress = reprotectAddress;
mappedSize -= delta;
}
ulong mappedEndAddress = mappedAddress + mappedSize;
if (mappedEndAddress > endAddress)
{
ulong delta = mappedEndAddress - endAddress;
mappedSize -= delta;
}
if (!WindowsApi.VirtualProtect((IntPtr)mappedAddress, (IntPtr)mappedSize, WindowsApi.GetProtection(permission), out _))
{
if (throwOnError)
{
throw new WindowsApiException("VirtualProtect");
}
success = false;
}
// We only keep track of "non-standard" protections,
// that is, everything that is not just RW (which is the default when views are mapped).
if (permission == MemoryPermission.ReadAndWrite)
{
RemoveProtection(mappedAddress, mappedSize);
}
else
{
AddProtection(mappedAddress, mappedSize, permission);
}
}
return success;
}
/// <summary>
/// Checks the result of a VirtualFree operation, throwing if needed.
/// </summary>
/// <param name="success">Operation result</param>
/// <exception cref="WindowsApiException">Thrown if <paramref name="success"/> is false</exception>
private static void CheckFreeResult(bool success)
{
if (!success)
{
throw new WindowsApiException("VirtualFree");
}
}
/// <summary>
/// Adds an offset to a backing offset. This will do nothing if the backing offset is the special "unmapped" value.
/// </summary>
/// <param name="backingOffset">Backing offset</param>
/// <param name="offset">Offset to be added</param>
/// <returns>Added offset or just <paramref name="backingOffset"/> if the region is unmapped</returns>
private static ulong AddBackingOffset(ulong backingOffset, ulong offset)
{
if (backingOffset == ulong.MaxValue)
{
return backingOffset;
}
return backingOffset + offset;
}
/// <summary>
/// Checks if a region is mapped.
/// </summary>
/// <param name="backingOffset">Backing offset to check</param>
/// <returns>True if the backing offset is not the special "unmapped" value, false otherwise</returns>
private static bool IsMapped(ulong backingOffset)
{
return backingOffset != ulong.MaxValue;
}
/// <summary>
/// Adds a protection to the list of protections.
/// </summary>
/// <param name="address">Address of the protected region</param>
/// <param name="size">Size of the protected region in bytes</param>
/// <param name="permission">Memory permissions of the region</param>
private void AddProtection(ulong address, ulong size, MemoryPermission permission)
{
ulong endAddress = address + size;
var overlaps = Array.Empty<IntervalTreeNode<ulong, MemoryPermission>>();
int count = 0;
lock (_protections)
{
count = _protections.Get(address, endAddress, ref overlaps);
Debug.Assert(count > 0);
if (count == 1 &&
overlaps[0].Start <= address &&
overlaps[0].End >= endAddress &&
overlaps[0].Value == permission)
{
return;
}
ulong startAddress = address;
for (int index = 0; index < count; index++)
{
var protection = overlaps[index];
ulong protAddress = protection.Start;
ulong protEndAddress = protection.End;
MemoryPermission protPermission = protection.Value;
_protections.Remove(protection);
if (protPermission == permission)
{
if (startAddress > protAddress)
{
startAddress = protAddress;
}
if (endAddress < protEndAddress)
{
endAddress = protEndAddress;
}
}
else
{
if (startAddress > protAddress)
{
_protections.Add(protAddress, startAddress, protPermission);
}
if (endAddress < protEndAddress)
{
_protections.Add(endAddress, protEndAddress, protPermission);
}
}
}
_protections.Add(startAddress, endAddress, permission);
}
}
/// <summary>
/// Removes protection from the list of protections.
/// </summary>
/// <param name="address">Address of the protected region</param>
/// <param name="size">Size of the protected region in bytes</param>
private void RemoveProtection(ulong address, ulong size)
{
ulong endAddress = address + size;
var overlaps = Array.Empty<IntervalTreeNode<ulong, MemoryPermission>>();
int count = 0;
lock (_protections)
{
count = _protections.Get(address, endAddress, ref overlaps);
for (int index = 0; index < count; index++)
{
var protection = overlaps[index];
ulong protAddress = protection.Start;
ulong protEndAddress = protection.End;
MemoryPermission protPermission = protection.Value;
_protections.Remove(protection);
if (address > protAddress)
{
_protections.Add(protAddress, address, protPermission);
}
if (endAddress < protEndAddress)
{
_protections.Add(endAddress, protEndAddress, protPermission);
}
}
}
}
/// <summary>
/// Restores the protection of a given memory region that was remapped, using the protections list.
/// </summary>
/// <param name="address">Address of the remapped region</param>
/// <param name="size">Size of the remapped region in bytes</param>
private void RestoreRangeProtection(ulong address, ulong size)
{
ulong endAddress = address + size;
var overlaps = Array.Empty<IntervalTreeNode<ulong, MemoryPermission>>();
int count = 0;
lock (_protections)
{
count = _protections.Get(address, endAddress, ref overlaps);
}
ulong startAddress = address;
for (int index = 0; index < count; index++)
{
var protection = overlaps[index];
ulong protAddress = protection.Start;
ulong protEndAddress = protection.End;
if (protAddress < address)
{
protAddress = address;
}
if (protEndAddress > endAddress)
{
protEndAddress = endAddress;
}
ReprotectViewInternal((IntPtr)protAddress, (IntPtr)(protEndAddress - protAddress), protection.Value, true);
}
}
/// <summary>
/// Checks if an access violation handler should retry execution due to a fault caused by partial unmap.
/// </summary>
/// <remarks>
/// Due to Windows limitations, <see cref="UnmapView"/> might need to unmap more memory than requested.
/// The additional memory that was unmapped is later remapped, however this leaves a time gap where the
/// memory might be accessed but is unmapped. Users of the API must compensate for that by catching the
/// access violation and retrying if it happened between the unmap and remap operation.
/// This method can be used to decide if retrying in such cases is necessary or not.
/// </remarks>
/// <returns>True if execution should be retried, false otherwise</returns>
public bool RetryFromAccessViolation()
{
_partialUnmapLock.AcquireReaderLock(Timeout.Infinite);
bool retry = _threadLocalPartialUnmapsCount != _partialUnmapsCount;
if (retry)
{
_threadLocalPartialUnmapsCount = _partialUnmapsCount;
}
_partialUnmapLock.ReleaseReaderLock();
return retry;
}
}
}
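End to end, the manager is driven in three steps: reserve a placeholder region once, then map, reprotect and unmap shared memory views inside it. A hedged sketch using the WindowsApi declarations from this change; the addresses and sizes are illustrative, and the (FileMapProtection)0x04 cast is the standard Win32 PAGE_READWRITE value, which these files do not spell out.
using System;
using Ryujinx.Memory;
using Ryujinx.Memory.WindowsShared;
// Reserve 1 MiB of address space as a single placeholder.
IntPtr reservation = WindowsApi.VirtualAlloc2(
    WindowsApi.CurrentProcessHandle,
    IntPtr.Zero,
    (IntPtr)0x100000,
    AllocationType.Reserve | AllocationType.ReservePlaceholder,
    MemoryProtection.NoAccess,
    IntPtr.Zero,
    0);
// A 64 KiB pagefile-backed section to use as the view source.
IntPtr sharedMemory = WindowsApi.CreateFileMapping(
    WindowsApi.InvalidHandleValue,
    IntPtr.Zero,
    (FileMapProtection)0x04, // PAGE_READWRITE
    0,
    0x10000,
    null);
var manager = new PlaceholderManager();
manager.ReserveRange((ulong)reservation, 0x100000);
// Map the section into the middle of the reservation, make it read-only, then unmap it again.
IntPtr viewAddress = (IntPtr)((ulong)reservation + 0x20000);
manager.MapView(sharedMemory, 0, viewAddress, (IntPtr)0x10000);
manager.ReprotectView(viewAddress, (IntPtr)0x10000, MemoryPermission.Read);
manager.UnmapView(sharedMemory, viewAddress, (IntPtr)0x10000);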

View file

@ -0,0 +1,93 @@
using System;
using System.Runtime.InteropServices;
namespace Ryujinx.Memory.WindowsShared
{
static class WindowsApi
{
public static readonly IntPtr InvalidHandleValue = new IntPtr(-1);
public static readonly IntPtr CurrentProcessHandle = new IntPtr(-1);
[DllImport("kernel32.dll", SetLastError = true)]
public static extern IntPtr VirtualAlloc(
IntPtr lpAddress,
IntPtr dwSize,
AllocationType flAllocationType,
MemoryProtection flProtect);
[DllImport("KernelBase.dll", SetLastError = true)]
public static extern IntPtr VirtualAlloc2(
IntPtr process,
IntPtr lpAddress,
IntPtr dwSize,
AllocationType flAllocationType,
MemoryProtection flProtect,
IntPtr extendedParameters,
ulong parameterCount);
[DllImport("kernel32.dll", SetLastError = true)]
public static extern bool VirtualProtect(
IntPtr lpAddress,
IntPtr dwSize,
MemoryProtection flNewProtect,
out MemoryProtection lpflOldProtect);
[DllImport("kernel32.dll", SetLastError = true)]
public static extern bool VirtualFree(IntPtr lpAddress, IntPtr dwSize, AllocationType dwFreeType);
[DllImport("kernel32.dll", SetLastError = true)]
public static extern IntPtr CreateFileMapping(
IntPtr hFile,
IntPtr lpFileMappingAttributes,
FileMapProtection flProtect,
uint dwMaximumSizeHigh,
uint dwMaximumSizeLow,
[MarshalAs(UnmanagedType.LPWStr)] string lpName);
[DllImport("kernel32.dll", SetLastError = true)]
public static extern bool CloseHandle(IntPtr hObject);
[DllImport("kernel32.dll", SetLastError = true)]
public static extern IntPtr MapViewOfFile(
IntPtr hFileMappingObject,
uint dwDesiredAccess,
uint dwFileOffsetHigh,
uint dwFileOffsetLow,
IntPtr dwNumberOfBytesToMap);
[DllImport("KernelBase.dll", SetLastError = true)]
public static extern IntPtr MapViewOfFile3(
IntPtr hFileMappingObject,
IntPtr process,
IntPtr baseAddress,
ulong offset,
IntPtr dwNumberOfBytesToMap,
ulong allocationType,
MemoryProtection dwDesiredAccess,
IntPtr extendedParameters,
ulong parameterCount);
[DllImport("kernel32.dll", SetLastError = true)]
public static extern bool UnmapViewOfFile(IntPtr lpBaseAddress);
[DllImport("KernelBase.dll", SetLastError = true)]
public static extern bool UnmapViewOfFile2(IntPtr process, IntPtr lpBaseAddress, ulong unmapFlags);
[DllImport("kernel32.dll")]
public static extern uint GetLastError();
public static MemoryProtection GetProtection(MemoryPermission permission)
{
return permission switch
{
MemoryPermission.None => MemoryProtection.NoAccess,
MemoryPermission.Read => MemoryProtection.ReadOnly,
MemoryPermission.ReadAndWrite => MemoryProtection.ReadWrite,
MemoryPermission.ReadAndExecute => MemoryProtection.ExecuteRead,
MemoryPermission.ReadWriteExecute => MemoryProtection.ExecuteReadWrite,
MemoryPermission.Execute => MemoryProtection.Execute,
_ => throw new MemoryProtectionException(permission)
};
}
}
}
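Taken together, these imports already demonstrate the aliasing primitive the whole change builds on: two views of the same pagefile-backed section see each other's writes. A minimal sketch; the 0x04 (PAGE_READWRITE) and 0x6 (FILE_MAP_READ | FILE_MAP_WRITE) constants are standard Win32 values that are not defined in this file.
using System;
using System.Runtime.InteropServices;
using Ryujinx.Memory.WindowsShared;
IntPtr section = WindowsApi.CreateFileMapping(
    WindowsApi.InvalidHandleValue, // Pagefile-backed, no file on disk.
    IntPtr.Zero,
    (FileMapProtection)0x04, // PAGE_READWRITE
    0,
    0x1000,
    null);
// Two independent views of the same 4 KiB of backing memory.
IntPtr viewA = WindowsApi.MapViewOfFile(section, 0x6, 0, 0, (IntPtr)0x1000);
IntPtr viewB = WindowsApi.MapViewOfFile(section, 0x6, 0, 0, (IntPtr)0x1000);
// A write through one view is visible through the alias.
Marshal.WriteInt32(viewA, 0x12345678);
bool aliased = Marshal.ReadInt32(viewB) == 0x12345678;
WindowsApi.UnmapViewOfFile(viewA);
WindowsApi.UnmapViewOfFile(viewB);
WindowsApi.CloseHandle(section);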

View file

@ -0,0 +1,24 @@
using System;
namespace Ryujinx.Memory.WindowsShared
{
class WindowsApiException : Exception
{
public WindowsApiException()
{
}
public WindowsApiException(string functionName) : base(CreateMessage(functionName))
{
}
public WindowsApiException(string functionName, Exception inner) : base(CreateMessage(functionName), inner)
{
}
private static string CreateMessage(string functionName)
{
return $"{functionName} returned error code 0x{WindowsApi.GetLastError():X}.";
}
}
}

View file

@ -54,9 +54,9 @@ namespace Ryujinx.Tests.Cpu
_currAddress = CodeBaseAddress;
_ram = new MemoryBlock(Size * 2);
_memory = new MemoryManager(1ul << 16);
_memory = new MemoryManager(_ram, 1ul << 16);
_memory.IncrementReferenceCount();
_memory.Map(CodeBaseAddress, _ram.GetPointer(0, Size * 2), Size * 2);
_memory.Map(CodeBaseAddress, 0, Size * 2);
_context = CpuContext.CreateExecutionContext();
Translator.IsReadyForTranslation.Set();

View file

@ -49,9 +49,9 @@ namespace Ryujinx.Tests.Cpu
_currAddress = CodeBaseAddress;
_ram = new MemoryBlock(Size * 2);
_memory = new MemoryManager(1ul << 16);
_memory = new MemoryManager(_ram, 1ul << 16);
_memory.IncrementReferenceCount();
_memory.Map(CodeBaseAddress, _ram.GetPointer(0, Size * 2), Size * 2);
_memory.Map(CodeBaseAddress, 0, Size * 2);
_context = CpuContext.CreateExecutionContext();
_context.IsAarch32 = true;
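The test changes capture the core API shift of this commit: the CPU MemoryManager now owns a backing MemoryBlock and Map() takes a guest physical address (an offset into that block) instead of a host pointer, which is what makes aliasing possible. A hedged sketch of mapping two virtual ranges onto the same physical pages, assuming the same MemoryManager surface the tests use (Map, IncrementReferenceCount and the generic Read/Write helpers):
using Ryujinx.Cpu;
using Ryujinx.Memory;
var ram = new MemoryBlock(0x10000);
var memory = new MemoryManager(ram, 1ul << 16);
memory.IncrementReferenceCount();
// Two virtual ranges backed by the same physical pages.
memory.Map(0x1000, 0, 0x1000);
memory.Map(0x5000, 0, 0x1000);
memory.Write(0x1000, 0xCAFEBABEu);
uint aliased = memory.Read<uint>(0x5000); // Reads back the value written through the first mapping.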