diff --git a/src/ARMeilleure/Instructions/InstEmitMemoryHelper.cs b/src/ARMeilleure/Instructions/InstEmitMemoryHelper.cs
index a807eed51c..ace6fe1ce9 100644
--- a/src/ARMeilleure/Instructions/InstEmitMemoryHelper.cs
+++ b/src/ARMeilleure/Instructions/InstEmitMemoryHelper.cs
@@ -157,7 +157,7 @@ namespace ARMeilleure.Instructions
context.Copy(temp, value);
- if (!context.Memory.Type.IsHostMapped())
+ if (!context.Memory.Type.IsHostMappedOrTracked())
{
context.Branch(lblEnd);
@@ -198,7 +198,7 @@ namespace ARMeilleure.Instructions
SetInt(context, rt, value);
- if (!context.Memory.Type.IsHostMapped())
+ if (!context.Memory.Type.IsHostMappedOrTracked())
{
context.Branch(lblEnd);
@@ -265,7 +265,7 @@ namespace ARMeilleure.Instructions
context.Copy(GetVec(rt), value);
- if (!context.Memory.Type.IsHostMapped())
+ if (!context.Memory.Type.IsHostMappedOrTracked())
{
context.Branch(lblEnd);
@@ -312,7 +312,7 @@ namespace ARMeilleure.Instructions
break;
}
- if (!context.Memory.Type.IsHostMapped())
+ if (!context.Memory.Type.IsHostMappedOrTracked())
{
context.Branch(lblEnd);
@@ -385,7 +385,7 @@ namespace ARMeilleure.Instructions
break;
}
- if (!context.Memory.Type.IsHostMapped())
+ if (!context.Memory.Type.IsHostMappedOrTracked())
{
context.Branch(lblEnd);
@@ -403,6 +403,27 @@ namespace ARMeilleure.Instructions
{
return EmitHostMappedPointer(context, address);
}
+ else if (context.Memory.Type.IsHostTracked())
+ {
+ if (address.Type == OperandType.I32)
+ {
+ address = context.ZeroExtend32(OperandType.I64, address);
+ }
+
+ if (context.Memory.Type == MemoryManagerType.HostTracked)
+ {
+ Operand mask = Const(ulong.MaxValue >> (64 - context.Memory.AddressSpaceBits));
+ address = context.BitwiseAnd(address, mask);
+ }
+
+ Operand ptBase = !context.HasPtc
+ ? Const(context.Memory.PageTablePointer.ToInt64())
+ : Const(context.Memory.PageTablePointer.ToInt64(), Ptc.PageTableSymbol);
+
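+ // The flat table is indexed by (va >> PageBits); each 8-byte entry stores
+ // (hostPageAddress - guestPageVa), as written by NativePageTable.GetPte, so adding
+ // the loaded entry to the guest address yields the host address with one load and one add.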
+ Operand ptOffset = context.ShiftRightUI(address, Const(PageBits));
+
+ return context.Add(address, context.Load(OperandType.I64, context.Add(ptBase, context.ShiftLeft(ptOffset, Const(3)))));
+ }
int ptLevelBits = context.Memory.AddressSpaceBits - PageBits;
int ptLevelSize = 1 << ptLevelBits;
diff --git a/src/ARMeilleure/Memory/MemoryManagerType.cs b/src/ARMeilleure/Memory/MemoryManagerType.cs
index b1cdbb069a..bc8ae26359 100644
--- a/src/ARMeilleure/Memory/MemoryManagerType.cs
+++ b/src/ARMeilleure/Memory/MemoryManagerType.cs
@@ -29,6 +29,18 @@ namespace ARMeilleure.Memory
/// Allows invalid access from JIT code to the rest of the program, but is faster.
/// </summary>
HostMappedUnsafe,
+
+ /// <summary>
+ /// High level implementation using a software flat page table for address translation
+ /// with no support for handling invalid or non-contiguous memory access.
+ /// </summary>
+ HostTracked,
+
+ /// <summary>
+ /// High level implementation using a software flat page table for address translation
+ /// without masking the address and no support for handling invalid or non-contiguous memory access.
+ /// </summary>
+ HostTrackedUnsafe,
}
public static class MemoryManagerTypeExtensions
@@ -37,5 +49,15 @@ namespace ARMeilleure.Memory
{
return type == MemoryManagerType.HostMapped || type == MemoryManagerType.HostMappedUnsafe;
}
+
+ public static bool IsHostTracked(this MemoryManagerType type)
+ {
+ return type == MemoryManagerType.HostTracked || type == MemoryManagerType.HostTrackedUnsafe;
+ }
+
+ public static bool IsHostMappedOrTracked(this MemoryManagerType type)
+ {
+ return type.IsHostMapped() || type.IsHostTracked();
+ }
}
}
diff --git a/src/ARMeilleure/Signal/NativeSignalHandlerGenerator.cs b/src/ARMeilleure/Signal/NativeSignalHandlerGenerator.cs
index c5e708e169..2ec5bc1b38 100644
--- a/src/ARMeilleure/Signal/NativeSignalHandlerGenerator.cs
+++ b/src/ARMeilleure/Signal/NativeSignalHandlerGenerator.cs
@@ -21,10 +21,8 @@ namespace ARMeilleure.Signal
private const uint EXCEPTION_ACCESS_VIOLATION = 0xc0000005;
- private static Operand EmitGenericRegionCheck(EmitterContext context, IntPtr signalStructPtr, Operand faultAddress, Operand isWrite, int rangeStructSize, ulong pageSize)
+ private static Operand EmitGenericRegionCheck(EmitterContext context, IntPtr signalStructPtr, Operand faultAddress, Operand isWrite, int rangeStructSize)
{
- ulong pageMask = pageSize - 1;
-
Operand inRegionLocal = context.AllocateLocal(OperandType.I32);
context.Copy(inRegionLocal, Const(0));
@@ -51,7 +49,7 @@ namespace ARMeilleure.Signal
// Only call tracking if in range.
context.BranchIfFalse(nextLabel, inRange, BasicBlockFrequency.Cold);
- Operand offset = context.BitwiseAnd(context.Subtract(faultAddress, rangeAddress), Const(~pageMask));
+ Operand offset = context.Subtract(faultAddress, rangeAddress);
// Call the tracking action, with the pointer's relative offset to the base address.
Operand trackingActionPtr = context.Load(OperandType.I64, Const((ulong)signalStructPtr + rangeBaseOffset + 20));
@@ -62,8 +60,10 @@ namespace ARMeilleure.Signal
// Tracking action should be non-null to call it, otherwise assume false return.
context.BranchIfFalse(skipActionLabel, trackingActionPtr);
- Operand result = context.Call(trackingActionPtr, OperandType.I32, offset, Const(pageSize), isWrite);
- context.Copy(inRegionLocal, result);
+ Operand result = context.Call(trackingActionPtr, OperandType.I64, offset, Const(1UL), isWrite);
+ context.Copy(inRegionLocal, context.ICompareNotEqual(result, Const(0UL)));
+
+ GenerateFaultAddressPatchCode(context, faultAddress, result);
context.MarkLabel(skipActionLabel);
@@ -155,7 +155,7 @@ namespace ARMeilleure.Signal
throw new PlatformNotSupportedException();
}
- public static byte[] GenerateUnixSignalHandler(IntPtr signalStructPtr, int rangeStructSize, ulong pageSize)
+ public static byte[] GenerateUnixSignalHandler(IntPtr signalStructPtr, int rangeStructSize)
{
EmitterContext context = new();
@@ -168,7 +168,7 @@ namespace ARMeilleure.Signal
Operand isWrite = context.ICompareNotEqual(writeFlag, Const(0L)); // Normalize to 0/1.
- Operand isInRegion = EmitGenericRegionCheck(context, signalStructPtr, faultAddress, isWrite, rangeStructSize, pageSize);
+ Operand isInRegion = EmitGenericRegionCheck(context, signalStructPtr, faultAddress, isWrite, rangeStructSize);
Operand endLabel = Label();
@@ -203,7 +203,7 @@ namespace ARMeilleure.Signal
return Compiler.Compile(cfg, argTypes, OperandType.None, CompilerOptions.HighCq, RuntimeInformation.ProcessArchitecture).Code;
}
- public static byte[] GenerateWindowsSignalHandler(IntPtr signalStructPtr, int rangeStructSize, ulong pageSize)
+ public static byte[] GenerateWindowsSignalHandler(IntPtr signalStructPtr, int rangeStructSize)
{
EmitterContext context = new();
@@ -232,7 +232,7 @@ namespace ARMeilleure.Signal
Operand isWrite = context.ICompareNotEqual(writeFlag, Const(0L)); // Normalize to 0/1.
- Operand isInRegion = EmitGenericRegionCheck(context, signalStructPtr, faultAddress, isWrite, rangeStructSize, pageSize);
+ Operand isInRegion = EmitGenericRegionCheck(context, signalStructPtr, faultAddress, isWrite, rangeStructSize);
Operand endLabel = Label();
@@ -256,5 +256,86 @@ namespace ARMeilleure.Signal
return Compiler.Compile(cfg, argTypes, OperandType.I32, CompilerOptions.HighCq, RuntimeInformation.ProcessArchitecture).Code;
}
+
+ private static void GenerateFaultAddressPatchCode(EmitterContext context, Operand faultAddress, Operand newAddress)
+ {
+ if (RuntimeInformation.ProcessArchitecture == Architecture.Arm64)
+ {
+ if (SupportsFaultAddressPatchingForHostOs())
+ {
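+ // If the tracking action returned a different (host) address, rewrite the base register used by
+ // the faulting instruction in the saved signal context, so the retried access targets the new mapping.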
+ Operand lblSkip = Label();
+
+ context.BranchIf(lblSkip, faultAddress, newAddress, Comparison.Equal);
+
+ Operand ucontextPtr = context.LoadArgument(OperandType.I64, 2);
+ Operand pcCtxAddress = default;
+ ulong baseRegsOffset = 0;
+
+ if (OperatingSystem.IsLinux())
+ {
+ pcCtxAddress = context.Add(ucontextPtr, Const(440UL));
+ baseRegsOffset = 184UL;
+ }
+ else if (OperatingSystem.IsMacOS() || OperatingSystem.IsIOS())
+ {
+ ucontextPtr = context.Load(OperandType.I64, context.Add(ucontextPtr, Const(48UL)));
+
+ pcCtxAddress = context.Add(ucontextPtr, Const(272UL));
+ baseRegsOffset = 16UL;
+ }
+
+ Operand pc = context.Load(OperandType.I64, pcCtxAddress);
+
+ Operand reg = GetAddressRegisterFromArm64Instruction(context, pc);
+ Operand reg64 = context.ZeroExtend32(OperandType.I64, reg);
+ Operand regCtxAddress = context.Add(ucontextPtr, context.Add(context.ShiftLeft(reg64, Const(3)), Const(baseRegsOffset)));
+ Operand regAddress = context.Load(OperandType.I64, regCtxAddress);
+
+ Operand addressDelta = context.Subtract(regAddress, faultAddress);
+
+ context.Store(regCtxAddress, context.Add(newAddress, addressDelta));
+
+ context.MarkLabel(lblSkip);
+ }
+ }
+ }
+
+ private static Operand GetAddressRegisterFromArm64Instruction(EmitterContext context, Operand pc)
+ {
+ Operand inst = context.Load(OperandType.I32, pc);
+ Operand reg = context.AllocateLocal(OperandType.I32);
+
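+ // Regular loads and stores encode the base register in bits [9:5]; SYS instructions
+ // (e.g. DC ZVA) encode the target register in bits [4:0], so check the opcode first.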
+ Operand isSysInst = context.ICompareEqual(context.BitwiseAnd(inst, Const(0xFFF80000)), Const(0xD5080000));
+
+ Operand lblSys = Label();
+ Operand lblEnd = Label();
+
+ context.BranchIfTrue(lblSys, isSysInst, BasicBlockFrequency.Cold);
+
+ context.Copy(reg, context.BitwiseAnd(context.ShiftRightUI(inst, Const(5)), Const(0x1F)));
+ context.Branch(lblEnd);
+
+ context.MarkLabel(lblSys);
+ context.Copy(reg, context.BitwiseAnd(inst, Const(0x1F)));
+
+ context.MarkLabel(lblEnd);
+
+ return reg;
+ }
+
+ public static bool SupportsFaultAddressPatchingForHost()
+ {
+ return SupportsFaultAddressPatchingForHostArch() && SupportsFaultAddressPatchingForHostOs();
+ }
+
+ private static bool SupportsFaultAddressPatchingForHostArch()
+ {
+ return RuntimeInformation.ProcessArchitecture == Architecture.Arm64;
+ }
+
+ private static bool SupportsFaultAddressPatchingForHostOs()
+ {
+ return OperatingSystem.IsLinux() || OperatingSystem.IsMacOS() || OperatingSystem.IsIOS();
+ }
}
}
diff --git a/src/Ryujinx.Common/Collections/IntrusiveRedBlackTreeNode.cs b/src/Ryujinx.Common/Collections/IntrusiveRedBlackTreeNode.cs
index 8480d51ad6..29d2d0c9a8 100644
--- a/src/Ryujinx.Common/Collections/IntrusiveRedBlackTreeNode.cs
+++ b/src/Ryujinx.Common/Collections/IntrusiveRedBlackTreeNode.cs
@@ -5,10 +5,10 @@ namespace Ryujinx.Common.Collections
/// </summary>
public class IntrusiveRedBlackTreeNode<T> where T : IntrusiveRedBlackTreeNode<T>
{
- internal bool Color = true;
- internal T Left;
- internal T Right;
- internal T Parent;
+ public bool Color = true;
+ public T Left;
+ public T Right;
+ public T Parent;
public T Predecessor => IntrusiveRedBlackTreeImpl<T>.PredecessorOf((T)this);
public T Successor => IntrusiveRedBlackTreeImpl<T>.SuccessorOf((T)this);
diff --git a/src/Ryujinx.Cpu/AppleHv/HvMemoryBlockAllocator.cs b/src/Ryujinx.Cpu/AppleHv/HvMemoryBlockAllocator.cs
index 4e3723d554..86936c5929 100644
--- a/src/Ryujinx.Cpu/AppleHv/HvMemoryBlockAllocator.cs
+++ b/src/Ryujinx.Cpu/AppleHv/HvMemoryBlockAllocator.cs
@@ -38,7 +38,7 @@ namespace Ryujinx.Cpu.AppleHv
private readonly HvIpaAllocator _ipaAllocator;
- public HvMemoryBlockAllocator(HvIpaAllocator ipaAllocator, int blockAlignment) : base(blockAlignment, MemoryAllocationFlags.None)
+ public HvMemoryBlockAllocator(HvIpaAllocator ipaAllocator, ulong blockAlignment) : base(blockAlignment, MemoryAllocationFlags.None)
{
_ipaAllocator = ipaAllocator;
}
diff --git a/src/Ryujinx.Cpu/Jit/HostTracked/AddressIntrusiveRedBlackTree.cs b/src/Ryujinx.Cpu/Jit/HostTracked/AddressIntrusiveRedBlackTree.cs
new file mode 100644
index 0000000000..0e24433035
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/HostTracked/AddressIntrusiveRedBlackTree.cs
@@ -0,0 +1,35 @@
+using Ryujinx.Common.Collections;
+using System;
+
+namespace Ryujinx.Cpu.Jit.HostTracked
+{
+ internal class AddressIntrusiveRedBlackTree<T> : IntrusiveRedBlackTree<T> where T : IntrusiveRedBlackTreeNode<T>, IComparable<T>, IComparable<ulong>
+ {
+ /// <summary>
+ /// Retrieve the node that is considered equal to the specified address by the comparator.
+ /// </summary>
+ /// <param name="address">Address to compare with</param>
+ /// <returns>Node that is equal to <paramref name="address"/></returns>
+ public T GetNode(ulong address)
+ {
+ T node = Root;
+ while (node != null)
+ {
+ int cmp = node.CompareTo(address);
+ if (cmp < 0)
+ {
+ node = node.Left;
+ }
+ else if (cmp > 0)
+ {
+ node = node.Right;
+ }
+ else
+ {
+ return node;
+ }
+ }
+ return null;
+ }
+ }
+}
diff --git a/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartition.cs b/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartition.cs
new file mode 100644
index 0000000000..224c5edc30
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartition.cs
@@ -0,0 +1,708 @@
+using Ryujinx.Common;
+using Ryujinx.Common.Collections;
+using Ryujinx.Memory;
+using System;
+using System.Diagnostics;
+using System.Threading;
+
+namespace Ryujinx.Cpu.Jit.HostTracked
+{
+ readonly struct PrivateRange
+ {
+ public readonly MemoryBlock Memory;
+ public readonly ulong Offset;
+ public readonly ulong Size;
+
+ public static PrivateRange Empty => new(null, 0, 0);
+
+ public PrivateRange(MemoryBlock memory, ulong offset, ulong size)
+ {
+ Memory = memory;
+ Offset = offset;
+ Size = size;
+ }
+ }
+
+ class AddressSpacePartition : IDisposable
+ {
+ public const ulong GuestPageSize = 0x1000;
+
+ private const int DefaultBlockAlignment = 1 << 20;
+
+ private enum MappingType : byte
+ {
+ None,
+ Private,
+ }
+
+ private class Mapping : IntrusiveRedBlackTreeNode<Mapping>, IComparable<Mapping>, IComparable<ulong>
+ {
+ public ulong Address { get; private set; }
+ public ulong Size { get; private set; }
+ public ulong EndAddress => Address + Size;
+ public MappingType Type { get; private set; }
+
+ public Mapping(ulong address, ulong size, MappingType type)
+ {
+ Address = address;
+ Size = size;
+ Type = type;
+ }
+
+ public Mapping Split(ulong splitAddress)
+ {
+ ulong leftSize = splitAddress - Address;
+ ulong rightSize = EndAddress - splitAddress;
+
+ Mapping left = new(Address, leftSize, Type);
+
+ Address = splitAddress;
+ Size = rightSize;
+
+ return left;
+ }
+
+ public void UpdateState(MappingType newType)
+ {
+ Type = newType;
+ }
+
+ public void Extend(ulong sizeDelta)
+ {
+ Size += sizeDelta;
+ }
+
+ public int CompareTo(Mapping other)
+ {
+ if (Address < other.Address)
+ {
+ return -1;
+ }
+ else if (Address <= other.EndAddress - 1UL)
+ {
+ return 0;
+ }
+ else
+ {
+ return 1;
+ }
+ }
+
+ public int CompareTo(ulong address)
+ {
+ if (address < Address)
+ {
+ return -1;
+ }
+ else if (address <= EndAddress - 1UL)
+ {
+ return 0;
+ }
+ else
+ {
+ return 1;
+ }
+ }
+ }
+
+ private class PrivateMapping : IntrusiveRedBlackTreeNode<PrivateMapping>, IComparable<PrivateMapping>, IComparable<ulong>
+ {
+ public ulong Address { get; private set; }
+ public ulong Size { get; private set; }
+ public ulong EndAddress => Address + Size;
+ public PrivateMemoryAllocation PrivateAllocation { get; private set; }
+
+ public PrivateMapping(ulong address, ulong size, PrivateMemoryAllocation privateAllocation)
+ {
+ Address = address;
+ Size = size;
+ PrivateAllocation = privateAllocation;
+ }
+
+ public PrivateMapping Split(ulong splitAddress)
+ {
+ ulong leftSize = splitAddress - Address;
+ ulong rightSize = EndAddress - splitAddress;
+
+ Debug.Assert(leftSize > 0);
+ Debug.Assert(rightSize > 0);
+
+ (var leftAllocation, PrivateAllocation) = PrivateAllocation.Split(leftSize);
+
+ PrivateMapping left = new(Address, leftSize, leftAllocation);
+
+ Address = splitAddress;
+ Size = rightSize;
+
+ return left;
+ }
+
+ public void Map(AddressSpacePartitionMultiAllocation baseBlock, ulong baseAddress, PrivateMemoryAllocation newAllocation)
+ {
+ baseBlock.MapView(newAllocation.Memory, newAllocation.Offset, Address - baseAddress, Size);
+ PrivateAllocation = newAllocation;
+ }
+
+ public void Unmap(AddressSpacePartitionMultiAllocation baseBlock, ulong baseAddress)
+ {
+ if (PrivateAllocation.IsValid)
+ {
+ baseBlock.UnmapView(PrivateAllocation.Memory, Address - baseAddress, Size);
+ PrivateAllocation.Dispose();
+ }
+
+ PrivateAllocation = default;
+ }
+
+ public void Extend(ulong sizeDelta)
+ {
+ Size += sizeDelta;
+ }
+
+ public int CompareTo(PrivateMapping other)
+ {
+ if (Address < other.Address)
+ {
+ return -1;
+ }
+ else if (Address <= other.EndAddress - 1UL)
+ {
+ return 0;
+ }
+ else
+ {
+ return 1;
+ }
+ }
+
+ public int CompareTo(ulong address)
+ {
+ if (address < Address)
+ {
+ return -1;
+ }
+ else if (address <= EndAddress - 1UL)
+ {
+ return 0;
+ }
+ else
+ {
+ return 1;
+ }
+ }
+ }
+
+ private readonly MemoryBlock _backingMemory;
+ private readonly AddressSpacePartitionMultiAllocation _baseMemory;
+ private readonly PrivateMemoryAllocator _privateMemoryAllocator;
+
+ private readonly AddressIntrusiveRedBlackTree<Mapping> _mappingTree;
+ private readonly AddressIntrusiveRedBlackTree<PrivateMapping> _privateTree;
+
+ private readonly ReaderWriterLockSlim _treeLock;
+
+ private readonly ulong _hostPageSize;
+
+ private ulong? _firstPagePa;
+ private ulong? _lastPagePa;
+ private ulong _cachedFirstPagePa;
+ private ulong _cachedLastPagePa;
+ private MemoryBlock _firstPageMemoryForUnmap;
+ private ulong _firstPageOffsetForLateMap;
+ private MemoryPermission _firstPageMemoryProtection;
+
+ public ulong Address { get; }
+ public ulong Size { get; }
+ public ulong EndAddress => Address + Size;
+
+ public AddressSpacePartition(AddressSpacePartitionAllocation baseMemory, MemoryBlock backingMemory, ulong address, ulong size)
+ {
+ _privateMemoryAllocator = new PrivateMemoryAllocator(DefaultBlockAlignment, MemoryAllocationFlags.Mirrorable);
+ _mappingTree = new AddressIntrusiveRedBlackTree<Mapping>();
+ _privateTree = new AddressIntrusiveRedBlackTree<PrivateMapping>();
+ _treeLock = new ReaderWriterLockSlim();
+
+ _mappingTree.Add(new Mapping(address, size, MappingType.None));
+ _privateTree.Add(new PrivateMapping(address, size, default));
+
+ _hostPageSize = MemoryBlock.GetPageSize();
+
+ _backingMemory = backingMemory;
+ _baseMemory = new(baseMemory);
+
+ _cachedFirstPagePa = ulong.MaxValue;
+ _cachedLastPagePa = ulong.MaxValue;
+
+ Address = address;
+ Size = size;
+ }
+
+ public bool IsEmpty()
+ {
+ _treeLock.EnterReadLock();
+
+ try
+ {
+ Mapping map = _mappingTree.GetNode(Address);
+
+ return map != null && map.Address == Address && map.Size == Size && map.Type == MappingType.None;
+ }
+ finally
+ {
+ _treeLock.ExitReadLock();
+ }
+ }
+
+ public void Map(ulong va, ulong pa, ulong size)
+ {
+ Debug.Assert(va >= Address);
+ Debug.Assert(va + size <= EndAddress);
+
+ if (va == Address)
+ {
+ _firstPagePa = pa;
+ }
+
+ if (va <= EndAddress - GuestPageSize && va + size > EndAddress - GuestPageSize)
+ {
+ _lastPagePa = pa + ((EndAddress - GuestPageSize) - va);
+ }
+
+ Update(va, pa, size, MappingType.Private);
+ }
+
+ public void Unmap(ulong va, ulong size)
+ {
+ Debug.Assert(va >= Address);
+ Debug.Assert(va + size <= EndAddress);
+
+ if (va == Address)
+ {
+ _firstPagePa = null;
+ }
+
+ if (va <= EndAddress - GuestPageSize && va + size > EndAddress - GuestPageSize)
+ {
+ _lastPagePa = null;
+ }
+
+ Update(va, 0UL, size, MappingType.None);
+ }
+
+ public void ReprotectAligned(ulong va, ulong size, MemoryPermission protection)
+ {
+ Debug.Assert(va >= Address);
+ Debug.Assert(va + size <= EndAddress);
+
+ _baseMemory.Reprotect(va - Address, size, protection, false);
+
+ if (va == Address)
+ {
+ _firstPageMemoryProtection = protection;
+ }
+ }
+
+ public void Reprotect(
+ ulong va,
+ ulong size,
+ MemoryPermission protection,
+ AddressSpacePartitioned addressSpace,
+ Action<ulong, IntPtr, ulong> updatePtCallback)
+ {
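+ // With protection mirrors, "reprotecting" means pointing the native page table at a mirror of this
+ // partition that already carries the requested permission, creating that mirror lazily on first use.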
+ if (_baseMemory.LazyInitMirrorForProtection(addressSpace, Address, Size, protection))
+ {
+ LateMap();
+ }
+
+ updatePtCallback(va, _baseMemory.GetPointerForProtection(va - Address, size, protection), size);
+ }
+
+ public IntPtr GetPointer(ulong va, ulong size)
+ {
+ Debug.Assert(va >= Address);
+ Debug.Assert(va + size <= EndAddress);
+
+ return _baseMemory.GetPointer(va - Address, size);
+ }
+
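+ // A "bridge" maps the first page of the following partition just past the end of this one,
+ // so host accesses that straddle a partition boundary still hit contiguous, valid memory.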
+ public void InsertBridgeAtEnd(AddressSpacePartition partitionAfter, bool useProtectionMirrors)
+ {
+ ulong firstPagePa = partitionAfter?._firstPagePa ?? ulong.MaxValue;
+ ulong lastPagePa = _lastPagePa ?? ulong.MaxValue;
+
+ if (firstPagePa != _cachedFirstPagePa || lastPagePa != _cachedLastPagePa)
+ {
+ if (partitionAfter != null && partitionAfter._firstPagePa.HasValue)
+ {
+ (MemoryBlock firstPageMemory, ulong firstPageOffset) = partitionAfter.GetFirstPageMemoryAndOffset();
+
+ _baseMemory.MapView(firstPageMemory, firstPageOffset, Size, _hostPageSize);
+
+ if (!useProtectionMirrors)
+ {
+ _baseMemory.Reprotect(Size, _hostPageSize, partitionAfter._firstPageMemoryProtection, throwOnFail: false);
+ }
+
+ _firstPageMemoryForUnmap = firstPageMemory;
+ _firstPageOffsetForLateMap = firstPageOffset;
+ }
+ else
+ {
+ MemoryBlock firstPageMemoryForUnmap = _firstPageMemoryForUnmap;
+
+ if (firstPageMemoryForUnmap != null)
+ {
+ _baseMemory.UnmapView(firstPageMemoryForUnmap, Size, _hostPageSize);
+ _firstPageMemoryForUnmap = null;
+ }
+ }
+
+ _cachedFirstPagePa = firstPagePa;
+ _cachedLastPagePa = lastPagePa;
+ }
+ }
+
+ public void ReprotectBridge(MemoryPermission protection)
+ {
+ if (_firstPageMemoryForUnmap != null)
+ {
+ _baseMemory.Reprotect(Size, _hostPageSize, protection, throwOnFail: false);
+ }
+ }
+
+ private (MemoryBlock, ulong) GetFirstPageMemoryAndOffset()
+ {
+ _treeLock.EnterReadLock();
+
+ try
+ {
+ PrivateMapping map = _privateTree.GetNode(Address);
+
+ if (map != null && map.PrivateAllocation.IsValid)
+ {
+ return (map.PrivateAllocation.Memory, map.PrivateAllocation.Offset + (Address - map.Address));
+ }
+ }
+ finally
+ {
+ _treeLock.ExitReadLock();
+ }
+
+ return (_backingMemory, _firstPagePa.Value);
+ }
+
+ public PrivateRange GetPrivateAllocation(ulong va)
+ {
+ _treeLock.EnterReadLock();
+
+ try
+ {
+ PrivateMapping map = _privateTree.GetNode(va);
+
+ if (map != null && map.PrivateAllocation.IsValid)
+ {
+ return new(map.PrivateAllocation.Memory, map.PrivateAllocation.Offset + (va - map.Address), map.Size - (va - map.Address));
+ }
+ }
+ finally
+ {
+ _treeLock.ExitReadLock();
+ }
+
+ return PrivateRange.Empty;
+ }
+
+ private void Update(ulong va, ulong pa, ulong size, MappingType type)
+ {
+ _treeLock.EnterWriteLock();
+
+ try
+ {
+ Mapping map = _mappingTree.GetNode(va);
+
+ Update(map, va, pa, size, type);
+ }
+ finally
+ {
+ _treeLock.ExitWriteLock();
+ }
+ }
+
+ private Mapping Update(Mapping map, ulong va, ulong pa, ulong size, MappingType type)
+ {
+ ulong endAddress = va + size;
+
+ for (; map != null; map = map.Successor)
+ {
+ if (map.Address < va)
+ {
+ _mappingTree.Add(map.Split(va));
+ }
+
+ if (map.EndAddress > endAddress)
+ {
+ Mapping newMap = map.Split(endAddress);
+ _mappingTree.Add(newMap);
+ map = newMap;
+ }
+
+ switch (type)
+ {
+ case MappingType.None:
+ ulong alignment = _hostPageSize;
+
+ bool unmappedBefore = map.Predecessor == null ||
+ (map.Predecessor.Type == MappingType.None && map.Predecessor.Address <= BitUtils.AlignDown(va, alignment));
+
+ bool unmappedAfter = map.Successor == null ||
+ (map.Successor.Type == MappingType.None && map.Successor.EndAddress >= BitUtils.AlignUp(endAddress, alignment));
+
+ UnmapPrivate(va, size, unmappedBefore, unmappedAfter);
+ break;
+ case MappingType.Private:
+ MapPrivate(va, size);
+ break;
+ }
+
+ map.UpdateState(type);
+ map = TryCoalesce(map);
+
+ if (map.EndAddress >= endAddress)
+ {
+ break;
+ }
+ }
+
+ return map;
+ }
+
+ private Mapping TryCoalesce(Mapping map)
+ {
+ Mapping previousMap = map.Predecessor;
+ Mapping nextMap = map.Successor;
+
+ if (previousMap != null && CanCoalesce(previousMap, map))
+ {
+ previousMap.Extend(map.Size);
+ _mappingTree.Remove(map);
+ map = previousMap;
+ }
+
+ if (nextMap != null && CanCoalesce(map, nextMap))
+ {
+ map.Extend(nextMap.Size);
+ _mappingTree.Remove(nextMap);
+ }
+
+ return map;
+ }
+
+ private static bool CanCoalesce(Mapping left, Mapping right)
+ {
+ return left.Type == right.Type;
+ }
+
+ private void MapPrivate(ulong va, ulong size)
+ {
+ ulong endAddress = va + size;
+
+ ulong alignment = _hostPageSize;
+
+ // Expand the range outwards based on page size to ensure that at least the requested region is mapped.
+ ulong vaAligned = BitUtils.AlignDown(va, alignment);
+ ulong endAddressAligned = BitUtils.AlignUp(endAddress, alignment);
+
+ PrivateMapping map = _privateTree.GetNode(va);
+
+ for (; map != null; map = map.Successor)
+ {
+ if (!map.PrivateAllocation.IsValid)
+ {
+ if (map.Address < vaAligned)
+ {
+ _privateTree.Add(map.Split(vaAligned));
+ }
+
+ if (map.EndAddress > endAddressAligned)
+ {
+ PrivateMapping newMap = map.Split(endAddressAligned);
+ _privateTree.Add(newMap);
+ map = newMap;
+ }
+
+ map.Map(_baseMemory, Address, _privateMemoryAllocator.Allocate(map.Size, _hostPageSize));
+ }
+
+ if (map.EndAddress >= endAddressAligned)
+ {
+ break;
+ }
+ }
+ }
+
+ private void UnmapPrivate(ulong va, ulong size, bool unmappedBefore, bool unmappedAfter)
+ {
+ ulong endAddress = va + size;
+
+ ulong alignment = _hostPageSize;
+
+ // If the adjacent mappings are unmapped, expand the range outwards,
+ // otherwise shrink it inwards. We must ensure we won't unmap pages that might still be in use.
+ ulong vaAligned = unmappedBefore ? BitUtils.AlignDown(va, alignment) : BitUtils.AlignUp(va, alignment);
+ ulong endAddressAligned = unmappedAfter ? BitUtils.AlignUp(endAddress, alignment) : BitUtils.AlignDown(endAddress, alignment);
+
+ if (endAddressAligned <= vaAligned)
+ {
+ return;
+ }
+
+ PrivateMapping map = _privateTree.GetNode(vaAligned);
+
+ for (; map != null; map = map.Successor)
+ {
+ if (map.PrivateAllocation.IsValid)
+ {
+ if (map.Address < vaAligned)
+ {
+ _privateTree.Add(map.Split(vaAligned));
+ }
+
+ if (map.EndAddress > endAddressAligned)
+ {
+ PrivateMapping newMap = map.Split(endAddressAligned);
+ _privateTree.Add(newMap);
+ map = newMap;
+ }
+
+ map.Unmap(_baseMemory, Address);
+ map = TryCoalesce(map);
+ }
+
+ if (map.EndAddress >= endAddressAligned)
+ {
+ break;
+ }
+ }
+ }
+
+ private PrivateMapping TryCoalesce(PrivateMapping map)
+ {
+ PrivateMapping previousMap = map.Predecessor;
+ PrivateMapping nextMap = map.Successor;
+
+ if (previousMap != null && CanCoalesce(previousMap, map))
+ {
+ previousMap.Extend(map.Size);
+ _privateTree.Remove(map);
+ map = previousMap;
+ }
+
+ if (nextMap != null && CanCoalesce(map, nextMap))
+ {
+ map.Extend(nextMap.Size);
+ _privateTree.Remove(nextMap);
+ }
+
+ return map;
+ }
+
+ private static bool CanCoalesce(PrivateMapping left, PrivateMapping right)
+ {
+ return !left.PrivateAllocation.IsValid && !right.PrivateAllocation.IsValid;
+ }
+
+ private void LateMap()
+ {
+ // Map all existing private allocations.
+ // This is necessary to ensure mirrors that are lazily created have the same mappings as the main one.
+
+ PrivateMapping map = _privateTree.GetNode(Address);
+
+ for (; map != null; map = map.Successor)
+ {
+ if (map.PrivateAllocation.IsValid)
+ {
+ _baseMemory.LateMapView(map.PrivateAllocation.Memory, map.PrivateAllocation.Offset, map.Address - Address, map.Size);
+ }
+ }
+
+ MemoryBlock firstPageMemory = _firstPageMemoryForUnmap;
+ ulong firstPageOffset = _firstPageOffsetForLateMap;
+
+ if (firstPageMemory != null)
+ {
+ _baseMemory.LateMapView(firstPageMemory, firstPageOffset, Size, _hostPageSize);
+ }
+ }
+
+ public PrivateRange GetFirstPrivateAllocation(ulong va, ulong size, out ulong nextVa)
+ {
+ _treeLock.EnterReadLock();
+
+ try
+ {
+ PrivateMapping map = _privateTree.GetNode(va);
+
+ nextVa = map.EndAddress;
+
+ if (map != null && map.PrivateAllocation.IsValid)
+ {
+ ulong startOffset = va - map.Address;
+
+ return new(
+ map.PrivateAllocation.Memory,
+ map.PrivateAllocation.Offset + startOffset,
+ Math.Min(map.PrivateAllocation.Size - startOffset, size));
+ }
+ }
+ finally
+ {
+ _treeLock.ExitReadLock();
+ }
+
+ return PrivateRange.Empty;
+ }
+
+ public bool HasPrivateAllocation(ulong va, ulong size, ulong startVa, ulong startSize, ref PrivateRange range)
+ {
+ ulong endVa = va + size;
+
+ _treeLock.EnterReadLock();
+
+ try
+ {
+ for (PrivateMapping map = _privateTree.GetNode(va); map != null && map.Address < endVa; map = map.Successor)
+ {
+ if (map.PrivateAllocation.IsValid)
+ {
+ if (map.Address <= startVa && map.EndAddress >= startVa + startSize)
+ {
+ ulong startOffset = startVa - map.Address;
+
+ range = new(
+ map.PrivateAllocation.Memory,
+ map.PrivateAllocation.Offset + startOffset,
+ Math.Min(map.PrivateAllocation.Size - startOffset, startSize));
+ }
+
+ return true;
+ }
+ }
+ }
+ finally
+ {
+ _treeLock.ExitReadLock();
+ }
+
+ return false;
+ }
+
+ public void Dispose()
+ {
+ GC.SuppressFinalize(this);
+
+ _privateMemoryAllocator.Dispose();
+ _baseMemory.Dispose();
+ }
+ }
+}
diff --git a/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartitionAllocator.cs b/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartitionAllocator.cs
new file mode 100644
index 0000000000..44dedb6404
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartitionAllocator.cs
@@ -0,0 +1,202 @@
+using Ryujinx.Common;
+using Ryujinx.Common.Collections;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Tracking;
+using System;
+
+namespace Ryujinx.Cpu.Jit.HostTracked
+{
+ readonly struct AddressSpacePartitionAllocation : IDisposable
+ {
+ private readonly AddressSpacePartitionAllocator _owner;
+ private readonly PrivateMemoryAllocatorImpl<AddressSpacePartitionAllocator.Block>.Allocation _allocation;
+
+ public IntPtr Pointer => (IntPtr)((ulong)_allocation.Block.Memory.Pointer + _allocation.Offset);
+
+ public bool IsValid => _owner != null;
+
+ public AddressSpacePartitionAllocation(
+ AddressSpacePartitionAllocator owner,
+ PrivateMemoryAllocatorImpl<AddressSpacePartitionAllocator.Block>.Allocation allocation)
+ {
+ _owner = owner;
+ _allocation = allocation;
+ }
+
+ public void RegisterMapping(ulong va, ulong endVa)
+ {
+ _allocation.Block.AddMapping(_allocation.Offset, _allocation.Size, va, endVa);
+ }
+
+ public void MapView(MemoryBlock srcBlock, ulong srcOffset, ulong dstOffset, ulong size)
+ {
+ _allocation.Block.Memory.MapView(srcBlock, srcOffset, _allocation.Offset + dstOffset, size);
+ }
+
+ public void UnmapView(MemoryBlock srcBlock, ulong offset, ulong size)
+ {
+ _allocation.Block.Memory.UnmapView(srcBlock, _allocation.Offset + offset, size);
+ }
+
+ public void Reprotect(ulong offset, ulong size, MemoryPermission permission, bool throwOnFail)
+ {
+ _allocation.Block.Memory.Reprotect(_allocation.Offset + offset, size, permission, throwOnFail);
+ }
+
+ public IntPtr GetPointer(ulong offset, ulong size)
+ {
+ return _allocation.Block.Memory.GetPointer(_allocation.Offset + offset, size);
+ }
+
+ public void Dispose()
+ {
+ _allocation.Block.RemoveMapping(_allocation.Offset, _allocation.Size);
+ _owner.Free(_allocation.Block, _allocation.Offset, _allocation.Size);
+ }
+ }
+
+ class AddressSpacePartitionAllocator : PrivateMemoryAllocatorImpl<AddressSpacePartitionAllocator.Block>
+ {
+ private const ulong DefaultBlockAlignment = 1UL << 32; // 4GB
+
+ public class Block : PrivateMemoryAllocator.Block
+ {
+ private readonly MemoryTracking _tracking;
+ private readonly Func<ulong, ulong> _readPtCallback;
+ private readonly MemoryEhMeilleure _memoryEh;
+
+ private class Mapping : IntrusiveRedBlackTreeNode<Mapping>, IComparable<Mapping>, IComparable<ulong>
+ {
+ public ulong Address { get; }
+ public ulong Size { get; }
+ public ulong EndAddress => Address + Size;
+ public ulong Va { get; }
+ public ulong EndVa { get; }
+
+ public Mapping(ulong address, ulong size, ulong va, ulong endVa)
+ {
+ Address = address;
+ Size = size;
+ Va = va;
+ EndVa = endVa;
+ }
+
+ public int CompareTo(Mapping other)
+ {
+ if (Address < other.Address)
+ {
+ return -1;
+ }
+ else if (Address <= other.EndAddress - 1UL)
+ {
+ return 0;
+ }
+ else
+ {
+ return 1;
+ }
+ }
+
+ public int CompareTo(ulong address)
+ {
+ if (address < Address)
+ {
+ return -1;
+ }
+ else if (address <= EndAddress - 1UL)
+ {
+ return 0;
+ }
+ else
+ {
+ return 1;
+ }
+ }
+ }
+
+ private readonly AddressIntrusiveRedBlackTree<Mapping> _mappingTree;
+ private readonly object _lock;
+
+ public Block(MemoryTracking tracking, Func<ulong, ulong> readPtCallback, MemoryBlock memory, ulong size, object locker) : base(memory, size)
+ {
+ _tracking = tracking;
+ _readPtCallback = readPtCallback;
+ _memoryEh = new(memory, null, tracking, VirtualMemoryEvent);
+ _mappingTree = new();
+ _lock = locker;
+ }
+
+ public void AddMapping(ulong offset, ulong size, ulong va, ulong endVa)
+ {
+ _mappingTree.Add(new(offset, size, va, endVa));
+ }
+
+ public void RemoveMapping(ulong offset, ulong size)
+ {
+ _mappingTree.Remove(_mappingTree.GetNode(offset));
+ }
+
+ private ulong VirtualMemoryEvent(ulong address, ulong size, bool write)
+ {
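+ // Translate the faulting offset within this block back to a guest VA using the mapping tree,
+ // forward a page-aligned event to memory tracking, then return the updated host pointer
+ // (read from the native page table) so the signal handler can patch the faulting register.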
+ Mapping map;
+
+ lock (_lock)
+ {
+ map = _mappingTree.GetNode(address);
+ }
+
+ if (map == null)
+ {
+ return 0;
+ }
+
+ address -= map.Address;
+
+ ulong addressAligned = BitUtils.AlignDown(address, AddressSpacePartition.GuestPageSize);
+ ulong endAddressAligned = BitUtils.AlignUp(address + size, AddressSpacePartition.GuestPageSize);
+ ulong sizeAligned = endAddressAligned - addressAligned;
+
+ if (!_tracking.VirtualMemoryEvent(map.Va + addressAligned, sizeAligned, write))
+ {
+ return 0;
+ }
+
+ return _readPtCallback(map.Va + address);
+ }
+
+ public override void Destroy()
+ {
+ _memoryEh.Dispose();
+
+ base.Destroy();
+ }
+ }
+
+ private readonly MemoryTracking _tracking;
+ private readonly Func<ulong, ulong> _readPtCallback;
+ private readonly object _lock;
+
+ public AddressSpacePartitionAllocator(
+ MemoryTracking tracking,
+ Func<ulong, ulong> readPtCallback,
+ object locker) : base(DefaultBlockAlignment, MemoryAllocationFlags.Reserve | MemoryAllocationFlags.ViewCompatible)
+ {
+ _tracking = tracking;
+ _readPtCallback = readPtCallback;
+ _lock = locker;
+ }
+
+ public AddressSpacePartitionAllocation Allocate(ulong va, ulong size)
+ {
+ AddressSpacePartitionAllocation allocation = new(this, Allocate(size, MemoryBlock.GetPageSize(), CreateBlock));
+ allocation.RegisterMapping(va, va + size);
+
+ return allocation;
+ }
+
+ private Block CreateBlock(MemoryBlock memory, ulong size)
+ {
+ return new Block(_tracking, _readPtCallback, memory, size, _lock);
+ }
+ }
+}
diff --git a/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartitionMultiAllocation.cs b/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartitionMultiAllocation.cs
new file mode 100644
index 0000000000..3b065583f8
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartitionMultiAllocation.cs
@@ -0,0 +1,101 @@
+using Ryujinx.Memory;
+using System;
+using System.Diagnostics;
+
+namespace Ryujinx.Cpu.Jit.HostTracked
+{
+ class AddressSpacePartitionMultiAllocation : IDisposable
+ {
+ private readonly AddressSpacePartitionAllocation _baseMemory;
+ private AddressSpacePartitionAllocation _baseMemoryRo;
+ private AddressSpacePartitionAllocation _baseMemoryNone;
+
+ public AddressSpacePartitionMultiAllocation(AddressSpacePartitionAllocation baseMemory)
+ {
+ _baseMemory = baseMemory;
+ }
+
+ public void MapView(MemoryBlock srcBlock, ulong srcOffset, ulong dstOffset, ulong size)
+ {
+ _baseMemory.MapView(srcBlock, srcOffset, dstOffset, size);
+
+ if (_baseMemoryRo.IsValid)
+ {
+ _baseMemoryRo.MapView(srcBlock, srcOffset, dstOffset, size);
+ _baseMemoryRo.Reprotect(dstOffset, size, MemoryPermission.Read, false);
+ }
+ }
+
+ public void LateMapView(MemoryBlock srcBlock, ulong srcOffset, ulong dstOffset, ulong size)
+ {
+ _baseMemoryRo.MapView(srcBlock, srcOffset, dstOffset, size);
+ _baseMemoryRo.Reprotect(dstOffset, size, MemoryPermission.Read, false);
+ }
+
+ public void UnmapView(MemoryBlock srcBlock, ulong offset, ulong size)
+ {
+ _baseMemory.UnmapView(srcBlock, offset, size);
+
+ if (_baseMemoryRo.IsValid)
+ {
+ _baseMemoryRo.UnmapView(srcBlock, offset, size);
+ }
+ }
+
+ public void Reprotect(ulong offset, ulong size, MemoryPermission permission, bool throwOnFail)
+ {
+ _baseMemory.Reprotect(offset, size, permission, throwOnFail);
+ }
+
+ public IntPtr GetPointer(ulong offset, ulong size)
+ {
+ return _baseMemory.GetPointer(offset, size);
+ }
+
+ public bool LazyInitMirrorForProtection(AddressSpacePartitioned addressSpace, ulong blockAddress, ulong blockSize, MemoryPermission permission)
+ {
+ if (permission == MemoryPermission.None && !_baseMemoryNone.IsValid)
+ {
+ _baseMemoryNone = addressSpace.CreateAsPartitionAllocation(blockAddress, blockSize);
+ }
+ else if (permission == MemoryPermission.Read && !_baseMemoryRo.IsValid)
+ {
+ _baseMemoryRo = addressSpace.CreateAsPartitionAllocation(blockAddress, blockSize);
+
+ return true;
+ }
+
+ return false;
+ }
+
+ public IntPtr GetPointerForProtection(ulong offset, ulong size, MemoryPermission permission)
+ {
+ AddressSpacePartitionAllocation allocation = permission switch
+ {
+ MemoryPermission.ReadAndWrite => _baseMemory,
+ MemoryPermission.Read => _baseMemoryRo,
+ MemoryPermission.None => _baseMemoryNone,
+ _ => throw new ArgumentException($"Invalid protection \"{permission}\"."),
+ };
+
+ Debug.Assert(allocation.IsValid);
+
+ return allocation.GetPointer(offset, size);
+ }
+
+ public void Dispose()
+ {
+ _baseMemory.Dispose();
+
+ if (_baseMemoryRo.IsValid)
+ {
+ _baseMemoryRo.Dispose();
+ }
+
+ if (_baseMemoryNone.IsValid)
+ {
+ _baseMemoryNone.Dispose();
+ }
+ }
+ }
+}
diff --git a/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartitioned.cs b/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartitioned.cs
new file mode 100644
index 0000000000..2cf2c248b2
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/HostTracked/AddressSpacePartitioned.cs
@@ -0,0 +1,407 @@
+using Ryujinx.Common;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Tracking;
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+
+namespace Ryujinx.Cpu.Jit.HostTracked
+{
+ class AddressSpacePartitioned : IDisposable
+ {
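+ // The guest address space is carved into 32 MiB (1 << 25) partitions, created lazily as ranges are mapped.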
+ private const int PartitionBits = 25;
+ private const ulong PartitionSize = 1UL << PartitionBits;
+
+ private readonly MemoryBlock _backingMemory;
+ private readonly List<AddressSpacePartition> _partitions;
+ private readonly AddressSpacePartitionAllocator _asAllocator;
+ private readonly Action<ulong, IntPtr, ulong> _updatePtCallback;
+ private readonly bool _useProtectionMirrors;
+
+ public AddressSpacePartitioned(MemoryTracking tracking, MemoryBlock backingMemory, NativePageTable nativePageTable, bool useProtectionMirrors)
+ {
+ _backingMemory = backingMemory;
+ _partitions = new();
+ _asAllocator = new(tracking, nativePageTable.Read, _partitions);
+ _updatePtCallback = nativePageTable.Update;
+ _useProtectionMirrors = useProtectionMirrors;
+ }
+
+ public void Map(ulong va, ulong pa, ulong size)
+ {
+ ulong endVa = va + size;
+
+ lock (_partitions)
+ {
+ EnsurePartitionsLocked(va, size);
+
+ while (va < endVa)
+ {
+ int partitionIndex = FindPartitionIndexLocked(va);
+ AddressSpacePartition partition = _partitions[partitionIndex];
+
+ (ulong clampedVa, ulong clampedEndVa) = ClampRange(partition, va, endVa);
+
+ partition.Map(clampedVa, pa, clampedEndVa - clampedVa);
+
+ ulong currentSize = clampedEndVa - clampedVa;
+
+ va += currentSize;
+ pa += currentSize;
+
+ InsertOrRemoveBridgeIfNeeded(partitionIndex);
+ }
+ }
+ }
+
+ public void Unmap(ulong va, ulong size)
+ {
+ ulong endVa = va + size;
+
+ while (va < endVa)
+ {
+ AddressSpacePartition partition;
+
+ lock (_partitions)
+ {
+ int partitionIndex = FindPartitionIndexLocked(va);
+ if (partitionIndex < 0)
+ {
+ va += PartitionSize - (va & (PartitionSize - 1));
+
+ continue;
+ }
+
+ partition = _partitions[partitionIndex];
+
+ (ulong clampedVa, ulong clampedEndVa) = ClampRange(partition, va, endVa);
+
+ partition.Unmap(clampedVa, clampedEndVa - clampedVa);
+
+ va += clampedEndVa - clampedVa;
+
+ InsertOrRemoveBridgeIfNeeded(partitionIndex);
+
+ if (partition.IsEmpty())
+ {
+ _partitions.Remove(partition);
+ partition.Dispose();
+ }
+ }
+ }
+ }
+
+ public void Reprotect(ulong va, ulong size, MemoryPermission protection)
+ {
+ ulong endVa = va + size;
+
+ lock (_partitions)
+ {
+ while (va < endVa)
+ {
+ AddressSpacePartition partition = FindPartitionWithIndex(va, out int partitionIndex);
+
+ if (partition == null)
+ {
+ va += PartitionSize - (va & (PartitionSize - 1));
+
+ continue;
+ }
+
+ (ulong clampedVa, ulong clampedEndVa) = ClampRange(partition, va, endVa);
+
+ if (_useProtectionMirrors)
+ {
+ partition.Reprotect(clampedVa, clampedEndVa - clampedVa, protection, this, _updatePtCallback);
+ }
+ else
+ {
+ partition.ReprotectAligned(clampedVa, clampedEndVa - clampedVa, protection);
+
+ if (clampedVa == partition.Address &&
+ partitionIndex > 0 &&
+ _partitions[partitionIndex - 1].EndAddress == partition.Address)
+ {
+ _partitions[partitionIndex - 1].ReprotectBridge(protection);
+ }
+ }
+
+ va += clampedEndVa - clampedVa;
+ }
+ }
+ }
+
+ public PrivateRange GetPrivateAllocation(ulong va)
+ {
+ AddressSpacePartition partition = FindPartition(va);
+
+ if (partition == null)
+ {
+ return PrivateRange.Empty;
+ }
+
+ return partition.GetPrivateAllocation(va);
+ }
+
+ public PrivateRange GetFirstPrivateAllocation(ulong va, ulong size, out ulong nextVa)
+ {
+ AddressSpacePartition partition = FindPartition(va);
+
+ if (partition == null)
+ {
+ nextVa = (va & ~(PartitionSize - 1)) + PartitionSize;
+
+ return PrivateRange.Empty;
+ }
+
+ return partition.GetFirstPrivateAllocation(va, size, out nextVa);
+ }
+
+ public bool HasAnyPrivateAllocation(ulong va, ulong size, out PrivateRange range)
+ {
+ range = PrivateRange.Empty;
+
+ ulong startVa = va;
+ ulong endVa = va + size;
+
+ while (va < endVa)
+ {
+ AddressSpacePartition partition = FindPartition(va);
+
+ if (partition == null)
+ {
+ va += PartitionSize - (va & (PartitionSize - 1));
+
+ continue;
+ }
+
+ (ulong clampedVa, ulong clampedEndVa) = ClampRange(partition, va, endVa);
+
+ if (partition.HasPrivateAllocation(clampedVa, clampedEndVa - clampedVa, startVa, size, ref range))
+ {
+ return true;
+ }
+
+ va += clampedEndVa - clampedVa;
+ }
+
+ return false;
+ }
+
+ private void InsertOrRemoveBridgeIfNeeded(int partitionIndex)
+ {
+ if (partitionIndex > 0)
+ {
+ if (_partitions[partitionIndex - 1].EndAddress == _partitions[partitionIndex].Address)
+ {
+ _partitions[partitionIndex - 1].InsertBridgeAtEnd(_partitions[partitionIndex], _useProtectionMirrors);
+ }
+ else
+ {
+ _partitions[partitionIndex - 1].InsertBridgeAtEnd(null, _useProtectionMirrors);
+ }
+ }
+
+ if (partitionIndex + 1 < _partitions.Count && _partitions[partitionIndex].EndAddress == _partitions[partitionIndex + 1].Address)
+ {
+ _partitions[partitionIndex].InsertBridgeAtEnd(_partitions[partitionIndex + 1], _useProtectionMirrors);
+ }
+ else
+ {
+ _partitions[partitionIndex].InsertBridgeAtEnd(null, _useProtectionMirrors);
+ }
+ }
+
+ public IntPtr GetPointer(ulong va, ulong size)
+ {
+ AddressSpacePartition partition = FindPartition(va);
+
+ return partition.GetPointer(va, size);
+ }
+
+ private static (ulong, ulong) ClampRange(AddressSpacePartition partition, ulong va, ulong endVa)
+ {
+ if (va < partition.Address)
+ {
+ va = partition.Address;
+ }
+
+ if (endVa > partition.EndAddress)
+ {
+ endVa = partition.EndAddress;
+ }
+
+ return (va, endVa);
+ }
+
+ private AddressSpacePartition FindPartition(ulong va)
+ {
+ lock (_partitions)
+ {
+ int index = FindPartitionIndexLocked(va);
+ if (index >= 0)
+ {
+ return _partitions[index];
+ }
+ }
+
+ return null;
+ }
+
+ private AddressSpacePartition FindPartitionWithIndex(ulong va, out int index)
+ {
+ lock (_partitions)
+ {
+ index = FindPartitionIndexLocked(va);
+ if (index >= 0)
+ {
+ return _partitions[index];
+ }
+ }
+
+ return null;
+ }
+
+ private int FindPartitionIndexLocked(ulong va)
+ {
+ int left = 0;
+ int middle;
+ int right = _partitions.Count - 1;
+
+ while (left <= right)
+ {
+ middle = left + ((right - left) >> 1);
+
+ AddressSpacePartition partition = _partitions[middle];
+
+ if (partition.Address <= va && partition.EndAddress > va)
+ {
+ return middle;
+ }
+
+ if (partition.Address >= va)
+ {
+ right = middle - 1;
+ }
+ else
+ {
+ left = middle + 1;
+ }
+ }
+
+ return -1;
+ }
+
+ private void EnsurePartitionsLocked(ulong va, ulong size)
+ {
+ ulong endVa = BitUtils.AlignUp(va + size, PartitionSize);
+ va = BitUtils.AlignDown(va, PartitionSize);
+
+ for (int i = 0; i < _partitions.Count && va < endVa; i++)
+ {
+ AddressSpacePartition partition = _partitions[i];
+
+ if (partition.Address <= va && partition.EndAddress > va)
+ {
+ if (partition.EndAddress >= endVa)
+ {
+ // Fully mapped already.
+ va = endVa;
+
+ break;
+ }
+
+ ulong gapSize;
+
+ if (i + 1 < _partitions.Count)
+ {
+ AddressSpacePartition nextPartition = _partitions[i + 1];
+
+ if (partition.EndAddress == nextPartition.Address)
+ {
+ va = partition.EndAddress;
+
+ continue;
+ }
+
+ gapSize = Math.Min(endVa, nextPartition.Address) - partition.EndAddress;
+ }
+ else
+ {
+ gapSize = endVa - partition.EndAddress;
+ }
+
+ _partitions.Insert(i + 1, CreateAsPartition(partition.EndAddress, gapSize));
+ va = partition.EndAddress + gapSize;
+ i++;
+ }
+ else if (partition.EndAddress > va)
+ {
+ Debug.Assert(partition.Address > va);
+
+ ulong gapSize;
+
+ if (partition.Address < endVa)
+ {
+ gapSize = partition.Address - va;
+ }
+ else
+ {
+ gapSize = endVa - va;
+ }
+
+ _partitions.Insert(i, CreateAsPartition(va, gapSize));
+ va = Math.Min(partition.EndAddress, endVa);
+ i++;
+ }
+ }
+
+ if (va < endVa)
+ {
+ _partitions.Add(CreateAsPartition(va, endVa - va));
+ }
+
+ ValidatePartitionList();
+ }
+
+ [Conditional("DEBUG")]
+ private void ValidatePartitionList()
+ {
+ for (int i = 1; i < _partitions.Count; i++)
+ {
+ Debug.Assert(_partitions[i].Address > _partitions[i - 1].Address);
+ Debug.Assert(_partitions[i].EndAddress > _partitions[i - 1].EndAddress);
+ }
+ }
+
+ private AddressSpacePartition CreateAsPartition(ulong va, ulong size)
+ {
+ return new(CreateAsPartitionAllocation(va, size), _backingMemory, va, size);
+ }
+
+ public AddressSpacePartitionAllocation CreateAsPartitionAllocation(ulong va, ulong size)
+ {
+ return _asAllocator.Allocate(va, size + MemoryBlock.GetPageSize());
+ }
+
+ protected virtual void Dispose(bool disposing)
+ {
+ if (disposing)
+ {
+ foreach (AddressSpacePartition partition in _partitions)
+ {
+ partition.Dispose();
+ }
+
+ _partitions.Clear();
+ _asAllocator.Dispose();
+ }
+ }
+
+ public void Dispose()
+ {
+ Dispose(disposing: true);
+ GC.SuppressFinalize(this);
+ }
+ }
+}
diff --git a/src/Ryujinx.Cpu/Jit/HostTracked/NativePageTable.cs b/src/Ryujinx.Cpu/Jit/HostTracked/NativePageTable.cs
new file mode 100644
index 0000000000..e3174e3fc5
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/HostTracked/NativePageTable.cs
@@ -0,0 +1,223 @@
+using Ryujinx.Cpu.Signal;
+using Ryujinx.Memory;
+using System;
+using System.Diagnostics;
+using System.Numerics;
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.Cpu.Jit.HostTracked
+{
+ sealed class NativePageTable : IDisposable
+ {
+ private delegate ulong TrackingEventDelegate(ulong address, ulong size, bool write);
+
+ private const int PageBits = 12;
+ private const int PageSize = 1 << PageBits;
+ private const int PageMask = PageSize - 1;
+
+ private const int PteSize = 8;
+
+ private readonly int _bitsPerPtPage;
+ private readonly int _entriesPerPtPage;
+ private readonly int _pageCommitmentBits;
+
+ private readonly PageTable<ulong> _pageTable;
+ private readonly MemoryBlock _nativePageTable;
+ private readonly ulong[] _pageCommitmentBitmap;
+ private readonly ulong _hostPageSize;
+
+ private readonly TrackingEventDelegate _trackingEvent;
+
+ private bool _disposed;
+
+ public IntPtr PageTablePointer => _nativePageTable.Pointer;
+
+ public NativePageTable(ulong asSize)
+ {
+ ulong hostPageSize = MemoryBlock.GetPageSize();
+
+ _entriesPerPtPage = (int)(hostPageSize / sizeof(ulong));
+ _bitsPerPtPage = BitOperations.Log2((uint)_entriesPerPtPage);
+ _pageCommitmentBits = PageBits + _bitsPerPtPage;
+
+ _hostPageSize = hostPageSize;
+ _pageTable = new PageTable<ulong>();
+ _nativePageTable = new MemoryBlock((asSize / PageSize) * PteSize + _hostPageSize, MemoryAllocationFlags.Reserve);
+ _pageCommitmentBitmap = new ulong[(asSize >> _pageCommitmentBits) / (sizeof(ulong) * 8)];
+
+ ulong ptStart = (ulong)_nativePageTable.Pointer;
+ ulong ptEnd = ptStart + _nativePageTable.Size;
+
+ _trackingEvent = VirtualMemoryEvent;
+
+ bool added = NativeSignalHandler.AddTrackedRegion((nuint)ptStart, (nuint)ptEnd, Marshal.GetFunctionPointerForDelegate(_trackingEvent));
+
+ if (!added)
+ {
+ throw new InvalidOperationException("Number of allowed tracked regions exceeded.");
+ }
+ }
+
+ public void Map(ulong va, ulong pa, ulong size, AddressSpacePartitioned addressSpace, MemoryBlock backingMemory, bool privateMap)
+ {
+ while (size != 0)
+ {
+ _pageTable.Map(va, pa);
+
+ EnsureCommitment(va);
+
+ if (privateMap)
+ {
+ _nativePageTable.Write((va / PageSize) * PteSize, GetPte(va, addressSpace.GetPointer(va, PageSize)));
+ }
+ else
+ {
+ _nativePageTable.Write((va / PageSize) * PteSize, GetPte(va, backingMemory.GetPointer(pa, PageSize)));
+ }
+
+ va += PageSize;
+ pa += PageSize;
+ size -= PageSize;
+ }
+ }
+
+ public void Unmap(ulong va, ulong size)
+ {
+ IntPtr guardPagePtr = GetGuardPagePointer();
+
+ while (size != 0)
+ {
+ _pageTable.Unmap(va);
+ _nativePageTable.Write((va / PageSize) * PteSize, GetPte(va, guardPagePtr));
+
+ va += PageSize;
+ size -= PageSize;
+ }
+ }
+
+ public ulong Read(ulong va)
+ {
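+ // Each PTE stores (hostPointer - guestPageVa); adding back the page-aligned VA and the
+ // in-page offset reconstructs the full host pointer for this guest address.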
+ ulong pte = _nativePageTable.Read<ulong>((va / PageSize) * PteSize);
+
+ pte += va & ~(ulong)PageMask;
+
+ return pte + (va & PageMask);
+ }
+
+ public void Update(ulong va, IntPtr ptr, ulong size)
+ {
+ ulong remainingSize = size;
+
+ while (remainingSize != 0)
+ {
+ EnsureCommitment(va);
+
+ _nativePageTable.Write((va / PageSize) * PteSize, GetPte(va, ptr));
+
+ va += PageSize;
+ ptr += PageSize;
+ remainingSize -= PageSize;
+ }
+ }
+
+ private void EnsureCommitment(ulong va)
+ {
+ ulong bit = va >> _pageCommitmentBits;
+
+ int index = (int)(bit / (sizeof(ulong) * 8));
+ int shift = (int)(bit % (sizeof(ulong) * 8));
+
+ ulong mask = 1UL << shift;
+
+ ulong oldMask = _pageCommitmentBitmap[index];
+
+ if ((oldMask & mask) == 0)
+ {
+ lock (_pageCommitmentBitmap)
+ {
+ oldMask = _pageCommitmentBitmap[index];
+
+ if ((oldMask & mask) != 0)
+ {
+ return;
+ }
+
+ _nativePageTable.Commit(bit * _hostPageSize, _hostPageSize);
+
+ Span<ulong> pageSpan = MemoryMarshal.Cast<byte, ulong>(_nativePageTable.GetSpan(bit * _hostPageSize, (int)_hostPageSize));
+
+ Debug.Assert(pageSpan.Length == _entriesPerPtPage);
+
+ IntPtr guardPagePtr = GetGuardPagePointer();
+
+ for (int i = 0; i < pageSpan.Length; i++)
+ {
+ pageSpan[i] = GetPte((bit << _pageCommitmentBits) | ((ulong)i * PageSize), guardPagePtr);
+ }
+
+ _pageCommitmentBitmap[index] = oldMask | mask;
+ }
+ }
+ }
+
+ private IntPtr GetGuardPagePointer()
+ {
+ return _nativePageTable.GetPointer(_nativePageTable.Size - _hostPageSize, _hostPageSize);
+ }
+
+ private static ulong GetPte(ulong va, IntPtr ptr)
+ {
+ Debug.Assert((va & PageMask) == 0);
+
+ return (ulong)ptr - va;
+ }
+
+ public ulong GetPhysicalAddress(ulong va)
+ {
+ return _pageTable.Read(va) + (va & PageMask);
+ }
+
+ private ulong VirtualMemoryEvent(ulong address, ulong size, bool write)
+ {
+ if (address < _nativePageTable.Size - _hostPageSize)
+ {
+ // Some prefetch instructions do not cause faults with invalid addresses.
+ // Retry if we are hitting a case where the page table is unmapped, the next
+ // run will execute the actual instruction.
+ // The address loaded from the page table will be invalid, and it should hit the else case
+ // if the instruction faults on unmapped or protected memory.
+
+ ulong va = address * (PageSize / sizeof(ulong));
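+ // Each 8-byte PTE covers one 4 KiB guest page, so a byte offset into the table maps back to
+ // a guest VA by multiplying by PageSize / PteSize (512).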
+
+ EnsureCommitment(va);
+
+ return (ulong)_nativePageTable.Pointer + address;
+ }
+ else
+ {
+ throw new InvalidMemoryRegionException();
+ }
+ }
+
+ private void Dispose(bool disposing)
+ {
+ if (!_disposed)
+ {
+ if (disposing)
+ {
+ NativeSignalHandler.RemoveTrackedRegion((nuint)_nativePageTable.Pointer);
+
+ _nativePageTable.Dispose();
+ }
+
+ _disposed = true;
+ }
+ }
+
+ public void Dispose()
+ {
+ Dispose(disposing: true);
+ GC.SuppressFinalize(this);
+ }
+ }
+}
diff --git a/src/Ryujinx.Cpu/Jit/JitCpuContext.cs b/src/Ryujinx.Cpu/Jit/JitCpuContext.cs
index dce0490a41..9893c59b29 100644
--- a/src/Ryujinx.Cpu/Jit/JitCpuContext.cs
+++ b/src/Ryujinx.Cpu/Jit/JitCpuContext.cs
@@ -15,9 +15,9 @@ namespace Ryujinx.Cpu.Jit
_tickSource = tickSource;
_translator = new Translator(new JitMemoryAllocator(forJit: true), memory, for64Bit);
- if (memory.Type.IsHostMapped())
+ if (memory.Type.IsHostMappedOrTracked())
{
- NativeSignalHandler.InitializeSignalHandler(MemoryBlock.GetPageSize());
+ NativeSignalHandler.InitializeSignalHandler();
}
memory.UnmapEvent += UnmapHandler;
diff --git a/src/Ryujinx.Cpu/Jit/MemoryManagerHostTracked.cs b/src/Ryujinx.Cpu/Jit/MemoryManagerHostTracked.cs
new file mode 100644
index 0000000000..18404bcc74
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/MemoryManagerHostTracked.cs
@@ -0,0 +1,627 @@
+using ARMeilleure.Memory;
+using Ryujinx.Cpu.Jit.HostTracked;
+using Ryujinx.Cpu.Signal;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Range;
+using Ryujinx.Memory.Tracking;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.Cpu.Jit
+{
+ /// <summary>
+ /// Represents a CPU memory manager which maps guest virtual memory directly onto a host virtual region.
+ /// </summary>
+ public sealed class MemoryManagerHostTracked : VirtualMemoryManagerRefCountedBase<ulong, ulong>, IWritableBlock, IMemoryManager, IVirtualMemoryManagerTracked
+ {
+ private readonly InvalidAccessHandler _invalidAccessHandler;
+ private readonly bool _unsafeMode;
+
+ private readonly MemoryBlock _backingMemory;
+
+ public int AddressSpaceBits { get; }
+
+ public MemoryTracking Tracking { get; }
+
+ private readonly NativePageTable _nativePageTable;
+ private readonly AddressSpacePartitioned _addressSpace;
+
+ private readonly ManagedPageFlags _pages;
+
+ protected override ulong AddressSpaceSize { get; }
+
+ /// <inheritdoc/>
+ public bool Supports4KBPages => false;
+
+ public IntPtr PageTablePointer => _nativePageTable.PageTablePointer;
+
+ public MemoryManagerType Type => _unsafeMode ? MemoryManagerType.HostTrackedUnsafe : MemoryManagerType.HostTracked;
+
+ public event Action<ulong, ulong> UnmapEvent;
+
+ /// <summary>
+ /// Creates a new instance of the host tracked memory manager.
+ /// </summary>
+ /// <param name="backingMemory">Physical backing memory where virtual memory will be mapped to</param>
+ /// <param name="addressSpaceSize">Size of the address space</param>
+ /// <param name="unsafeMode">True if unmanaged access should not be masked (unsafe), false otherwise.</param>
+ /// <param name="invalidAccessHandler">Optional function to handle invalid memory accesses</param>
+ public MemoryManagerHostTracked(MemoryBlock backingMemory, ulong addressSpaceSize, bool unsafeMode, InvalidAccessHandler invalidAccessHandler)
+ {
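+ // When the host page size exceeds the 4 KiB guest page size (e.g. 16 KiB pages on Apple Silicon),
+ // guest-granular protection is emulated with read-only/no-access mirrors of each partition
+ // instead of reprotecting the primary mapping directly.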
+ bool useProtectionMirrors = MemoryBlock.GetPageSize() > PageSize;
+
+ Tracking = new MemoryTracking(this, PageSize, invalidAccessHandler, useProtectionMirrors);
+
+ _backingMemory = backingMemory;
+ _invalidAccessHandler = invalidAccessHandler;
+ _unsafeMode = unsafeMode;
+ AddressSpaceSize = addressSpaceSize;
+
+ ulong asSize = PageSize;
+ int asBits = PageBits;
+
+ while (asSize < AddressSpaceSize)
+ {
+ asSize <<= 1;
+ asBits++;
+ }
+
+ AddressSpaceBits = asBits;
+
+ if (useProtectionMirrors && !NativeSignalHandler.SupportsFaultAddressPatching())
+ {
+ // Currently we require being able to change the fault address to something else
+ // in order to "emulate" 4KB granularity protection on systems with larger page size.
+
+ throw new PlatformNotSupportedException();
+ }
+
+ _pages = new ManagedPageFlags(asBits);
+ _nativePageTable = new(asSize);
+ _addressSpace = new(Tracking, backingMemory, _nativePageTable, useProtectionMirrors);
+ }
+
+ /// <inheritdoc/>
+ public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags)
+ {
+ AssertValidAddressAndSize(va, size);
+
+ if (flags.HasFlag(MemoryMapFlags.Private))
+ {
+ _addressSpace.Map(va, pa, size);
+ }
+
+ _pages.AddMapping(va, size);
+ _nativePageTable.Map(va, pa, size, _addressSpace, _backingMemory, flags.HasFlag(MemoryMapFlags.Private));
+
+ Tracking.Map(va, size);
+ }
+
+ /// <inheritdoc/>
+ public void MapForeign(ulong va, nuint hostPointer, ulong size)
+ {
+ throw new NotSupportedException();
+ }
+
+ /// <inheritdoc/>
+ public void Unmap(ulong va, ulong size)
+ {
+ AssertValidAddressAndSize(va, size);
+
+ _addressSpace.Unmap(va, size);
+
+ UnmapEvent?.Invoke(va, size);
+ Tracking.Unmap(va, size);
+
+ _pages.RemoveMapping(va, size);
+ _nativePageTable.Unmap(va, size);
+ }
+
+ public T Read<T>(ulong va) where T : unmanaged
+ {
+ return MemoryMarshal.Cast<byte, T>(GetSpan(va, Unsafe.SizeOf<T>()))[0];
+ }
+
+ public T ReadTracked<T>(ulong va) where T : unmanaged
+ {
+ try
+ {
+ SignalMemoryTracking(va, (ulong)Unsafe.SizeOf<T>(), false);
+
+ return Read<T>(va);
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
+ {
+ throw;
+ }
+
+ return default;
+ }
+ }
+
+ public override void Read(ulong va, Span<byte> data)
+ {
+ ReadImpl(va, data);
+ }
+
+ public void Write<T>(ulong va, T value) where T : unmanaged
+ {
+ Write(va, MemoryMarshal.Cast<T, byte>(MemoryMarshal.CreateSpan(ref value, 1)));
+ }
+
+ public void Write(ulong va, ReadOnlySpan<byte> data)
+ {
+ if (data.Length == 0)
+ {
+ return;
+ }
+
+ SignalMemoryTracking(va, (ulong)data.Length, true);
+
+ WriteImpl(va, data);
+ }
+
+ public void WriteUntracked(ulong va, ReadOnlySpan<byte> data)
+ {
+ if (data.Length == 0)
+ {
+ return;
+ }
+
+ WriteImpl(va, data);
+ }
+
+ public bool WriteWithRedundancyCheck(ulong va, ReadOnlySpan<byte> data)
+ {
+ if (data.Length == 0)
+ {
+ return false;
+ }
+
+ SignalMemoryTracking(va, (ulong)data.Length, false);
+
+ if (TryGetVirtualContiguous(va, data.Length, out MemoryBlock memoryBlock, out ulong offset))
+ {
+ var target = memoryBlock.GetSpan(offset, data.Length);
+
+ bool changed = !data.SequenceEqual(target);
+
+ if (changed)
+ {
+ data.CopyTo(target);
+ }
+
+ return changed;
+ }
+ else
+ {
+ WriteImpl(va, data);
+
+ return true;
+ }
+ }
+
+ private void WriteImpl(ulong va, ReadOnlySpan<byte> data)
+ {
+ try
+ {
+ AssertValidAddressAndSize(va, (ulong)data.Length);
+
+ ulong endVa = va + (ulong)data.Length;
+ int offset = 0;
+
+ while (va < endVa)
+ {
+ (MemoryBlock memory, ulong rangeOffset, ulong copySize) = GetMemoryOffsetAndSize(va, (ulong)(data.Length - offset));
+
+ data.Slice(offset, (int)copySize).CopyTo(memory.GetSpan(rangeOffset, (int)copySize));
+
+ va += copySize;
+ offset += (int)copySize;
+ }
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
+ {
+ throw;
+ }
+ }
+ }
+
+ public ReadOnlySpan<byte> GetSpan(ulong va, int size, bool tracked = false)
+ {
+ if (size == 0)
+ {
+ return ReadOnlySpan<byte>.Empty;
+ }
+
+ if (tracked)
+ {
+ SignalMemoryTracking(va, (ulong)size, false);
+ }
+
+ if (TryGetVirtualContiguous(va, size, out MemoryBlock memoryBlock, out ulong offset))
+ {
+ return memoryBlock.GetSpan(offset, size);
+ }
+ else
+ {
+ Span<byte> data = new byte[size];
+
+ ReadImpl(va, data);
+
+ return data;
+ }
+ }
+
+ public WritableRegion GetWritableRegion(ulong va, int size, bool tracked = false)
+ {
+ if (size == 0)
+ {
+ return new WritableRegion(null, va, Memory<byte>.Empty);
+ }
+
+ if (tracked)
+ {
+ SignalMemoryTracking(va, (ulong)size, true);
+ }
+
+ if (TryGetVirtualContiguous(va, size, out MemoryBlock memoryBlock, out ulong offset))
+ {
+ return new WritableRegion(null, va, memoryBlock.GetMemory(offset, size));
+ }
+ else
+ {
+ Memory<byte> memory = new byte[size];
+
+ ReadImpl(va, memory.Span);
+
+ return new WritableRegion(this, va, memory);
+ }
+ }
+
+ public ref T GetRef<T>(ulong va) where T : unmanaged
+ {
+ if (!TryGetVirtualContiguous(va, Unsafe.SizeOf<T>(), out MemoryBlock memory, out ulong offset))
+ {
+ ThrowMemoryNotContiguous();
+ }
+
+ SignalMemoryTracking(va, (ulong)Unsafe.SizeOf<T>(), true);
+
+ return ref memory.GetRef<T>(offset);
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public bool IsMapped(ulong va)
+ {
+ return ValidateAddress(va) && _pages.IsMapped(va);
+ }
+
+ public bool IsRangeMapped(ulong va, ulong size)
+ {
+ AssertValidAddressAndSize(va, size);
+
+ return _pages.IsRangeMapped(va, size);
+ }
+
+ private static void ThrowMemoryNotContiguous() => throw new MemoryNotContiguousException();
+
+ private bool TryGetVirtualContiguous(ulong va, int size, out MemoryBlock memory, out ulong offset)
+ {
+ if (_addressSpace.HasAnyPrivateAllocation(va, (ulong)size, out PrivateRange range))
+ {
+ // If we have a private allocation overlapping the range,
+ // then the access is only considered contiguous if it covers the entire range.
+
+ if (range.Memory != null)
+ {
+ memory = range.Memory;
+ offset = range.Offset;
+
+ return true;
+ }
+
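+ // The private allocation does not cover the entire range contiguously,
+ // so the access cannot be serviced from a single span.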
+ memory = null;
+ offset = 0;
+
+ return false;
+ }
+
+ memory = _backingMemory;
+ offset = GetPhysicalAddressInternal(va);
+
+ return IsPhysicalContiguous(va, size);
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private bool IsPhysicalContiguous(ulong va, int size)
+ {
+ if (!ValidateAddress(va) || !ValidateAddressAndSize(va, (ulong)size))
+ {
+ return false;
+ }
+
+ int pages = GetPagesCount(va, (uint)size, out va);
+
+ for (int page = 0; page < pages - 1; page++)
+ {
+ if (!ValidateAddress(va + PageSize))
+ {
+ return false;
+ }
+
+ if (GetPhysicalAddressInternal(va) + PageSize != GetPhysicalAddressInternal(va + PageSize))
+ {
+ return false;
+ }
+
+ va += PageSize;
+ }
+
+ return true;
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private ulong GetContiguousSize(ulong va, ulong size)
+ {
+ ulong contiguousSize = PageSize - (va & PageMask);
+
+ if (!ValidateAddress(va) || !ValidateAddressAndSize(va, size))
+ {
+ return contiguousSize;
+ }
+
+ int pages = GetPagesCount(va, size, out va);
+
+ for (int page = 0; page < pages - 1; page++)
+ {
+ if (!ValidateAddress(va + PageSize))
+ {
+ return contiguousSize;
+ }
+
+ if (GetPhysicalAddressInternal(va) + PageSize != GetPhysicalAddressInternal(va + PageSize))
+ {
+ return contiguousSize;
+ }
+
+ va += PageSize;
+ contiguousSize += PageSize;
+ }
+
+ return Math.Min(contiguousSize, size);
+ }
+
+ private (MemoryBlock, ulong, ulong) GetMemoryOffsetAndSize(ulong va, ulong size)
+ {
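+ // Pick the backing for the next piece of the range: the private allocation covering va
+ // if there is one, otherwise the physically contiguous part of the shared backing memory
+ // up to the next private allocation.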
+ PrivateRange privateRange = _addressSpace.GetFirstPrivateAllocation(va, size, out ulong nextVa);
+
+ if (privateRange.Memory != null)
+ {
+ return (privateRange.Memory, privateRange.Offset, privateRange.Size);
+ }
+
+ ulong physSize = GetContiguousSize(va, Math.Min(size, nextVa - va));
+
+ return (_backingMemory, GetPhysicalAddressChecked(va), physSize);
+ }
+
+ public IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size)
+ {
+ if (!ValidateAddressAndSize(va, size))
+ {
+ return null;
+ }
+
+ var regions = new List<HostMemoryRange>();
+ ulong endVa = va + size;
+
+ try
+ {
+ while (va < endVa)
+ {
+ (MemoryBlock memory, ulong rangeOffset, ulong rangeSize) = GetMemoryOffsetAndSize(va, endVa - va);
+
+ regions.Add(new((UIntPtr)memory.GetPointer(rangeOffset, rangeSize), rangeSize));
+
+ va += rangeSize;
+ }
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ return null;
+ }
+
+ return regions;
+ }
+
+ public IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size)
+ {
+ if (size == 0)
+ {
+ return Enumerable.Empty<MemoryRange>();
+ }
+
+ return GetPhysicalRegionsImpl(va, size);
+ }
+
+ private List<MemoryRange> GetPhysicalRegionsImpl(ulong va, ulong size)
+ {
+ if (!ValidateAddress(va) || !ValidateAddressAndSize(va, size))
+ {
+ return null;
+ }
+
+ int pages = GetPagesCount(va, (uint)size, out va);
+
+ var regions = new List<MemoryRange>();
+
+ ulong regionStart = GetPhysicalAddressInternal(va);
+ ulong regionSize = PageSize;
+
+ for (int page = 0; page < pages - 1; page++)
+ {
+ if (!ValidateAddress(va + PageSize))
+ {
+ return null;
+ }
+
+ ulong newPa = GetPhysicalAddressInternal(va + PageSize);
+
+ if (GetPhysicalAddressInternal(va) + PageSize != newPa)
+ {
+ regions.Add(new MemoryRange(regionStart, regionSize));
+ regionStart = newPa;
+ regionSize = 0;
+ }
+
+ va += PageSize;
+ regionSize += PageSize;
+ }
+
+ regions.Add(new MemoryRange(regionStart, regionSize));
+
+ return regions;
+ }
+
+ private void ReadImpl(ulong va, Span<byte> data)
+ {
+ if (data.Length == 0)
+ {
+ return;
+ }
+
+ try
+ {
+ AssertValidAddressAndSize(va, (ulong)data.Length);
+
+ ulong endVa = va + (ulong)data.Length;
+ int offset = 0;
+
+ while (va < endVa)
+ {
+ (MemoryBlock memory, ulong rangeOffset, ulong copySize) = GetMemoryOffsetAndSize(va, (ulong)(data.Length - offset));
+
+ memory.GetSpan(rangeOffset, (int)copySize).CopyTo(data.Slice(offset, (int)copySize));
+
+ va += copySize;
+ offset += (int)copySize;
+ }
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
+ {
+ throw;
+ }
+ }
+ }
+
+ /// <inheritdoc/>
+ /// <remarks>
+ /// This function also validates that the given range is both valid and mapped, and will throw if it is not.
+ /// </remarks>
+ public void SignalMemoryTracking(ulong va, ulong size, bool write, bool precise = false, int? exemptId = null)
+ {
+ AssertValidAddressAndSize(va, size);
+
+ if (precise)
+ {
+ Tracking.VirtualMemoryEvent(va, size, write, precise: true, exemptId);
+ return;
+ }
+
+ // Software table, used for managed memory tracking.
+
+ _pages.SignalMemoryTracking(Tracking, va, size, write, exemptId);
+ }
+
+ /// <summary>
+ /// Computes the number of pages in a virtual address range.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range</param>
+ /// <param name="startVa">The virtual address of the beginning of the first page</param>
+ /// <remarks>This function does not differentiate between allocated and unallocated pages.</remarks>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private int GetPagesCount(ulong va, ulong size, out ulong startVa)
+ {
+ // WARNING: Always check if ulong does not overflow during the operations.
+ startVa = va & ~(ulong)PageMask;
+ ulong vaSpan = (va - startVa + size + PageMask) & ~(ulong)PageMask;
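+ // Example: va = 0x1001, size = 0x2000 and PageSize = 0x1000 gives
+ // startVa = 0x1000, vaSpan = 0x3000 and a result of 3 pages.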
+
+ return (int)(vaSpan / PageSize);
+ }
+
+ public RegionHandle BeginTracking(ulong address, ulong size, int id, RegionFlags flags = RegionFlags.None)
+ {
+ return Tracking.BeginTracking(address, size, id, flags);
+ }
+
+ public MultiRegionHandle BeginGranularTracking(ulong address, ulong size, IEnumerable<IRegionHandle> handles, ulong granularity, int id, RegionFlags flags = RegionFlags.None)
+ {
+ return Tracking.BeginGranularTracking(address, size, handles, granularity, id, flags);
+ }
+
+ public SmartMultiRegionHandle BeginSmartGranularTracking(ulong address, ulong size, ulong granularity, int id)
+ {
+ return Tracking.BeginSmartGranularTracking(address, size, granularity, id);
+ }
+
+ private ulong GetPhysicalAddressChecked(ulong va)
+ {
+ if (!IsMapped(va))
+ {
+ ThrowInvalidMemoryRegionException($"Not mapped: va=0x{va:X16}");
+ }
+
+ return GetPhysicalAddressInternal(va);
+ }
+
+ private ulong GetPhysicalAddressInternal(ulong va)
+ {
+ return _nativePageTable.GetPhysicalAddress(va);
+ }
+
+ /// <inheritdoc/>
+ public void Reprotect(ulong va, ulong size, MemoryPermission protection)
+ {
+ // TODO
+ }
+
+ /// <inheritdoc/>
+ public void TrackingReprotect(ulong va, ulong size, MemoryPermission protection, bool guest)
+ {
+ if (guest)
+ {
+ _addressSpace.Reprotect(va, size, protection);
+ }
+ else
+ {
+ _pages.TrackingReprotect(va, size, protection);
+ }
+ }
+
+ /// <summary>
+ /// Disposes of resources used by the memory manager.
+ /// </summary>
+ protected override void Destroy()
+ {
+ _addressSpace.Dispose();
+ _nativePageTable.Dispose();
+ }
+
+ protected override Span<byte> GetPhysicalAddressSpan(ulong pa, int size)
+ => _backingMemory.GetSpan(pa, size);
+
+ protected override ulong TranslateVirtualAddressForRead(ulong va)
+ => GetPhysicalAddressInternal(va);
+ }
+}
diff --git a/src/Ryujinx.Cpu/LightningJit/Arm32/Target/Arm64/InstEmitMemory.cs b/src/Ryujinx.Cpu/LightningJit/Arm32/Target/Arm64/InstEmitMemory.cs
index 6ab4b94953..d8caee6e74 100644
--- a/src/Ryujinx.Cpu/LightningJit/Arm32/Target/Arm64/InstEmitMemory.cs
+++ b/src/Ryujinx.Cpu/LightningJit/Arm32/Target/Arm64/InstEmitMemory.cs
@@ -1126,11 +1126,23 @@ namespace Ryujinx.Cpu.LightningJit.Arm32.Target.Arm64
Operand destination64 = new(destination.Kind, OperandType.I64, destination.Value);
Operand basePointer = new(regAlloc.FixedPageTableRegister, RegisterType.Integer, OperandType.I64);
- if (mmType == MemoryManagerType.HostMapped || mmType == MemoryManagerType.HostMappedUnsafe)
- {
- // We don't need to mask the address for the safe mode, since it is already naturally limited to 32-bit
- // and can never reach out of the guest address space.
+ // We don't need to mask the address for the safe mode, since it is already naturally limited to 32-bit
+ // and can never reach out of the guest address space.
+ if (mmType.IsHostTracked())
+ {
+ int tempRegister = regAlloc.AllocateTempGprRegister();
+
+ Operand pte = new(tempRegister, RegisterType.Integer, OperandType.I64);
+
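+ // Index the flat page table with the 4KB page number; each entry holds the difference
+ // between the host and guest addresses of that page, so adding the guest address
+ // yields the host address.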
+ asm.Lsr(pte, guestAddress, new Operand(OperandKind.Constant, OperandType.I32, 12));
+ asm.LdrRr(pte, basePointer, pte, ArmExtensionType.Uxtx, true);
+ asm.Add(destination64, pte, guestAddress);
+
+ regAlloc.FreeTempGprRegister(tempRegister);
+ }
+ else if (mmType.IsHostMapped())
+ {
asm.Add(destination64, basePointer, guestAddress);
}
else
diff --git a/src/Ryujinx.Cpu/LightningJit/Arm64/InstName.cs b/src/Ryujinx.Cpu/LightningJit/Arm64/InstName.cs
index 3656406453..3391a2c145 100644
--- a/src/Ryujinx.Cpu/LightningJit/Arm64/InstName.cs
+++ b/src/Ryujinx.Cpu/LightningJit/Arm64/InstName.cs
@@ -1131,5 +1131,37 @@ namespace Ryujinx.Cpu.LightningJit.Arm64
return false;
}
+
+ public static bool IsPartialRegisterUpdateMemory(this InstName name)
+ {
+ switch (name)
+ {
+ case InstName.Ld1AdvsimdSnglAsNoPostIndex:
+ case InstName.Ld1AdvsimdSnglAsPostIndex:
+ case InstName.Ld2AdvsimdSnglAsNoPostIndex:
+ case InstName.Ld2AdvsimdSnglAsPostIndex:
+ case InstName.Ld3AdvsimdSnglAsNoPostIndex:
+ case InstName.Ld3AdvsimdSnglAsPostIndex:
+ case InstName.Ld4AdvsimdSnglAsNoPostIndex:
+ case InstName.Ld4AdvsimdSnglAsPostIndex:
+ return true;
+ }
+
+ return false;
+ }
+
+ public static bool IsPrefetchMemory(this InstName name)
+ {
+ switch (name)
+ {
+ case InstName.PrfmImm:
+ case InstName.PrfmLit:
+ case InstName.PrfmReg:
+ case InstName.Prfum:
+ return true;
+ }
+
+ return false;
+ }
}
}
diff --git a/src/Ryujinx.Cpu/LightningJit/Arm64/RegisterAllocator.cs b/src/Ryujinx.Cpu/LightningJit/Arm64/RegisterAllocator.cs
index c9a932093d..1c6eab0de2 100644
--- a/src/Ryujinx.Cpu/LightningJit/Arm64/RegisterAllocator.cs
+++ b/src/Ryujinx.Cpu/LightningJit/Arm64/RegisterAllocator.cs
@@ -1,15 +1,12 @@
+using ARMeilleure.Memory;
using Ryujinx.Cpu.LightningJit.CodeGen.Arm64;
using System;
-using System.Diagnostics;
using System.Numerics;
namespace Ryujinx.Cpu.LightningJit.Arm64
{
class RegisterAllocator
{
- public const int MaxTemps = 1;
- public const int MaxTempsInclFixed = MaxTemps + 2;
-
private uint _gprMask;
private readonly uint _fpSimdMask;
private readonly uint _pStateMask;
@@ -25,7 +22,7 @@ namespace Ryujinx.Cpu.LightningJit.Arm64
public uint AllFpSimdMask => _fpSimdMask;
public uint AllPStateMask => _pStateMask;
- public RegisterAllocator(uint gprMask, uint fpSimdMask, uint pStateMask, bool hasHostCall)
+ public RegisterAllocator(MemoryManagerType mmType, uint gprMask, uint fpSimdMask, uint pStateMask, bool hasHostCall)
{
_gprMask = gprMask;
_fpSimdMask = fpSimdMask;
@@ -56,7 +53,7 @@ namespace Ryujinx.Cpu.LightningJit.Arm64
BuildRegisterMap(_registerMap);
- Span<int> tempRegisters = stackalloc int[MaxTemps];
+ Span<int> tempRegisters = stackalloc int[CalculateMaxTemps(mmType)];
for (int index = 0; index < tempRegisters.Length; index++)
{
@@ -150,5 +147,15 @@ namespace Ryujinx.Cpu.LightningJit.Arm64
{
mask &= ~(1u << index);
}
+
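+ // Host tracked address translation needs one extra temporary register to load
+ // the page table entry.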
+ public static int CalculateMaxTemps(MemoryManagerType mmType)
+ {
+ return mmType.IsHostMapped() ? 1 : 2;
+ }
+
+ public static int CalculateMaxTempsInclFixed(MemoryManagerType mmType)
+ {
+ return CalculateMaxTemps(mmType) + 2;
+ }
}
}
diff --git a/src/Ryujinx.Cpu/LightningJit/Arm64/RegisterUtils.cs b/src/Ryujinx.Cpu/LightningJit/Arm64/RegisterUtils.cs
index eb3fc229fe..191e03e7b1 100644
--- a/src/Ryujinx.Cpu/LightningJit/Arm64/RegisterUtils.cs
+++ b/src/Ryujinx.Cpu/LightningJit/Arm64/RegisterUtils.cs
@@ -247,7 +247,7 @@ namespace Ryujinx.Cpu.LightningJit.Arm64
}
}
- if (!flags.HasFlag(InstFlags.ReadRt))
+ if (!flags.HasFlag(InstFlags.ReadRt) || name.IsPartialRegisterUpdateMemory())
{
if (flags.HasFlag(InstFlags.Rt))
{
@@ -281,7 +281,7 @@ namespace Ryujinx.Cpu.LightningJit.Arm64
gprMask |= MaskFromIndex(ExtractRd(flags, encoding));
}
- if (!flags.HasFlag(InstFlags.ReadRt))
+ if (!flags.HasFlag(InstFlags.ReadRt) || name.IsPartialRegisterUpdateMemory())
{
if (flags.HasFlag(InstFlags.Rt))
{
diff --git a/src/Ryujinx.Cpu/LightningJit/Arm64/Target/Arm64/Compiler.cs b/src/Ryujinx.Cpu/LightningJit/Arm64/Target/Arm64/Compiler.cs
index 7ef3bf49b9..7a6d761e8d 100644
--- a/src/Ryujinx.Cpu/LightningJit/Arm64/Target/Arm64/Compiler.cs
+++ b/src/Ryujinx.Cpu/LightningJit/Arm64/Target/Arm64/Compiler.cs
@@ -316,7 +316,7 @@ namespace Ryujinx.Cpu.LightningJit.Arm64.Target.Arm64
uint pStateUseMask = multiBlock.GlobalUseMask.PStateMask;
CodeWriter writer = new();
- RegisterAllocator regAlloc = new(gprUseMask, fpSimdUseMask, pStateUseMask, multiBlock.HasHostCall);
+ RegisterAllocator regAlloc = new(memoryManager.Type, gprUseMask, fpSimdUseMask, pStateUseMask, multiBlock.HasHostCall);
RegisterSaveRestore rsr = new(
regAlloc.AllGprMask & AbiConstants.GprCalleeSavedRegsMask,
regAlloc.AllFpSimdMask & AbiConstants.FpSimdCalleeSavedRegsMask,
diff --git a/src/Ryujinx.Cpu/LightningJit/Arm64/Target/Arm64/Decoder.cs b/src/Ryujinx.Cpu/LightningJit/Arm64/Target/Arm64/Decoder.cs
index 00a1758f29..d5e1eb19c2 100644
--- a/src/Ryujinx.Cpu/LightningJit/Arm64/Target/Arm64/Decoder.cs
+++ b/src/Ryujinx.Cpu/LightningJit/Arm64/Target/Arm64/Decoder.cs
@@ -274,7 +274,8 @@ namespace Ryujinx.Cpu.LightningJit.Arm64.Target.Arm64
uint tempGprUseMask = gprUseMask | instGprReadMask | instGprWriteMask;
- if (CalculateAvailableTemps(tempGprUseMask) < CalculateRequiredGprTemps(tempGprUseMask) || totalInsts++ >= MaxInstructionsPerFunction)
+ if (CalculateAvailableTemps(tempGprUseMask) < CalculateRequiredGprTemps(memoryManager.Type, tempGprUseMask) ||
+ totalInsts++ >= MaxInstructionsPerFunction)
{
isTruncated = true;
address -= 4UL;
@@ -378,9 +379,9 @@ namespace Ryujinx.Cpu.LightningJit.Arm64.Target.Arm64
return false;
}
- private static int CalculateRequiredGprTemps(uint gprUseMask)
+ private static int CalculateRequiredGprTemps(MemoryManagerType mmType, uint gprUseMask)
{
- return BitOperations.PopCount(gprUseMask & RegisterUtils.ReservedRegsMask) + RegisterAllocator.MaxTempsInclFixed;
+ return BitOperations.PopCount(gprUseMask & RegisterUtils.ReservedRegsMask) + RegisterAllocator.CalculateMaxTempsInclFixed(mmType);
}
private static int CalculateAvailableTemps(uint gprUseMask)
diff --git a/src/Ryujinx.Cpu/LightningJit/Arm64/Target/Arm64/InstEmitMemory.cs b/src/Ryujinx.Cpu/LightningJit/Arm64/Target/Arm64/InstEmitMemory.cs
index e03d9373a1..790a7de95b 100644
--- a/src/Ryujinx.Cpu/LightningJit/Arm64/Target/Arm64/InstEmitMemory.cs
+++ b/src/Ryujinx.Cpu/LightningJit/Arm64/Target/Arm64/InstEmitMemory.cs
@@ -55,6 +55,16 @@ namespace Ryujinx.Cpu.LightningJit.Arm64.Target.Arm64
ulong pc,
uint encoding)
{
+ if (name.IsPrefetchMemory() && mmType == MemoryManagerType.HostTrackedUnsafe)
+ {
+ // Prefetches to invalid addresses do not cause faults, so for memory manager
+ // types where we need to access the page table before doing the prefetch,
+ // we should make sure we won't try to access an out of bounds page table region.
+ // To do this, we force the masked memory manager variant to be used.
+
+ mmType = MemoryManagerType.HostTracked;
+ }
+
switch (addressForm)
{
case AddressForm.OffsetReg:
@@ -511,18 +521,48 @@ namespace Ryujinx.Cpu.LightningJit.Arm64.Target.Arm64
WriteAddressTranslation(asBits, mmType, regAlloc, ref asm, destination, guestAddress);
}
- private static void WriteAddressTranslation(int asBits, MemoryManagerType mmType, RegisterAllocator regAlloc, ref Assembler asm, Operand destination, ulong guestAddress)
+ private static void WriteAddressTranslation(
+ int asBits,
+ MemoryManagerType mmType,
+ RegisterAllocator regAlloc,
+ ref Assembler asm,
+ Operand destination,
+ ulong guestAddress)
{
asm.Mov(destination, guestAddress);
WriteAddressTranslation(asBits, mmType, regAlloc, ref asm, destination, destination);
}
- private static void WriteAddressTranslation(int asBits, MemoryManagerType mmType, RegisterAllocator regAlloc, ref Assembler asm, Operand destination, Operand guestAddress)
+ private static void WriteAddressTranslation(
+ int asBits,
+ MemoryManagerType mmType,
+ RegisterAllocator regAlloc,
+ ref Assembler asm,
+ Operand destination,
+ Operand guestAddress)
{
Operand basePointer = new(regAlloc.FixedPageTableRegister, RegisterType.Integer, OperandType.I64);
- if (mmType == MemoryManagerType.HostMapped || mmType == MemoryManagerType.HostMappedUnsafe)
+ if (mmType.IsHostTracked())
+ {
+ int tempRegister = regAlloc.AllocateTempGprRegister();
+
+ Operand pte = new(tempRegister, RegisterType.Integer, OperandType.I64);
+
+ asm.Lsr(pte, guestAddress, new Operand(OperandKind.Constant, OperandType.I32, 12));
+
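+ // For the masked (safe) variant, clamp the page index to the page table size,
+ // so that a stray address cannot index outside of the table.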
+ if (mmType == MemoryManagerType.HostTracked)
+ {
+ asm.And(pte, pte, new Operand(OperandKind.Constant, OperandType.I64, ulong.MaxValue >> (64 - (asBits - 12))));
+ }
+
+ asm.LdrRr(pte, basePointer, pte, ArmExtensionType.Uxtx, true);
+ asm.Add(destination, pte, guestAddress);
+
+ regAlloc.FreeTempGprRegister(tempRegister);
+ }
+ else if (mmType.IsHostMapped())
{
if (mmType == MemoryManagerType.HostMapped)
{
diff --git a/src/Ryujinx.Cpu/LightningJit/Translator.cs b/src/Ryujinx.Cpu/LightningJit/Translator.cs
index c883c1d601..d624102534 100644
--- a/src/Ryujinx.Cpu/LightningJit/Translator.cs
+++ b/src/Ryujinx.Cpu/LightningJit/Translator.cs
@@ -68,9 +68,9 @@ namespace Ryujinx.Cpu.LightningJit
FunctionTable.Fill = (ulong)Stubs.SlowDispatchStub;
- if (memory.Type.IsHostMapped())
+ if (memory.Type.IsHostMappedOrTracked())
{
- NativeSignalHandler.InitializeSignalHandler(MemoryBlock.GetPageSize());
+ NativeSignalHandler.InitializeSignalHandler();
}
}
diff --git a/src/Ryujinx.Cpu/MemoryEhMeilleure.cs b/src/Ryujinx.Cpu/MemoryEhMeilleure.cs
index f3a5b056bc..379ace9413 100644
--- a/src/Ryujinx.Cpu/MemoryEhMeilleure.cs
+++ b/src/Ryujinx.Cpu/MemoryEhMeilleure.cs
@@ -1,3 +1,4 @@
+using Ryujinx.Common;
using Ryujinx.Cpu.Signal;
using Ryujinx.Memory;
using Ryujinx.Memory.Tracking;
@@ -8,19 +9,27 @@ namespace Ryujinx.Cpu
{
public class MemoryEhMeilleure : IDisposable
{
- private delegate bool TrackingEventDelegate(ulong address, ulong size, bool write);
+ public delegate ulong TrackingEventDelegate(ulong address, ulong size, bool write);
+ private readonly MemoryTracking _tracking;
private readonly TrackingEventDelegate _trackingEvent;
+ private readonly ulong _pageSize;
+
private readonly ulong _baseAddress;
private readonly ulong _mirrorAddress;
- public MemoryEhMeilleure(MemoryBlock addressSpace, MemoryBlock addressSpaceMirror, MemoryTracking tracking)
+ public MemoryEhMeilleure(MemoryBlock addressSpace, MemoryBlock addressSpaceMirror, MemoryTracking tracking, TrackingEventDelegate trackingEvent = null)
{
_baseAddress = (ulong)addressSpace.Pointer;
+
ulong endAddress = _baseAddress + addressSpace.Size;
- _trackingEvent = tracking.VirtualMemoryEvent;
+ _tracking = tracking;
+ _trackingEvent = trackingEvent ?? VirtualMemoryEvent;
+
+ _pageSize = MemoryBlock.GetPageSize();
+
bool added = NativeSignalHandler.AddTrackedRegion((nuint)_baseAddress, (nuint)endAddress, Marshal.GetFunctionPointerForDelegate(_trackingEvent));
if (!added)
@@ -28,7 +37,7 @@ namespace Ryujinx.Cpu
throw new InvalidOperationException("Number of allowed tracked regions exceeded.");
}
- if (OperatingSystem.IsWindows())
+ if (OperatingSystem.IsWindows() && addressSpaceMirror != null)
{
// Add a tracking event with no signal handler for the mirror on Windows.
// The native handler has its own code to check for the partial overlap race when regions are protected by accident,
@@ -46,6 +55,21 @@ namespace Ryujinx.Cpu
}
}
+ private ulong VirtualMemoryEvent(ulong address, ulong size, bool write)
+ {
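+ // Align the faulting range to the host page size before forwarding it to the tracking,
+ // and return the host address of the access if it was handled, or 0 otherwise.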
+ ulong pageSize = _pageSize;
+ ulong addressAligned = BitUtils.AlignDown(address, pageSize);
+ ulong endAddressAligned = BitUtils.AlignUp(address + size, pageSize);
+ ulong sizeAligned = endAddressAligned - addressAligned;
+
+ if (_tracking.VirtualMemoryEvent(addressAligned, sizeAligned, write))
+ {
+ return _baseAddress + address;
+ }
+
+ return 0;
+ }
+
public void Dispose()
{
GC.SuppressFinalize(this);
diff --git a/src/Ryujinx.Cpu/PrivateMemoryAllocator.cs b/src/Ryujinx.Cpu/PrivateMemoryAllocator.cs
index ce8e834198..8db74f1e92 100644
--- a/src/Ryujinx.Cpu/PrivateMemoryAllocator.cs
+++ b/src/Ryujinx.Cpu/PrivateMemoryAllocator.cs
@@ -143,7 +143,7 @@ namespace Ryujinx.Cpu
}
}
- public PrivateMemoryAllocator(int blockAlignment, MemoryAllocationFlags allocationFlags) : base(blockAlignment, allocationFlags)
+ public PrivateMemoryAllocator(ulong blockAlignment, MemoryAllocationFlags allocationFlags) : base(blockAlignment, allocationFlags)
{
}
@@ -180,10 +180,10 @@ namespace Ryujinx.Cpu
private readonly List<T> _blocks;
- private readonly int _blockAlignment;
+ private readonly ulong _blockAlignment;
private readonly MemoryAllocationFlags _allocationFlags;
- public PrivateMemoryAllocatorImpl(int blockAlignment, MemoryAllocationFlags allocationFlags)
+ public PrivateMemoryAllocatorImpl(ulong blockAlignment, MemoryAllocationFlags allocationFlags)
{
_blocks = new List<T>();
_blockAlignment = blockAlignment;
@@ -212,7 +212,7 @@ namespace Ryujinx.Cpu
}
}
- ulong blockAlignedSize = BitUtils.AlignUp(size, (ulong)_blockAlignment);
+ ulong blockAlignedSize = BitUtils.AlignUp(size, _blockAlignment);
var memory = new MemoryBlock(blockAlignedSize, _allocationFlags);
var newBlock = createBlock(memory, blockAlignedSize);
diff --git a/src/Ryujinx.Cpu/Signal/NativeSignalHandler.cs b/src/Ryujinx.Cpu/Signal/NativeSignalHandler.cs
index 5a9d92cc4f..93e6083298 100644
--- a/src/Ryujinx.Cpu/Signal/NativeSignalHandler.cs
+++ b/src/Ryujinx.Cpu/Signal/NativeSignalHandler.cs
@@ -70,7 +70,7 @@ namespace Ryujinx.Cpu.Signal
config = new SignalHandlerConfig();
}
- public static void InitializeSignalHandler(ulong pageSize, Func<IntPtr, IntPtr, IntPtr> customSignalHandlerFactory = null)
+ public static void InitializeSignalHandler(Func<IntPtr, IntPtr, IntPtr> customSignalHandlerFactory = null)
{
if (_initialized)
{
@@ -90,7 +90,7 @@ namespace Ryujinx.Cpu.Signal
if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
- _signalHandlerPtr = MapCode(NativeSignalHandlerGenerator.GenerateUnixSignalHandler(_handlerConfig, rangeStructSize, pageSize));
+ _signalHandlerPtr = MapCode(NativeSignalHandlerGenerator.GenerateUnixSignalHandler(_handlerConfig, rangeStructSize));
if (customSignalHandlerFactory != null)
{
@@ -107,7 +107,7 @@ namespace Ryujinx.Cpu.Signal
config.StructAddressOffset = 40; // ExceptionInformation1
config.StructWriteOffset = 32; // ExceptionInformation0
- _signalHandlerPtr = MapCode(NativeSignalHandlerGenerator.GenerateWindowsSignalHandler(_handlerConfig, rangeStructSize, pageSize));
+ _signalHandlerPtr = MapCode(NativeSignalHandlerGenerator.GenerateWindowsSignalHandler(_handlerConfig, rangeStructSize));
if (customSignalHandlerFactory != null)
{
@@ -175,5 +175,10 @@ namespace Ryujinx.Cpu.Signal
return false;
}
+
+ public static bool SupportsFaultAddressPatching()
+ {
+ return NativeSignalHandlerGenerator.SupportsFaultAddressPatchingForHost();
+ }
}
}
diff --git a/src/Ryujinx.Graphics.Gpu/Image/TextureGroup.cs b/src/Ryujinx.Graphics.Gpu/Image/TextureGroup.cs
index fb20974441..34a9a9c75f 100644
--- a/src/Ryujinx.Graphics.Gpu/Image/TextureGroup.cs
+++ b/src/Ryujinx.Graphics.Gpu/Image/TextureGroup.cs
@@ -1622,14 +1622,6 @@ namespace Ryujinx.Graphics.Gpu.Image
/// <param name="size">The size of the flushing memory access</param>
public void FlushAction(TextureGroupHandle handle, ulong address, ulong size)
{
- // If the page size is larger than 4KB, we will have a lot of false positives for flushing.
- // Let's avoid flushing textures that are unlikely to be read from CPU to improve performance
- // on those platforms.
- if (!_physicalMemory.Supports4KBPages && !Storage.Info.IsLinear && !_context.IsGpuThread())
- {
- return;
- }
-
// There is a small gap here where the action is removed but _actionRegistered is still 1.
// In this case it will skip registering the action, but here we are already handling it,
// so there shouldn't be any issue as it's the same handler for all actions.
diff --git a/src/Ryujinx.Graphics.Gpu/Memory/PhysicalMemory.cs b/src/Ryujinx.Graphics.Gpu/Memory/PhysicalMemory.cs
index cca02bb156..ce970fab7c 100644
--- a/src/Ryujinx.Graphics.Gpu/Memory/PhysicalMemory.cs
+++ b/src/Ryujinx.Graphics.Gpu/Memory/PhysicalMemory.cs
@@ -23,11 +23,6 @@ namespace Ryujinx.Graphics.Gpu.Memory
private readonly IVirtualMemoryManagerTracked _cpuMemory;
private int _referenceCount;
- /// <summary>
- /// Indicates whenever the memory manager supports 4KB pages.
- /// </summary>
- public bool Supports4KBPages => _cpuMemory.Supports4KBPages;
-
/// <summary>
/// In-memory shader cache.
/// </summary>
diff --git a/src/Ryujinx.Graphics.Vulkan/ShaderCollection.cs b/src/Ryujinx.Graphics.Vulkan/ShaderCollection.cs
index 7f687fb4cf..e4ea0e4e61 100644
--- a/src/Ryujinx.Graphics.Vulkan/ShaderCollection.cs
+++ b/src/Ryujinx.Graphics.Vulkan/ShaderCollection.cs
@@ -111,8 +111,8 @@ namespace Ryujinx.Graphics.Vulkan
bool usePushDescriptors = !isMinimal &&
VulkanConfiguration.UsePushDescriptors &&
_gd.Capabilities.SupportsPushDescriptors &&
- !_gd.IsNvidiaPreTuring &&
!IsCompute &&
+ !HasPushDescriptorsBug(gd) &&
CanUsePushDescriptors(gd, resourceLayout, IsCompute);
ReadOnlyCollection<ResourceDescriptorCollection> sets = usePushDescriptors ?
@@ -147,6 +147,12 @@ namespace Ryujinx.Graphics.Vulkan
_firstBackgroundUse = !fromCache;
}
+ private static bool HasPushDescriptorsBug(VulkanRenderer gd)
+ {
+ // Those GPUs/drivers do not work properly with push descriptors, so we must force disable them.
+ return gd.IsNvidiaPreTuring || (gd.IsIntelArc && gd.IsIntelWindows);
+ }
+
private static bool CanUsePushDescriptors(VulkanRenderer gd, ResourceLayout layout, bool isCompute)
{
// If binding 3 is immediately used, use an alternate set of reserved bindings.
diff --git a/src/Ryujinx.Graphics.Vulkan/Vendor.cs b/src/Ryujinx.Graphics.Vulkan/Vendor.cs
index ff841dec93..e0f5690793 100644
--- a/src/Ryujinx.Graphics.Vulkan/Vendor.cs
+++ b/src/Ryujinx.Graphics.Vulkan/Vendor.cs
@@ -1,3 +1,4 @@
+using Silk.NET.Vulkan;
using System.Text.RegularExpressions;
namespace Ryujinx.Graphics.Vulkan
@@ -61,5 +62,36 @@ namespace Ryujinx.Graphics.Vulkan
_ => $"0x{id:X}",
};
}
+
+ public static string GetFriendlyDriverName(DriverId id)
+ {
+ return id switch
+ {
+ DriverId.AmdProprietary => "AMD",
+ DriverId.AmdOpenSource => "AMD (Open)",
+ DriverId.ArmProprietary => "ARM",
+ DriverId.BroadcomProprietary => "Broadcom",
+ DriverId.CoreaviProprietary => "CoreAVI",
+ DriverId.GgpProprietary => "GGP",
+ DriverId.GoogleSwiftshader => "SwiftShader",
+ DriverId.ImaginationProprietary => "Imagination",
+ DriverId.IntelOpenSourceMesa => "Intel (Open)",
+ DriverId.IntelProprietaryWindows => "Intel",
+ DriverId.JuiceProprietary => "Juice",
+ DriverId.MesaDozen => "Dozen",
+ DriverId.MesaLlvmpipe => "LLVMpipe",
+ DriverId.MesaPanvk => "PanVK",
+ DriverId.MesaRadv => "RADV",
+ DriverId.MesaTurnip => "Turnip",
+ DriverId.MesaV3DV => "V3DV",
+ DriverId.MesaVenus => "Venus",
+ DriverId.Moltenvk => "MoltenVK",
+ DriverId.NvidiaProprietary => "NVIDIA",
+ DriverId.QualcommProprietary => "Qualcomm",
+ DriverId.SamsungProprietary => "Samsung",
+ DriverId.VerisiliconProprietary => "Verisilicon",
+ _ => id.ToString(),
+ };
+ }
}
}
diff --git a/src/Ryujinx.Graphics.Vulkan/VulkanRenderer.cs b/src/Ryujinx.Graphics.Vulkan/VulkanRenderer.cs
index 7d7c109525..d1afeaeaed 100644
--- a/src/Ryujinx.Graphics.Vulkan/VulkanRenderer.cs
+++ b/src/Ryujinx.Graphics.Vulkan/VulkanRenderer.cs
@@ -87,6 +87,7 @@ namespace Ryujinx.Graphics.Vulkan
internal bool IsIntelWindows { get; private set; }
internal bool IsAmdGcn { get; private set; }
internal bool IsNvidiaPreTuring { get; private set; }
+ internal bool IsIntelArc { get; private set; }
internal bool IsMoltenVk { get; private set; }
internal bool IsTBDR { get; private set; }
internal bool IsSharedMemory { get; private set; }
@@ -310,6 +311,50 @@ namespace Ryujinx.Graphics.Vulkan
ref var properties = ref properties2.Properties;
+ var hasDriverProperties = _physicalDevice.TryGetPhysicalDeviceDriverPropertiesKHR(Api, out var driverProperties);
+
+ Vendor = VendorUtils.FromId(properties.VendorID);
+
+ IsAmdWindows = Vendor == Vendor.Amd && OperatingSystem.IsWindows();
+ IsIntelWindows = Vendor == Vendor.Intel && OperatingSystem.IsWindows();
+ IsTBDR =
+ Vendor == Vendor.Apple ||
+ Vendor == Vendor.Qualcomm ||
+ Vendor == Vendor.ARM ||
+ Vendor == Vendor.Broadcom ||
+ Vendor == Vendor.ImgTec;
+
+ GpuVendor = VendorUtils.GetNameFromId(properties.VendorID);
+ GpuDriver = hasDriverProperties && !OperatingSystem.IsMacOS() ?
+ VendorUtils.GetFriendlyDriverName(driverProperties.DriverID) : GpuVendor; // Fall back to the vendor name if the driver name is unavailable, or on macOS where the vendor name is preferred.
+
+ fixed (byte* deviceName = properties.DeviceName)
+ {
+ GpuRenderer = Marshal.PtrToStringAnsi((IntPtr)deviceName);
+ }
+
+ GpuVersion = $"Vulkan v{ParseStandardVulkanVersion(properties.ApiVersion)}, Driver v{ParseDriverVersion(ref properties)}";
+
+ IsAmdGcn = !IsMoltenVk && Vendor == Vendor.Amd && VendorUtils.AmdGcnRegex().IsMatch(GpuRenderer);
+
+ if (Vendor == Vendor.Nvidia)
+ {
+ var match = VendorUtils.NvidiaConsumerClassRegex().Match(GpuRenderer);
+
+ if (match != null && int.TryParse(match.Groups[2].Value, out int gpuNumber))
+ {
+ IsNvidiaPreTuring = gpuNumber < 2000;
+ }
+ else if (GpuDriver.Contains("TITAN") && !GpuDriver.Contains("RTX"))
+ {
+ IsNvidiaPreTuring = true;
+ }
+ }
+ else if (Vendor == Vendor.Intel)
+ {
+ IsIntelArc = GpuRenderer.StartsWith("Intel(R) Arc(TM)");
+ }
+
ulong minResourceAlignment = Math.Max(
Math.Max(
properties.Limits.MinStorageBufferOffsetAlignment,
@@ -732,49 +777,6 @@ namespace Ryujinx.Graphics.Vulkan
return ParseStandardVulkanVersion(driverVersionRaw);
}
- private unsafe void PrintGpuInformation()
- {
- var properties = _physicalDevice.PhysicalDeviceProperties;
-
- var hasDriverProperties = _physicalDevice.TryGetPhysicalDeviceDriverPropertiesKHR(Api, out var driverProperties);
-
- string vendorName = VendorUtils.GetNameFromId(properties.VendorID);
-
- Vendor = VendorUtils.FromId(properties.VendorID);
-
- IsAmdWindows = Vendor == Vendor.Amd && OperatingSystem.IsWindows();
- IsIntelWindows = Vendor == Vendor.Intel && OperatingSystem.IsWindows();
- IsTBDR =
- Vendor == Vendor.Apple ||
- Vendor == Vendor.Qualcomm ||
- Vendor == Vendor.ARM ||
- Vendor == Vendor.Broadcom ||
- Vendor == Vendor.ImgTec;
-
- GpuVendor = vendorName;
- GpuDriver = hasDriverProperties ? Marshal.PtrToStringAnsi((IntPtr)driverProperties.DriverName) : vendorName; // Fall back to vendor name if driver name isn't available.
- GpuRenderer = Marshal.PtrToStringAnsi((IntPtr)properties.DeviceName);
- GpuVersion = $"Vulkan v{ParseStandardVulkanVersion(properties.ApiVersion)}, Driver v{ParseDriverVersion(ref properties)}";
-
- IsAmdGcn = !IsMoltenVk && Vendor == Vendor.Amd && VendorUtils.AmdGcnRegex().IsMatch(GpuRenderer);
-
- if (Vendor == Vendor.Nvidia)
- {
- var match = VendorUtils.NvidiaConsumerClassRegex().Match(GpuRenderer);
-
- if (match != null && int.TryParse(match.Groups[2].Value, out int gpuNumber))
- {
- IsNvidiaPreTuring = gpuNumber < 2000;
- }
- else if (GpuDriver.Contains("TITAN") && !GpuDriver.Contains("RTX"))
- {
- IsNvidiaPreTuring = true;
- }
- }
-
- Logger.Notice.Print(LogClass.Gpu, $"{GpuVendor} {GpuRenderer} ({GpuVersion})");
- }
-
internal PrimitiveTopology TopologyRemap(PrimitiveTopology topology)
{
return topology switch
@@ -798,6 +800,11 @@ namespace Ryujinx.Graphics.Vulkan
};
}
+ private void PrintGpuInformation()
+ {
+ Logger.Notice.Print(LogClass.Gpu, $"{GpuVendor} {GpuRenderer} ({GpuVersion})");
+ }
+
public void Initialize(GraphicsDebugLevel logLevel)
{
SetupContext(logLevel);
diff --git a/src/Ryujinx.HLE/HOS/ArmProcessContextFactory.cs b/src/Ryujinx.HLE/HOS/ArmProcessContextFactory.cs
index 06b8fd3454..e8c433269e 100644
--- a/src/Ryujinx.HLE/HOS/ArmProcessContextFactory.cs
+++ b/src/Ryujinx.HLE/HOS/ArmProcessContextFactory.cs
@@ -72,7 +72,8 @@ namespace Ryujinx.HLE.HOS
AddressSpace addressSpace = null;
- if (mode == MemoryManagerMode.HostMapped || mode == MemoryManagerMode.HostMappedUnsafe)
+ // We want to use host tracked mode if the host page size is > 4KB.
+ if ((mode == MemoryManagerMode.HostMapped || mode == MemoryManagerMode.HostMappedUnsafe) && MemoryBlock.GetPageSize() <= 0x1000)
{
if (!AddressSpace.TryCreate(context.Memory, addressSpaceSize, MemoryBlock.GetPageSize() == MemoryManagerHostMapped.PageSize, out addressSpace))
{
@@ -91,13 +92,21 @@ namespace Ryujinx.HLE.HOS
case MemoryManagerMode.HostMapped:
case MemoryManagerMode.HostMappedUnsafe:
- if (addressSpaceSize != addressSpace.AddressSpaceSize)
+ if (addressSpace == null)
{
- Logger.Warning?.Print(LogClass.Emulation, $"Allocated address space (0x{addressSpace.AddressSpaceSize:X}) is smaller than guest application requirements (0x{addressSpaceSize:X})");
+ var memoryManagerHostTracked = new MemoryManagerHostTracked(context.Memory, addressSpaceSize, mode == MemoryManagerMode.HostMappedUnsafe, invalidAccessHandler);
+ processContext = new ArmProcessContext(pid, cpuEngine, _gpu, memoryManagerHostTracked, addressSpaceSize, for64Bit);
}
+ else
+ {
+ if (addressSpaceSize != addressSpace.AddressSpaceSize)
+ {
+ Logger.Warning?.Print(LogClass.Emulation, $"Allocated address space (0x{addressSpace.AddressSpaceSize:X}) is smaller than guest application requirements (0x{addressSpaceSize:X})");
+ }
- var memoryManagerHostMapped = new MemoryManagerHostMapped(addressSpace, mode == MemoryManagerMode.HostMappedUnsafe, invalidAccessHandler);
- processContext = new ArmProcessContext(pid, cpuEngine, _gpu, memoryManagerHostMapped, addressSpace.AddressSpaceSize, for64Bit);
+ var memoryManagerHostMapped = new MemoryManagerHostMapped(addressSpace, mode == MemoryManagerMode.HostMappedUnsafe, invalidAccessHandler);
+ processContext = new ArmProcessContext(pid, cpuEngine, _gpu, memoryManagerHostMapped, addressSpace.AddressSpaceSize, for64Bit);
+ }
break;
default:
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs
index 543acb7a0a..d7b601d1c5 100644
--- a/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs
@@ -165,6 +165,29 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
/// <inheritdoc/>
protected override Result MapForeign(IEnumerable<HostMemoryRange> regions, ulong va, ulong size)
{
+ ulong backingStart = (ulong)Context.Memory.Pointer;
+ ulong backingEnd = backingStart + Context.Memory.Size;
+
+ KPageList pageList = new();
+
+ foreach (HostMemoryRange region in regions)
+ {
+ // If the range is inside the physical memory, it is shared and we should increment the page count,
+ // otherwise it is private and we don't need to increment the page count.
+
+ if (region.Address >= backingStart && region.Address < backingEnd)
+ {
+ pageList.AddRange(region.Address - backingStart + DramMemoryMap.DramBase, region.Size / PageSize);
+ }
+ }
+
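+ // Reference the shared pages and commit their backing memory before mapping;
+ // SignalSuccess below keeps the references once the mapping has been set up.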
+ using var scopedPageList = new KScopedPageList(Context.MemoryManager, pageList);
+
+ foreach (var pageNode in pageList)
+ {
+ Context.CommitMemory(pageNode.Address - DramMemoryMap.DramBase, pageNode.PagesCount * PageSize);
+ }
+
ulong offset = 0;
foreach (var region in regions)
@@ -174,6 +197,8 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
offset += region.Size;
}
+ scopedPageList.SignalSuccess();
+
return Result.Success;
}
diff --git a/src/Ryujinx.Memory/AddressSpaceManager.cs b/src/Ryujinx.Memory/AddressSpaceManager.cs
index 0a4a951439..f19b45b659 100644
--- a/src/Ryujinx.Memory/AddressSpaceManager.cs
+++ b/src/Ryujinx.Memory/AddressSpaceManager.cs
@@ -283,9 +283,9 @@ namespace Ryujinx.Memory
{
var hostRegion = hostRegions[i];
- if ((ulong)hostRegion.Address >= backingStart && (ulong)hostRegion.Address < backingEnd)
+ if (hostRegion.Address >= backingStart && hostRegion.Address < backingEnd)
{
- regions[count++] = new MemoryRange((ulong)hostRegion.Address - backingStart, hostRegion.Size);
+ regions[count++] = new MemoryRange(hostRegion.Address - backingStart, hostRegion.Size);
}
}
diff --git a/src/Ryujinx.Memory/Tracking/VirtualRegion.cs b/src/Ryujinx.Memory/Tracking/VirtualRegion.cs
index bb087e9af0..35e9c2d9b2 100644
--- a/src/Ryujinx.Memory/Tracking/VirtualRegion.cs
+++ b/src/Ryujinx.Memory/Tracking/VirtualRegion.cs
@@ -70,9 +70,12 @@ namespace Ryujinx.Memory.Tracking
{
_lastPermission = MemoryPermission.Invalid;
- foreach (RegionHandle handle in Handles)
+ if (!Guest)
{
- handle.SignalMappingChanged(mapped);
+ foreach (RegionHandle handle in Handles)
+ {
+ handle.SignalMappingChanged(mapped);
+ }
}
}
diff --git a/src/Ryujinx/AppHost.cs b/src/Ryujinx/AppHost.cs
index a7e5498975..72a3552662 100644
--- a/src/Ryujinx/AppHost.cs
+++ b/src/Ryujinx/AppHost.cs
@@ -421,6 +421,12 @@ namespace Ryujinx.Ava
Device.Configuration.MultiplayerMode = e.NewValue;
}
+ public void ToggleVSync()
+ {
+ Device.EnableDeviceVsync = !Device.EnableDeviceVsync;
+ _renderer.Window.ChangeVSyncMode(Device.EnableDeviceVsync);
+ }
+
public void Stop()
{
_isActive = false;
@@ -1076,8 +1082,7 @@ namespace Ryujinx.Ava
switch (currentHotkeyState)
{
case KeyboardHotkeyState.ToggleVSync:
- Device.EnableDeviceVsync = !Device.EnableDeviceVsync;
-
+ ToggleVSync();
break;
case KeyboardHotkeyState.Screenshot:
ScreenshotRequested = true;
diff --git a/src/Ryujinx/UI/Views/Main/MainStatusBarView.axaml.cs b/src/Ryujinx/UI/Views/Main/MainStatusBarView.axaml.cs
index 239a7cbfca..5284957130 100644
--- a/src/Ryujinx/UI/Views/Main/MainStatusBarView.axaml.cs
+++ b/src/Ryujinx/UI/Views/Main/MainStatusBarView.axaml.cs
@@ -33,7 +33,7 @@ namespace Ryujinx.Ava.UI.Views.Main
private void VsyncStatus_PointerReleased(object sender, PointerReleasedEventArgs e)
{
- Window.ViewModel.AppHost.Device.EnableDeviceVsync = !Window.ViewModel.AppHost.Device.EnableDeviceVsync;
+ Window.ViewModel.AppHost.ToggleVSync();
Logger.Info?.Print(LogClass.Application, $"VSync toggled to: {Window.ViewModel.AppHost.Device.EnableDeviceVsync}");
}