using ChocolArm64.Instructions;
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;
using System.Threading;

using static ChocolArm64.Memory.CompareExchange128;
using static ChocolArm64.Memory.MemoryManagement;

namespace ChocolArm64.Memory
{
    public unsafe class MemoryManager : ARMeilleure.Memory.IMemoryManager
    {
        public const int PageBits = 12;
        public const int PageSize = 1 << PageBits;
        public const int PageMask = PageSize - 1;
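
        // Page table entries hold host pointers to the start of each mapped
        // page. Those pointers come from aligned allocations, so the low bits
        // of an entry are free to carry status flags; Translate masks them off
        // before the pointer is dereferenced.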
        private const long PteFlagNotModified = 1;

        internal const long PteFlagsMask = 7;

        public IntPtr Ram { get; private set; }

        private byte* _ramPtr;

        private IntPtr _pageTable;

        internal IntPtr PageTable => _pageTable;

        internal int PtLevelBits { get; }
        internal int PtLevelSize { get; }
        internal int PtLevelMask { get; }

        public bool HasWriteWatchSupport => MemoryManagement.HasWriteWatchSupport;

        public int AddressSpaceBits { get; }
        public long AddressSpaceSize { get; }

        public MemoryManager(
            IntPtr ram,
            int addressSpaceBits = 48,
            bool useFlatPageTable = false)
        {
            Ram = ram;

            _ramPtr = (byte*)ram;

            AddressSpaceBits = addressSpaceBits;
            AddressSpaceSize = 1L << addressSpaceBits;

            // When a flat page table is requested, we use a single array for
            // the mappings of the entire address space. This has better
            // performance, but also higher memory usage. The multi-level page
            // table uses 9 bits per level, so its memory usage is lower, but
            // performance is also lower, since each address translation
            // requires multiple reads.
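            //
            // Illustrative numbers: with a 36-bit address space and 4 KB
            // pages, the flat table needs 2^(36 - 12) = ~16.8M entries, or
            // 128 MiB of pointers on a 64-bit host, allocated up front. The
            // multi-level form instead walks three 9-bit levels (bits 12-20,
            // 21-29 and 30-38), allocating each level lazily in GetPtPtr.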
            if (useFlatPageTable)
            {
                PtLevelBits = addressSpaceBits - PageBits;
            }
            else
            {
                PtLevelBits = 9;
            }

            PtLevelSize = 1 << PtLevelBits;
            PtLevelMask = PtLevelSize - 1;

            _pageTable = Allocate((ulong)(PtLevelSize * IntPtr.Size));
        }
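
        // A minimal usage sketch (illustrative only; the RAM size and
        // addresses below are assumptions, not values used by this class):
        //
        //     IntPtr ram = MemoryManagement.Allocate(ramSize);
        //
        //     var memory = new MemoryManager(ram, addressSpaceBits: 36);
        //
        //     memory.Map(va: 0x10000000, pa: 0, size: 0x1000);
        //     memory.WriteUInt32(0x10000000, 0xCAFE);
        //     uint value = memory.ReadUInt32(0x10000000);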

        public void Map(long va, long pa, long size)
        {
            SetPtEntries(va, _ramPtr + pa, size);
        }

        public void Unmap(long position, long size)
        {
            SetPtEntries(position, null, size);
        }

        public bool IsMapped(long position)
        {
            return Translate(position) != IntPtr.Zero;
        }

        public long GetPhysicalAddress(long virtualAddress)
        {
            byte* ptr = (byte*)Translate(virtualAddress);

            return (long)(ptr - _ramPtr);
        }

        private IntPtr Translate(long position)
        {
            if (!IsValidPosition(position))
            {
                return IntPtr.Zero;
            }

            byte* ptr = GetPtEntry(position);

            ulong ptrUlong = (ulong)ptr;

            if ((ptrUlong & PteFlagsMask) != 0)
            {
                ptrUlong &= ~(ulong)PteFlagsMask;

                ptr = (byte*)ptrUlong;
            }

            return new IntPtr(ptr + (position & PageMask));
        }
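
        // Like Translate, but also clears the "not modified" flag of the
        // target page, so the software write tracking fallback
        // (IsRegionModifiedFallback) can observe the write.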
        private IntPtr TranslateWrite(long position)
        {
            if (!IsValidPosition(position))
            {
                return IntPtr.Zero;
            }

            byte* ptr = GetPtEntry(position);

            ulong ptrUlong = (ulong)ptr;

            if ((ptrUlong & PteFlagsMask) != 0)
            {
                if ((ptrUlong & PteFlagNotModified) != 0)
                {
                    ClearPtEntryFlag(position, PteFlagNotModified);
                }

                ptrUlong &= ~(ulong)PteFlagsMask;

                ptr = (byte*)ptrUlong;
            }

            return new IntPtr(ptr + (position & PageMask));
        }

        private byte* GetPtEntry(long position)
        {
            return *(byte**)GetPtPtr(position);
        }

        private void SetPtEntries(long va, byte* ptr, long size)
        {
            long endPosition = (va + size + PageMask) & ~PageMask;

            while ((ulong)va < (ulong)endPosition)
            {
                SetPtEntry(va, ptr);

                va += PageSize;

                if (ptr != null)
                {
                    ptr += PageSize;
                }
            }
        }

        private void SetPtEntry(long position, byte* ptr)
        {
            *(byte**)GetPtPtr(position) = ptr;
        }

        private void SetPtEntryFlag(long position, long flag)
        {
            ModifyPtEntryFlag(position, flag, setFlag: true);
        }

        private void ClearPtEntryFlag(long position, long flag)
        {
            ModifyPtEntryFlag(position, flag, setFlag: false);
        }
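
        // Flag updates use a compare-and-swap loop so that concurrent
        // modifications of the same entry are not lost.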
        private void ModifyPtEntryFlag(long position, long flag, bool setFlag)
        {
            while (true)
            {
                IntPtr* ptPtr = GetPtPtr(position);

                IntPtr old = *ptPtr;

                long modified = old.ToInt64();

                if (setFlag)
                {
                    modified |= flag;
                }
                else
                {
                    modified &= ~flag;
                }

                IntPtr origValue = Interlocked.CompareExchange(ref *ptPtr, new IntPtr(modified), old);

                if (origValue == old)
                {
                    break;
                }
            }
        }

        private IntPtr* GetPtPtr(long position)
        {
            if (!IsValidPosition(position))
            {
                throw new ArgumentOutOfRangeException(nameof(position));
            }

            IntPtr nextPtr = _pageTable;

            IntPtr* ptePtr = null;

            int bit = PageBits;

            while (true)
            {
                long index = (position >> bit) & PtLevelMask;

                ptePtr = &((IntPtr*)nextPtr)[index];

                bit += PtLevelBits;

                if (bit >= AddressSpaceBits)
                {
                    break;
                }

                nextPtr = *ptePtr;

                if (nextPtr == IntPtr.Zero)
                {
                    // Entry does not yet exist, allocate a new one.
                    IntPtr newPtr = Allocate((ulong)(PtLevelSize * IntPtr.Size));

                    // Try to swap the current pointer (which should be zero) with the allocated one.
                    nextPtr = Interlocked.Exchange(ref *ptePtr, newPtr);

                    // If the old pointer is not null, then another thread has already set it.
                    if (nextPtr != IntPtr.Zero)
                    {
                        Free(newPtr);
                    }
                    else
                    {
                        nextPtr = newPtr;
                    }
                }
            }

            return ptePtr;
        }
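
        // Checks whether any page in the region was written to since it was
        // last checked. Uses the host write watch mechanism when available
        // (GetModifiedPages); pages that are contiguous in host memory are
        // batched into a single query. Otherwise, falls back to the PTE flag
        // based tracking below.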
        public bool IsRegionModified(long position, long size)
        {
            if (!HasWriteWatchSupport)
            {
                return IsRegionModifiedFallback(position, size);
            }

            IntPtr address = Translate(position);

            IntPtr baseAddr = address;
            IntPtr expectedAddr = address;

            long pendingPages = 0;

            long pages = size / PageSize;

            bool modified = false;

            bool IsAnyPageModified()
            {
                IntPtr pendingSize = new IntPtr(pendingPages * PageSize);

                IntPtr[] addresses = new IntPtr[pendingPages];

                bool result = GetModifiedPages(baseAddr, pendingSize, addresses, out ulong count);

                if (result)
                {
                    return count != 0;
                }
                else
                {
                    return true;
                }
            }

            while (pages-- > 0)
            {
                if (address != expectedAddr)
                {
                    modified |= IsAnyPageModified();

                    baseAddr = address;

                    pendingPages = 0;
                }

                expectedAddr = address + PageSize;

                pendingPages++;

                if (pages == 0)
                {
                    break;
                }

                position += PageSize;

                address = Translate(position);
            }

            if (pendingPages != 0)
            {
                modified |= IsAnyPageModified();
            }

            return modified;
        }
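
        // Software fallback: reports a page as modified unless its
        // "not modified" flag is set, then sets the flag again so the next
        // write (through TranslateWrite) clears it. Note that the direct
        // indexing below assumes a flat page table (useFlatPageTable = true).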
        private unsafe bool IsRegionModifiedFallback(long position, long size)
        {
            long endAddr = (position + size + PageMask) & ~PageMask;

            bool modified = false;

            while ((ulong)position < (ulong)endAddr)
            {
                if (IsValidPosition(position))
                {
                    byte* ptr = ((byte**)_pageTable)[position >> PageBits];

                    ulong ptrUlong = (ulong)ptr;

                    if ((ptrUlong & PteFlagNotModified) == 0)
                    {
                        modified = true;

                        SetPtEntryFlag(position, PteFlagNotModified);
                    }
                }
                else
                {
                    modified = true;
                }

                position += PageSize;
            }

            return modified;
        }

        public bool TryGetHostAddress(long position, long size, out IntPtr ptr)
        {
            if (IsContiguous(position, size))
            {
                ptr = (IntPtr)Translate(position);

                return true;
            }

            ptr = IntPtr.Zero;

            return false;
        }

        private bool IsContiguous(long position, long size)
        {
            long endPos = position + size;

            position &= ~PageMask;

            long expectedPa = GetPhysicalAddress(position);

            while ((ulong)position < (ulong)endPos)
            {
                long pa = GetPhysicalAddress(position);

                if (pa != expectedPa)
                {
                    return false;
                }

                position += PageSize;
                expectedPa += PageSize;
            }

            return true;
        }

        public bool IsValidPosition(long position)
        {
            return (ulong)position < (ulong)AddressSpaceSize;
        }
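
        // Atomic operations used to implement the guest's exclusive and
        // atomic memory instructions. Addresses must be naturally aligned;
        // misaligned addresses raise an alignment fault.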
        internal bool AtomicCompareExchange2xInt32(
            long position,
            int expectedLow,
            int expectedHigh,
            int desiredLow,
            int desiredHigh)
        {
            long expected = (uint)expectedLow;
            long desired = (uint)desiredLow;

            expected |= (long)expectedHigh << 32;
            desired |= (long)desiredHigh << 32;

            return AtomicCompareExchangeInt64(position, expected, desired);
        }

        internal bool AtomicCompareExchangeInt128(
            long position,
            ulong expectedLow,
            ulong expectedHigh,
            ulong desiredLow,
            ulong desiredHigh)
        {
            if ((position & 0xf) != 0)
            {
                AbortWithAlignmentFault(position);
            }

            IntPtr ptr = TranslateWrite(position);

            return InterlockedCompareExchange128(ptr, expectedLow, expectedHigh, desiredLow, desiredHigh);
        }

        internal Vector128<float> AtomicReadInt128(long position)
        {
            if ((position & 0xf) != 0)
            {
                AbortWithAlignmentFault(position);
            }

            IntPtr ptr = Translate(position);

            InterlockedRead128(ptr, out ulong low, out ulong high);

            Vector128<float> vector = default(Vector128<float>);

            vector = VectorHelper.VectorInsertInt(low, vector, 0, 3);
            vector = VectorHelper.VectorInsertInt(high, vector, 1, 3);

            return vector;
        }
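
        // Interlocked.CompareExchange has no 8/16-bit overloads, so the two
        // methods below widen the exchange to 32 bits, keeping the
        // neighboring bytes from a snapshot of the current value. If a
        // neighboring byte changes concurrently the exchange fails; the
        // assumption here is that callers treat this like a failed
        // store-exclusive and retry.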
        public bool AtomicCompareExchangeByte(long position, byte expected, byte desired)
        {
            int* ptr = (int*)Translate(position);

            int currentValue = *ptr;

            int expected32 = (currentValue & ~byte.MaxValue) | expected;
            int desired32 = (currentValue & ~byte.MaxValue) | desired;

            return Interlocked.CompareExchange(ref *ptr, desired32, expected32) == expected32;
        }

        public bool AtomicCompareExchangeInt16(long position, short expected, short desired)
        {
            if ((position & 1) != 0)
            {
                AbortWithAlignmentFault(position);
            }

            int* ptr = (int*)Translate(position);

            int currentValue = *ptr;

            int expected32 = (currentValue & ~ushort.MaxValue) | (ushort)expected;
            int desired32 = (currentValue & ~ushort.MaxValue) | (ushort)desired;

            return Interlocked.CompareExchange(ref *ptr, desired32, expected32) == expected32;
        }

        public bool AtomicCompareExchangeInt32(long position, int expected, int desired)
        {
            if ((position & 3) != 0)
            {
                AbortWithAlignmentFault(position);
            }

            int* ptr = (int*)TranslateWrite(position);

            return Interlocked.CompareExchange(ref *ptr, desired, expected) == expected;
        }

        public bool AtomicCompareExchangeInt64(long position, long expected, long desired)
        {
            if ((position & 7) != 0)
            {
                AbortWithAlignmentFault(position);
            }

            long* ptr = (long*)TranslateWrite(position);

            return Interlocked.CompareExchange(ref *ptr, desired, expected) == expected;
        }

        public int AtomicIncrementInt32(long position)
        {
            if ((position & 3) != 0)
            {
                AbortWithAlignmentFault(position);
            }

            int* ptr = (int*)TranslateWrite(position);

            return Interlocked.Increment(ref *ptr);
        }

        public int AtomicDecrementInt32(long position)
        {
            if ((position & 3) != 0)
            {
                AbortWithAlignmentFault(position);
            }

            int* ptr = (int*)TranslateWrite(position);

            return Interlocked.Decrement(ref *ptr);
        }

        private void AbortWithAlignmentFault(long position)
        {
            // TODO: Abort mode and exception support on the CPU.
            throw new InvalidOperationException($"Tried to compare exchange a misaligned address 0x{position:X16}.");
        }
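
        // Scalar reads. Aligned accesses load directly through the page
        // table; unaligned accesses are split into smaller reads and
        // reassembled in little-endian order, which also handles values that
        // cross a page boundary.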
        public sbyte ReadSByte(long position)
        {
            return (sbyte)ReadByte(position);
        }

        public short ReadInt16(long position)
        {
            return (short)ReadUInt16(position);
        }

        public int ReadInt32(long position)
        {
            return (int)ReadUInt32(position);
        }

        public long ReadInt64(long position)
        {
            return (long)ReadUInt64(position);
        }

        public byte ReadByte(long position)
        {
            return *((byte*)Translate(position));
        }

        public ushort ReadUInt16(long position)
        {
            if ((position & 1) == 0)
            {
                return *((ushort*)Translate(position));
            }
            else
            {
                return (ushort)(ReadByte(position + 0) << 0 |
                                ReadByte(position + 1) << 8);
            }
        }

        public uint ReadUInt32(long position)
        {
            if ((position & 3) == 0)
            {
                return *((uint*)Translate(position));
            }
            else
            {
                return (uint)(ReadUInt16(position + 0) << 0 |
                              ReadUInt16(position + 2) << 16);
            }
        }

        public ulong ReadUInt64(long position)
        {
            if ((position & 7) == 0)
            {
                return *((ulong*)Translate(position));
            }
            else
            {
                return (ulong)ReadUInt32(position + 0) << 0 |
                       (ulong)ReadUInt32(position + 4) << 32;
            }
        }
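
        // Vector reads return the value in the low lanes of a Vector128,
        // using SSE loads on the fast path and the VectorHelper fallback
        // otherwise.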
        public Vector128<float> ReadVector8(long position)
        {
            if (Sse2.IsSupported)
            {
                return Sse.StaticCast<byte, float>(Sse2.SetVector128(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ReadByte(position)));
            }
            else
            {
                Vector128<float> value = VectorHelper.VectorSingleZero();

                value = VectorHelper.VectorInsertInt(ReadByte(position), value, 0, 0);

                return value;
            }
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public Vector128<float> ReadVector16(long position)
        {
            if (Sse2.IsSupported && (position & 1) == 0)
            {
                return Sse.StaticCast<ushort, float>(Sse2.Insert(Sse2.SetZeroVector128<ushort>(), ReadUInt16(position), 0));
            }
            else
            {
                Vector128<float> value = VectorHelper.VectorSingleZero();

                value = VectorHelper.VectorInsertInt(ReadUInt16(position), value, 0, 1);

                return value;
            }
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public Vector128<float> ReadVector32(long position)
        {
            if (Sse.IsSupported && (position & 3) == 0)
            {
                return Sse.LoadScalarVector128((float*)Translate(position));
            }
            else
            {
                Vector128<float> value = VectorHelper.VectorSingleZero();

                value = VectorHelper.VectorInsertInt(ReadUInt32(position), value, 0, 2);

                return value;
            }
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public Vector128<float> ReadVector64(long position)
        {
            if (Sse2.IsSupported && (position & 7) == 0)
            {
                return Sse.StaticCast<double, float>(Sse2.LoadScalarVector128((double*)Translate(position)));
            }
            else
            {
                Vector128<float> value = VectorHelper.VectorSingleZero();

                value = VectorHelper.VectorInsertInt(ReadUInt64(position), value, 0, 3);

                return value;
            }
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public Vector128<float> ReadVector128(long position)
        {
            if (Sse.IsSupported && (position & 15) == 0)
            {
                return Sse.LoadVector128((float*)Translate(position));
            }
            else
            {
                Vector128<float> value = VectorHelper.VectorSingleZero();

                value = VectorHelper.VectorInsertInt(ReadUInt64(position + 0), value, 0, 3);
                value = VectorHelper.VectorInsertInt(ReadUInt64(position + 8), value, 1, 3);

                return value;
            }
        }
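
        // Bulk accesses copy page by page, since a range that is contiguous
        // in the guest address space may not be contiguous in host memory.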
        public byte[] ReadBytes(long position, long size)
        {
            long endAddr = position + size;

            if ((ulong)size > int.MaxValue)
            {
                throw new ArgumentOutOfRangeException(nameof(size));
            }

            if ((ulong)endAddr < (ulong)position)
            {
                throw new ArgumentOutOfRangeException(nameof(position));
            }

            byte[] data = new byte[size];

            int offset = 0;

            while ((ulong)position < (ulong)endAddr)
            {
                long pageLimit = (position + PageSize) & ~(long)PageMask;

                if ((ulong)pageLimit > (ulong)endAddr)
                {
                    pageLimit = endAddr;
                }

                int copySize = (int)(pageLimit - position);

                Marshal.Copy(Translate(position), data, offset, copySize);

                position += copySize;
                offset += copySize;
            }

            return data;
        }

        public void ReadBytes(long position, byte[] data, int startIndex, int size)
        {
            // Note: This will be moved later.
            long endAddr = position + size;

            if ((ulong)size > int.MaxValue)
            {
                throw new ArgumentOutOfRangeException(nameof(size));
            }

            if ((ulong)endAddr < (ulong)position)
            {
                throw new ArgumentOutOfRangeException(nameof(position));
            }

            int offset = startIndex;

            while ((ulong)position < (ulong)endAddr)
            {
                long pageLimit = (position + PageSize) & ~(long)PageMask;

                if ((ulong)pageLimit > (ulong)endAddr)
                {
                    pageLimit = endAddr;
                }

                int copySize = (int)(pageLimit - position);

                Marshal.Copy(Translate(position), data, offset, copySize);

                position += copySize;
                offset += copySize;
            }
        }

        public void WriteSByte(long position, sbyte value)
        {
            WriteByte(position, (byte)value);
        }

        public void WriteInt16(long position, short value)
        {
            WriteUInt16(position, (ushort)value);
        }

        public void WriteInt32(long position, int value)
        {
            WriteUInt32(position, (uint)value);
        }

        public void WriteInt64(long position, long value)
        {
            WriteUInt64(position, (ulong)value);
        }

        public void WriteByte(long position, byte value)
        {
            *((byte*)TranslateWrite(position)) = value;
        }

        public void WriteUInt16(long position, ushort value)
        {
            if ((position & 1) == 0)
            {
                *((ushort*)TranslateWrite(position)) = value;
            }
            else
            {
                WriteByte(position + 0, (byte)(value >> 0));
                WriteByte(position + 1, (byte)(value >> 8));
            }
        }

        public void WriteUInt32(long position, uint value)
        {
            if ((position & 3) == 0)
            {
                *((uint*)TranslateWrite(position)) = value;
            }
            else
            {
                WriteUInt16(position + 0, (ushort)(value >> 0));
                WriteUInt16(position + 2, (ushort)(value >> 16));
            }
        }

        public void WriteUInt64(long position, ulong value)
        {
            if ((position & 7) == 0)
            {
                *((ulong*)TranslateWrite(position)) = value;
            }
            else
            {
                WriteUInt32(position + 0, (uint)(value >> 0));
                WriteUInt32(position + 4, (uint)(value >> 32));
            }
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public void WriteVector8(long position, Vector128<float> value)
        {
            if (Sse41.IsSupported)
            {
                WriteByte(position, Sse41.Extract(Sse.StaticCast<float, byte>(value), 0));
            }
            else if (Sse2.IsSupported)
            {
                WriteByte(position, (byte)Sse2.Extract(Sse.StaticCast<float, ushort>(value), 0));
            }
            else
            {
                WriteByte(position, (byte)VectorHelper.VectorExtractIntZx(value, 0, 0));
            }
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public void WriteVector16(long position, Vector128<float> value)
        {
            if (Sse2.IsSupported)
            {
                WriteUInt16(position, Sse2.Extract(Sse.StaticCast<float, ushort>(value), 0));
            }
            else
            {
                WriteUInt16(position, (ushort)VectorHelper.VectorExtractIntZx(value, 0, 1));
            }
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public void WriteVector32(long position, Vector128<float> value)
        {
            if (Sse.IsSupported && (position & 3) == 0)
            {
                Sse.StoreScalar((float*)TranslateWrite(position), value);
            }
            else
            {
                WriteUInt32(position, (uint)VectorHelper.VectorExtractIntZx(value, 0, 2));
            }
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public void WriteVector64(long position, Vector128<float> value)
        {
            if (Sse2.IsSupported && (position & 7) == 0)
            {
                Sse2.StoreScalar((double*)TranslateWrite(position), Sse.StaticCast<float, double>(value));
            }
            else
            {
                WriteUInt64(position, VectorHelper.VectorExtractIntZx(value, 0, 3));
            }
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public void WriteVector128Internal(long position, Vector128<float> value)
        {
            if (Sse.IsSupported && (position & 15) == 0)
            {
                Sse.Store((float*)TranslateWrite(position), value);
            }
            else
            {
                WriteUInt64(position + 0, VectorHelper.VectorExtractIntZx(value, 0, 3));
                WriteUInt64(position + 8, VectorHelper.VectorExtractIntZx(value, 1, 3));
            }
        }

        public void WriteVector128(long position, ARMeilleure.State.V128 value)
        {
            WriteUInt64(position + 0, value.GetUInt64(0));
            WriteUInt64(position + 8, value.GetUInt64(1));
        }

        public void WriteBytes(long position, byte[] data)
        {
            long endAddr = position + data.Length;

            if ((ulong)endAddr < (ulong)position)
            {
                throw new ArgumentOutOfRangeException(nameof(position));
            }

            int offset = 0;

            while ((ulong)position < (ulong)endAddr)
            {
                long pageLimit = (position + PageSize) & ~(long)PageMask;

                if ((ulong)pageLimit > (ulong)endAddr)
                {
                    pageLimit = endAddr;
                }

                int copySize = (int)(pageLimit - position);

                Marshal.Copy(data, offset, TranslateWrite(position), copySize);

                position += copySize;
                offset += copySize;
            }
        }

        public void WriteBytes(long position, byte[] data, int startIndex, int size)
        {
            // Note: This will be moved later.
            long endAddr = position + size;

            if ((ulong)endAddr < (ulong)position)
            {
                throw new ArgumentOutOfRangeException(nameof(position));
            }

            int offset = startIndex;

            while ((ulong)position < (ulong)endAddr)
            {
                long pageLimit = (position + PageSize) & ~(long)PageMask;

                if ((ulong)pageLimit > (ulong)endAddr)
                {
                    pageLimit = endAddr;
                }

                int copySize = (int)(pageLimit - position);

                // Use TranslateWrite (not Translate) so written pages also
                // drop their "not modified" flag, as in the other overload.
                Marshal.Copy(data, offset, TranslateWrite(position), copySize);

                position += copySize;
                offset += copySize;
            }
        }

        public void CopyBytes(long src, long dst, long size)
        {
            // Note: This will be moved later.
            if (IsContiguous(src, size) &&
                IsContiguous(dst, size))
            {
                byte* srcPtr = (byte*)Translate(src);
                byte* dstPtr = (byte*)Translate(dst);

                Buffer.MemoryCopy(srcPtr, dstPtr, size, size);
            }
            else
            {
                WriteBytes(dst, ReadBytes(src, size));
            }
        }
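
        // Disposal frees the page table; multi-level tables are freed
        // recursively. The RAM block passed to the constructor is not freed
        // here, as it is owned by the caller.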
        public void Dispose()
        {
            Dispose(true);
        }

        protected virtual void Dispose(bool disposing)
        {
            IntPtr ptr = Interlocked.Exchange(ref _pageTable, IntPtr.Zero);

            if (ptr != IntPtr.Zero)
            {
                FreePageTableEntry(ptr, PageBits);
            }
        }

        private void FreePageTableEntry(IntPtr ptr, int levelBitEnd)
        {
            levelBitEnd += PtLevelBits;

            if (levelBitEnd >= AddressSpaceBits)
            {
                Free(ptr);

                return;
            }

            for (int index = 0; index < PtLevelSize; index++)
            {
                IntPtr ptePtr = ((IntPtr*)ptr)[index];

                if (ptePtr != IntPtr.Zero)
                {
                    FreePageTableEntry(ptePtr, levelBitEnd);
                }
            }

            Free(ptr);
        }
    }
}