// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
|
2021-12-05 22:18:53 +01:00
|
|
|
|
2021-06-05 10:07:26 +02:00
|
|
|
#ifdef _WIN32
|
|
|
|
|
2021-06-05 11:23:25 +02:00
|
|
|
#include <iterator>
|
|
|
|
#include <unordered_map>
|
2021-06-08 07:14:12 +02:00
|
|
|
#include <boost/icl/separate_interval_set.hpp>
|
|
|
|
#include <windows.h>
|
|
|
|
#include "common/dynamic_library.h"
|
2021-06-05 11:23:25 +02:00
|
|
|
|
2021-07-27 22:00:09 +02:00
|
|
|
#elif defined(__linux__) || defined(__FreeBSD__) // ^^^ Windows ^^^ vvv Linux vvv
|
2021-06-05 10:07:26 +02:00
|
|
|
|
2022-12-18 08:29:15 +01:00
|
|
|
#ifdef ANDROID
|
|
|
|
#include <android/sharedmem.h>
|
|
|
|
#endif
|
|
|
|
|
2021-06-05 10:07:26 +02:00
|
|
|
#ifndef _GNU_SOURCE
|
|
|
|
#define _GNU_SOURCE
|
|
|
|
#endif
|
2023-06-01 17:21:22 +02:00
|
|
|
#include <boost/icl/interval_set.hpp>
|
2021-06-05 10:07:26 +02:00
|
|
|
#include <fcntl.h>
|
|
|
|
#include <sys/mman.h>
|
2023-11-17 19:33:43 +01:00
|
|
|
#include <sys/random.h>
|
2021-06-05 10:07:26 +02:00
|
|
|
#include <unistd.h>
|
2022-03-19 06:50:03 +01:00
|
|
|
#include "common/scope_exit.h"
|
2021-06-05 10:07:26 +02:00
|
|
|
|
|
|
|
#endif // ^^^ Linux ^^^
|
2021-06-05 11:23:25 +02:00
|
|
|
|
|
|
|
#include <mutex>
|
2023-11-17 19:33:43 +01:00
|
|
|
#include <random>
|
2021-06-05 11:23:25 +02:00
|
|
|
|
2021-06-05 11:47:08 +02:00
|
|
|
#include "common/alignment.h"
|
2021-06-05 11:23:25 +02:00
|
|
|
#include "common/assert.h"
|
2023-11-17 19:57:39 +01:00
|
|
|
#include "common/free_region_manager.h"
|
2021-06-05 11:23:25 +02:00
|
|
|
#include "common/host_memory.h"
|
|
|
|
#include "common/logging/log.h"
|
|
|
|
|
|
|
|
namespace Common {
|
|
|
|
|
|
|
|
// Granularity of all map/unmap/protect operations (4 KiB); the Linux backend
// asserts at construction that the host page size matches this.
constexpr size_t PageAlignment = 0x1000;
// 2 MiB huge-page size; used to over-allocate and align the virtual arena.
constexpr size_t HugePageSize = 0x200000;
|
2021-06-05 11:23:25 +02:00
|
|
|
|
|
|
|
#ifdef _WIN32
|
|
|
|
|
2021-06-08 07:14:12 +02:00
|
|
|
// Manually imported for MinGW compatibility
#ifndef MEM_RESERVE_PLACEHOLDER
#define MEM_RESERVE_PLACEHOLDER 0x00040000
#endif
#ifndef MEM_REPLACE_PLACEHOLDER
#define MEM_REPLACE_PLACEHOLDER 0x00004000
#endif
#ifndef MEM_COALESCE_PLACEHOLDERS
#define MEM_COALESCE_PLACEHOLDERS 0x00000001
#endif
#ifndef MEM_PRESERVE_PLACEHOLDER
#define MEM_PRESERVE_PLACEHOLDER 0x00000002
#endif

// Function-pointer types for the placeholder-aware virtual memory APIs.
// These are resolved from Kernelbase.dll at runtime (see GetFuncAddress)
// rather than linked directly, so the binary still loads with toolchains or
// import libraries that do not expose them.
using PFN_CreateFileMapping2 = _Ret_maybenull_ HANDLE(WINAPI*)(
    _In_ HANDLE File, _In_opt_ SECURITY_ATTRIBUTES* SecurityAttributes, _In_ ULONG DesiredAccess,
    _In_ ULONG PageProtection, _In_ ULONG AllocationAttributes, _In_ ULONG64 MaximumSize,
    _In_opt_ PCWSTR Name,
    _Inout_updates_opt_(ParameterCount) MEM_EXTENDED_PARAMETER* ExtendedParameters,
    _In_ ULONG ParameterCount);

using PFN_VirtualAlloc2 = _Ret_maybenull_ PVOID(WINAPI*)(
    _In_opt_ HANDLE Process, _In_opt_ PVOID BaseAddress, _In_ SIZE_T Size,
    _In_ ULONG AllocationType, _In_ ULONG PageProtection,
    _Inout_updates_opt_(ParameterCount) MEM_EXTENDED_PARAMETER* ExtendedParameters,
    _In_ ULONG ParameterCount);

using PFN_MapViewOfFile3 = _Ret_maybenull_ PVOID(WINAPI*)(
    _In_ HANDLE FileMapping, _In_opt_ HANDLE Process, _In_opt_ PVOID BaseAddress,
    _In_ ULONG64 Offset, _In_ SIZE_T ViewSize, _In_ ULONG AllocationType, _In_ ULONG PageProtection,
    _Inout_updates_opt_(ParameterCount) MEM_EXTENDED_PARAMETER* ExtendedParameters,
    _In_ ULONG ParameterCount);

using PFN_UnmapViewOfFile2 = BOOL(WINAPI*)(_In_ HANDLE Process, _In_ PVOID BaseAddress,
                                           _In_ ULONG UnmapFlags);
|
|
|
|
|
|
|
|
template <typename T>
|
|
|
|
static void GetFuncAddress(Common::DynamicLibrary& dll, const char* name, T& pfn) {
|
|
|
|
if (!dll.GetSymbol(name, &pfn)) {
|
|
|
|
LOG_CRITICAL(HW_Memory, "Failed to load {}", name);
|
|
|
|
throw std::bad_alloc{};
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-05 11:23:25 +02:00
|
|
|
/// Windows fastmem backend. A pagefile-backed section (`backing_handle`) holds the
/// guest's physical memory; a large reserved "placeholder" region (`virtual_base`)
/// receives views of that section on demand via MapViewOfFile3, split/coalesced
/// with VirtualFreeEx as mappings come and go. `placeholders` tracks which virtual
/// intervals currently hold a mapped view.
class HostMemory::Impl {
public:
    explicit Impl(size_t backing_size_, size_t virtual_size_)
        : backing_size{backing_size_}, virtual_size{virtual_size_}, process{GetCurrentProcess()},
          kernelbase_dll("Kernelbase") {
        if (!kernelbase_dll.IsOpen()) {
            LOG_CRITICAL(HW_Memory, "Failed to load Kernelbase.dll");
            throw std::bad_alloc{};
        }
        // Resolve the placeholder-aware APIs at runtime (MinGW compatibility).
        GetFuncAddress(kernelbase_dll, "CreateFileMapping2", pfn_CreateFileMapping2);
        GetFuncAddress(kernelbase_dll, "VirtualAlloc2", pfn_VirtualAlloc2);
        GetFuncAddress(kernelbase_dll, "MapViewOfFile3", pfn_MapViewOfFile3);
        GetFuncAddress(kernelbase_dll, "UnmapViewOfFile2", pfn_UnmapViewOfFile2);

        // Allocate backing file map
        backing_handle =
            pfn_CreateFileMapping2(INVALID_HANDLE_VALUE, nullptr, FILE_MAP_WRITE | FILE_MAP_READ,
                                   PAGE_READWRITE, SEC_COMMIT, backing_size, nullptr, nullptr, 0);
        if (!backing_handle) {
            LOG_CRITICAL(HW_Memory, "Failed to allocate {} MiB of backing memory",
                         backing_size >> 20);
            throw std::bad_alloc{};
        }
        // Allocate a virtual memory for the backing file map as placeholder
        backing_base = static_cast<u8*>(pfn_VirtualAlloc2(process, nullptr, backing_size,
                                                          MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
                                                          PAGE_NOACCESS, nullptr, 0));
        if (!backing_base) {
            Release();
            LOG_CRITICAL(HW_Memory, "Failed to reserve {} MiB of virtual memory",
                         backing_size >> 20);
            throw std::bad_alloc{};
        }
        // Map backing placeholder
        void* const ret = pfn_MapViewOfFile3(backing_handle, process, backing_base, 0, backing_size,
                                             MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0);
        if (ret != backing_base) {
            Release();
            LOG_CRITICAL(HW_Memory, "Failed to map {} MiB of virtual memory", backing_size >> 20);
            throw std::bad_alloc{};
        }
        // Allocate virtual address placeholder
        virtual_base = static_cast<u8*>(pfn_VirtualAlloc2(process, nullptr, virtual_size,
                                                          MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
                                                          PAGE_NOACCESS, nullptr, 0));
        if (!virtual_base) {
            Release();
            LOG_CRITICAL(HW_Memory, "Failed to reserve {} GiB of virtual memory",
                         virtual_size >> 30);
            throw std::bad_alloc{};
        }
    }

    ~Impl() {
        Release();
    }

    /// Map `length` bytes of the backing section at `virtual_offset`.
    /// Note: `perms` is currently unused here; views are always mapped
    /// PAGE_READWRITE and access is narrowed afterwards via Protect().
    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms) {
        std::unique_lock lock{placeholder_mutex};
        if (!IsNiechePlaceholder(virtual_offset, length)) {
            Split(virtual_offset, length);
        }
        ASSERT(placeholders.find({virtual_offset, virtual_offset + length}) == placeholders.end());
        TrackPlaceholder(virtual_offset, host_offset, length);

        MapView(virtual_offset, host_offset, length);
    }

    /// Unmap every placeholder view overlapping [virtual_offset, virtual_offset + length).
    void Unmap(size_t virtual_offset, size_t length) {
        std::scoped_lock lock{placeholder_mutex};

        // Unmap until there are no more placeholders
        while (UnmapOnePlaceholder(virtual_offset, length)) {
        }
    }

    /// Change protection on all mapped views intersecting the given range.
    /// `execute` is accepted for interface parity but not applied on Windows.
    void Protect(size_t virtual_offset, size_t length, bool read, bool write, bool execute) {
        DWORD new_flags{};
        if (read && write) {
            new_flags = PAGE_READWRITE;
        } else if (read && !write) {
            new_flags = PAGE_READONLY;
        } else if (!read && !write) {
            new_flags = PAGE_NOACCESS;
        } else {
            // write-only pages have no Windows protection constant equivalent here
            UNIMPLEMENTED_MSG("Protection flag combination read={} write={}", read, write);
        }
        const size_t virtual_end = virtual_offset + length;

        std::scoped_lock lock{placeholder_mutex};
        auto [it, end] = placeholders.equal_range({virtual_offset, virtual_end});
        while (it != end) {
            // Clamp each placeholder interval to the requested range.
            const size_t offset = std::max(it->lower(), virtual_offset);
            const size_t protect_length = std::min(it->upper(), virtual_end) - offset;
            DWORD old_flags{};
            if (!VirtualProtect(virtual_base + offset, protect_length, new_flags, &old_flags)) {
                LOG_CRITICAL(HW_Memory, "Failed to change virtual memory protect rules");
            }
            ++it;
        }
    }

    const size_t backing_size; ///< Size of the backing memory in bytes
    const size_t virtual_size; ///< Size of the virtual address placeholder in bytes

    u8* backing_base{};
    u8* virtual_base{};

private:
    /// Release all resources in the object
    void Release() {
        if (!placeholders.empty()) {
            for (const auto& placeholder : placeholders) {
                if (!pfn_UnmapViewOfFile2(process, virtual_base + placeholder.lower(),
                                          MEM_PRESERVE_PLACEHOLDER)) {
                    LOG_CRITICAL(HW_Memory, "Failed to unmap virtual memory placeholder");
                }
            }
            Coalesce(0, virtual_size);
        }
        if (virtual_base) {
            if (!VirtualFree(virtual_base, 0, MEM_RELEASE)) {
                LOG_CRITICAL(HW_Memory, "Failed to free virtual memory");
            }
        }
        if (backing_base) {
            if (!pfn_UnmapViewOfFile2(process, backing_base, MEM_PRESERVE_PLACEHOLDER)) {
                LOG_CRITICAL(HW_Memory, "Failed to unmap backing memory placeholder");
            }
            if (!VirtualFreeEx(process, backing_base, 0, MEM_RELEASE)) {
                LOG_CRITICAL(HW_Memory, "Failed to free backing memory");
            }
        }
        if (!CloseHandle(backing_handle)) {
            LOG_CRITICAL(HW_Memory, "Failed to free backing memory file handle");
        }
    }

    /// Unmap one placeholder in the given range (partial unmaps are supported)
    /// Return true when there are no more placeholders to unmap
    bool UnmapOnePlaceholder(size_t virtual_offset, size_t length) {
        const auto it = placeholders.find({virtual_offset, virtual_offset + length});
        const auto begin = placeholders.begin();
        const auto end = placeholders.end();
        if (it == end) {
            return false;
        }
        const size_t placeholder_begin = it->lower();
        const size_t placeholder_end = it->upper();
        const size_t unmap_begin = std::max(virtual_offset, placeholder_begin);
        const size_t unmap_end = std::min(virtual_offset + length, placeholder_end);
        ASSERT(unmap_begin >= placeholder_begin && unmap_begin < placeholder_end);
        ASSERT(unmap_end <= placeholder_end && unmap_end > placeholder_begin);

        const auto host_pointer_it = placeholder_host_pointers.find(placeholder_begin);
        ASSERT(host_pointer_it != placeholder_host_pointers.end());
        const size_t host_offset = host_pointer_it->second;

        const bool split_left = unmap_begin > placeholder_begin;
        const bool split_right = unmap_end < placeholder_end;

        if (!pfn_UnmapViewOfFile2(process, virtual_base + placeholder_begin,
                                  MEM_PRESERVE_PLACEHOLDER)) {
            LOG_CRITICAL(HW_Memory, "Failed to unmap placeholder");
        }
        // If we have to remap memory regions due to partial unmaps, we are in a data race as
        // Windows doesn't support remapping memory without unmapping first. Avoid adding any extra
        // logic within the panic region described below.

        // Panic region, we are in a data race right now
        if (split_left || split_right) {
            Split(unmap_begin, unmap_end - unmap_begin);
        }
        if (split_left) {
            MapView(placeholder_begin, host_offset, unmap_begin - placeholder_begin);
        }
        if (split_right) {
            MapView(unmap_end, host_offset + unmap_end - placeholder_begin,
                    placeholder_end - unmap_end);
        }
        // End panic region

        size_t coalesce_begin = unmap_begin;
        if (!split_left) {
            // Try to coalesce pages to the left
            coalesce_begin = it == begin ? 0 : std::prev(it)->upper();
            if (coalesce_begin != placeholder_begin) {
                Coalesce(coalesce_begin, unmap_end - coalesce_begin);
            }
        }
        if (!split_right) {
            // Try to coalesce pages to the right
            const auto next = std::next(it);
            const size_t next_begin = next == end ? virtual_size : next->lower();
            if (placeholder_end != next_begin) {
                // We can coalesce to the right
                Coalesce(coalesce_begin, next_begin - coalesce_begin);
            }
        }
        // Remove and reinsert placeholder trackers
        UntrackPlaceholder(it);
        if (split_left) {
            TrackPlaceholder(placeholder_begin, host_offset, unmap_begin - placeholder_begin);
        }
        if (split_right) {
            TrackPlaceholder(unmap_end, host_offset + unmap_end - placeholder_begin,
                             placeholder_end - unmap_end);
        }
        return true;
    }

    /// Map a read-write view of the backing section over an existing placeholder.
    void MapView(size_t virtual_offset, size_t host_offset, size_t length) {
        if (!pfn_MapViewOfFile3(backing_handle, process, virtual_base + virtual_offset, host_offset,
                                length, MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0)) {
            LOG_CRITICAL(HW_Memory, "Failed to map placeholder");
        }
    }

    /// Carve a sub-placeholder of `length` bytes out of the placeholder containing the range.
    void Split(size_t virtual_offset, size_t length) {
        if (!VirtualFreeEx(process, reinterpret_cast<LPVOID>(virtual_base + virtual_offset), length,
                           MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER)) {
            LOG_CRITICAL(HW_Memory, "Failed to split placeholder");
        }
    }

    /// Merge adjacent free placeholders covering the range back into one.
    void Coalesce(size_t virtual_offset, size_t length) {
        if (!VirtualFreeEx(process, reinterpret_cast<LPVOID>(virtual_base + virtual_offset), length,
                           MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS)) {
            LOG_CRITICAL(HW_Memory, "Failed to coalesce placeholders");
        }
    }

    /// Record a mapped interval and its backing offset. Caller holds placeholder_mutex.
    void TrackPlaceholder(size_t virtual_offset, size_t host_offset, size_t length) {
        placeholders.insert({virtual_offset, virtual_offset + length});
        placeholder_host_pointers.emplace(virtual_offset, host_offset);
    }

    /// Remove a tracked interval. Caller holds placeholder_mutex.
    void UntrackPlaceholder(boost::icl::separate_interval_set<size_t>::iterator it) {
        placeholder_host_pointers.erase(it->lower());
        placeholders.erase(it);
    }

    /// Return true when a given memory region is a "nieche" and the placeholders don't have to be
    /// split.
    bool IsNiechePlaceholder(size_t virtual_offset, size_t length) const {
        const auto it = placeholders.upper_bound({virtual_offset, virtual_offset + length});
        if (it != placeholders.end() && it->lower() == virtual_offset + length) {
            // Exactly bounded by a mapped placeholder on the right and either the
            // arena start or a mapped placeholder on the left: no split needed.
            return it == placeholders.begin() ? virtual_offset == 0
                                              : std::prev(it)->upper() == virtual_offset;
        }
        return false;
    }

    // NOTE(review): this sits in the private section, yet
    // HostMemory::EnableDirectMappedAddress() calls impl->EnableDirectMappedAddress()
    // — confirm the intended access specifier; an enclosing class has no special
    // access to a nested class's private members.
    void EnableDirectMappedAddress() {
        // TODO
        UNREACHABLE();
    }

    HANDLE process{};        ///< Current process handle
    HANDLE backing_handle{}; ///< File based backing memory

    DynamicLibrary kernelbase_dll;
    PFN_CreateFileMapping2 pfn_CreateFileMapping2{};
    PFN_VirtualAlloc2 pfn_VirtualAlloc2{};
    PFN_MapViewOfFile3 pfn_MapViewOfFile3{};
    PFN_UnmapViewOfFile2 pfn_UnmapViewOfFile2{};

    std::mutex placeholder_mutex;                                 ///< Mutex for placeholders
    boost::icl::separate_interval_set<size_t> placeholders;       ///< Mapped placeholders
    std::unordered_map<size_t, size_t> placeholder_host_pointers; ///< Placeholder backing offset
};
|
|
|
|
|
2021-07-27 22:00:09 +02:00
|
|
|
#elif defined(__linux__) || defined(__FreeBSD__) // ^^^ Windows ^^^ vvv Linux vvv
|
2021-06-05 10:07:26 +02:00
|
|
|
|
2023-11-17 19:33:43 +01:00
|
|
|
#ifdef ARCHITECTURE_arm64
|
|
|
|
|
|
|
|
/// Return 64 bits of strong randomness from the kernel, used only to seed the
/// address-probing RNG below (not a cryptographic consumer).
uint64_t GetRandomU64() {
    uint64_t ret;
    // getrandom(2) returns the number of bytes written on success (here 8) and
    // -1 on error, so the success check must compare against sizeof(ret), not 0.
    // Requests of <= 256 bytes from the urandom pool are not interrupted/short.
    // Perform the call outside ASSERT so it still runs if asserts are disabled.
    const ssize_t got = getrandom(&ret, sizeof(ret), 0);
    ASSERT(got == sizeof(ret));
    return ret;
}
|
|
|
|
|
|
|
|
/// Pick a base address for the virtual arena on arm64 Linux/Android.
/// Probes random 2 MiB-aligned addresses in the (2^36, 2^39) range and returns
/// the first one the kernel actually grants, or MAP_FAILED after 64 attempts.
void* ChooseVirtualBase(size_t virtual_size) {
    constexpr uintptr_t Map39BitSize = (1ULL << 39);
    constexpr uintptr_t Map36BitSize = (1ULL << 36);

    // Seed the MT with some initial strong randomness.
    //
    // This is not a cryptographic application, we just want something more
    // random than the current time.
    std::mt19937_64 rng(GetRandomU64());

    // We want to ensure we are allocating at an address aligned to the L2 block size.
    // For Qualcomm devices, we must also allocate memory above 36 bits.
    const size_t lower = Map36BitSize / HugePageSize;
    const size_t upper = (Map39BitSize - virtual_size) / HugePageSize;
    const size_t range = upper - lower;

    // Try up to 64 times to allocate memory at random addresses in the range.
    for (int i = 0; i < 64; i++) {
        // Calculate a possible location.
        uintptr_t hint_address = ((rng() % range) + lower) * HugePageSize;

        // Try to map.
        // Note: we may be able to take advantage of MAP_FIXED_NOREPLACE here.
        void* map_pointer =
            mmap(reinterpret_cast<void*>(hint_address), virtual_size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);

        // If we successfully mapped, we're done.
        if (reinterpret_cast<uintptr_t>(map_pointer) == hint_address) {
            return map_pointer;
        }

        // Unmap if necessary, and try again.
        // (The hint is non-binding: the kernel may have mapped elsewhere.)
        if (map_pointer != MAP_FAILED) {
            munmap(map_pointer, virtual_size);
        }
    }

    return MAP_FAILED;
}
|
|
|
|
#else
|
|
|
|
/// Reserve the virtual arena at any kernel-chosen address (non-arm64 path).
void* ChooseVirtualBase(size_t virtual_size) {
    // MAP_NORESERVE avoids committing swap for the (potentially huge) arena
    // up front; no placement constraints apply on this architecture.
    constexpr int protection = PROT_READ | PROT_WRITE;
    constexpr int map_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
    return mmap(nullptr, virtual_size, protection, map_flags, -1, 0);
}
|
|
|
|
#endif
|
|
|
|
|
2021-06-05 10:07:26 +02:00
|
|
|
/// Linux/FreeBSD/Android fastmem backend. An anonymous shared-memory file (`fd`)
/// backs guest physical memory; a PROT_NONE reservation (`virtual_base`) is the
/// guest address arena, into which MAP_FIXED views of the file are mapped and
/// later replaced with fresh PROT_NONE anonymous mappings on unmap.
class HostMemory::Impl {
public:
    explicit Impl(size_t backing_size_, size_t virtual_size_)
        : backing_size{backing_size_}, virtual_size{virtual_size_} {
        // Ensure partially-constructed state is torn down on any early throw.
        bool good = false;
        SCOPE_EXIT({
            if (!good) {
                Release();
            }
        });

        // PageAlignment assumes 4 KiB pages; bail out on e.g. 16K/64K kernels.
        long page_size = sysconf(_SC_PAGESIZE);
        if (page_size != 0x1000) {
            LOG_CRITICAL(HW_Memory, "page size {:#x} is incompatible with 4K paging", page_size);
            throw std::bad_alloc{};
        }

        // Backing memory initialization
#ifdef ANDROID
        fd = ASharedMemory_create("HostMemory", backing_size);
#elif defined(__FreeBSD__) && __FreeBSD__ < 13
        // XXX Drop after FreeBSD 12.* reaches EOL on 2024-06-30
        fd = shm_open(SHM_ANON, O_RDWR, 0600);
#else
        fd = memfd_create("HostMemory", 0);
#endif
        if (fd < 0) {
            LOG_CRITICAL(HW_Memory, "memfd_create failed: {}", strerror(errno));
            throw std::bad_alloc{};
        }

#ifndef ANDROID
        // Defined to extend the file with zeros
        // (ASharedMemory_create already sizes the region on Android.)
        int ret = ftruncate(fd, backing_size);
        if (ret != 0) {
            LOG_CRITICAL(HW_Memory, "ftruncate failed with {}, are you out-of-memory?",
                         strerror(errno));
            throw std::bad_alloc{};
        }
#endif

        backing_base = static_cast<u8*>(
            mmap(nullptr, backing_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
        if (backing_base == MAP_FAILED) {
            LOG_CRITICAL(HW_Memory, "mmap failed: {}", strerror(errno));
            throw std::bad_alloc{};
        }

        // Virtual memory initialization
#if defined(__FreeBSD__)
        // Prefer superpage alignment; fall back to an unaligned mapping.
        // NOTE(review): this branch never sets virtual_map_base, so Release()
        // will skip munmap of the arena on FreeBSD — confirm intended.
        virtual_base =
            static_cast<u8*>(mmap(nullptr, virtual_size, PROT_NONE,
                                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_ALIGNED_SUPER, -1, 0));
        if (virtual_base == MAP_FAILED) {
            virtual_base = static_cast<u8*>(
                mmap(nullptr, virtual_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
            if (virtual_base == MAP_FAILED) {
                LOG_CRITICAL(HW_Memory, "mmap failed: {}", strerror(errno));
                throw std::bad_alloc{};
            }
        }
#else
        // Keep the original mapping base for munmap; virtual_base itself may be
        // cleared later by EnableDirectMappedAddress().
        virtual_base = virtual_map_base = static_cast<u8*>(ChooseVirtualBase(virtual_size));
        if (virtual_base == MAP_FAILED) {
            LOG_CRITICAL(HW_Memory, "mmap failed: {}", strerror(errno));
            throw std::bad_alloc{};
        }
        madvise(virtual_base, virtual_size, MADV_HUGEPAGE);
#endif

        free_manager.SetAddressSpace(virtual_base, virtual_size);
        good = true;
    }

    ~Impl() {
        Release();
    }

    /// Map `length` bytes of the backing file at `virtual_offset` with `perms`.
    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms) {
        // Intersect the range with our address space.
        AdjustMap(&virtual_offset, &length);

        // We are removing a placeholder.
        free_manager.AllocateBlock(virtual_base + virtual_offset, length);

        // Deduce mapping protection flags.
        int flags = PROT_NONE;
        if (True(perms & MemoryPermission::Read)) {
            flags |= PROT_READ;
        }
        if (True(perms & MemoryPermission::Write)) {
            flags |= PROT_WRITE;
        }
#ifdef ARCHITECTURE_arm64
        if (True(perms & MemoryPermission::Execute)) {
            flags |= PROT_EXEC;
        }
#endif

        // MAP_FIXED atomically replaces whatever occupied this arena range.
        void* ret = mmap(virtual_base + virtual_offset, length, flags, MAP_SHARED | MAP_FIXED, fd,
                         host_offset);
        ASSERT_MSG(ret != MAP_FAILED, "mmap failed: {}", strerror(errno));
    }

    void Unmap(size_t virtual_offset, size_t length) {
        // The method name is wrong. We're still talking about the virtual range.
        // We don't want to unmap, we want to reserve this memory.

        // Intersect the range with our address space.
        AdjustMap(&virtual_offset, &length);

        // Merge with any adjacent placeholder mappings.
        auto [merged_pointer, merged_size] =
            free_manager.FreeBlock(virtual_base + virtual_offset, length);

        // Replace the view with an inaccessible anonymous placeholder mapping.
        void* ret = mmap(merged_pointer, merged_size, PROT_NONE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        ASSERT_MSG(ret != MAP_FAILED, "mmap failed: {}", strerror(errno));
    }

    /// Change protection of an already-mapped range in place via mprotect.
    void Protect(size_t virtual_offset, size_t length, bool read, bool write, bool execute) {
        // Intersect the range with our address space.
        AdjustMap(&virtual_offset, &length);

        int flags = PROT_NONE;
        if (read) {
            flags |= PROT_READ;
        }
        if (write) {
            flags |= PROT_WRITE;
        }
#ifdef ARCHITECTURE_arm64
        if (execute) {
            flags |= PROT_EXEC;
        }
#endif
        int ret = mprotect(virtual_base + virtual_offset, length, flags);
        ASSERT_MSG(ret == 0, "mprotect failed: {}", strerror(errno));
    }

    /// Switch to direct-mapped mode: offsets passed to Map/Unmap/Protect become
    /// absolute host addresses; AdjustMap() then clamps them to the arena.
    void EnableDirectMappedAddress() {
        virtual_base = nullptr;
    }

    const size_t backing_size; ///< Size of the backing memory in bytes
    const size_t virtual_size; ///< Size of the virtual address placeholder in bytes

    u8* backing_base{reinterpret_cast<u8*>(MAP_FAILED)};
    u8* virtual_base{reinterpret_cast<u8*>(MAP_FAILED)};
    u8* virtual_map_base{reinterpret_cast<u8*>(MAP_FAILED)};

private:
    /// Release all resources in the object
    void Release() {
        if (virtual_map_base != MAP_FAILED) {
            int ret = munmap(virtual_map_base, virtual_size);
            ASSERT_MSG(ret == 0, "munmap failed: {}", strerror(errno));
        }

        if (backing_base != MAP_FAILED) {
            int ret = munmap(backing_base, backing_size);
            ASSERT_MSG(ret == 0, "munmap failed: {}", strerror(errno));
        }

        if (fd != -1) {
            int ret = close(fd);
            ASSERT_MSG(ret == 0, "close failed: {}", strerror(errno));
        }
    }

    /// Clamp *virtual_offset/*length to the arena when in direct-mapped mode
    /// (virtual_base == nullptr); no-op otherwise. Disjoint ranges collapse to 0/0.
    void AdjustMap(size_t* virtual_offset, size_t* length) {
        if (virtual_base != nullptr) {
            return;
        }

        // If we are direct mapped, we want to make sure we are operating on a region
        // that is in range of our virtual mapping.
        size_t intended_start = *virtual_offset;
        size_t intended_end = intended_start + *length;
        size_t address_space_start = reinterpret_cast<size_t>(virtual_map_base);
        size_t address_space_end = address_space_start + virtual_size;

        if (address_space_start > intended_end || intended_start > address_space_end) {
            *virtual_offset = 0;
            *length = 0;
        } else {
            *virtual_offset = std::max(intended_start, address_space_start);
            *length = std::min(intended_end, address_space_end) - *virtual_offset;
        }
    }

    int fd{-1}; // memfd file descriptor, -1 is the error value of memfd_create
    FreeRegionManager free_manager{};
};
|
|
|
|
|
2021-06-11 11:47:23 +02:00
|
|
|
#else // ^^^ Linux ^^^ vvv Generic vvv
|
2021-06-05 11:23:25 +02:00
|
|
|
|
2021-06-11 11:47:23 +02:00
|
|
|
/// Stub backend for platforms without a fastmem implementation. Construction
/// always throws, which makes HostMemory fall back to VirtualBuffer.
class HostMemory::Impl {
public:
    explicit Impl(size_t /* backing_size */, size_t /* virtual_size */) {
        // This is just a place holder.
        // Please implement fastmem in a proper way on your platform.
        throw std::bad_alloc{};
    }

    // Never reached: the constructor always throws before these can be called.
    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perm) {}
    void Unmap(size_t virtual_offset, size_t length) {}
    void Protect(size_t virtual_offset, size_t length, bool read, bool write, bool execute) {}

    u8* backing_base{nullptr};
    u8* virtual_base{nullptr};
};
|
|
|
|
|
|
|
|
#endif // ^^^ Generic ^^^
|
2021-06-05 11:23:25 +02:00
|
|
|
|
2021-06-05 11:47:08 +02:00
|
|
|
HostMemory::HostMemory(size_t backing_size_, size_t virtual_size_)
    : backing_size(backing_size_), virtual_size(virtual_size_) {
    try {
        // Try to allocate a fastmem arena.
        // The implementation will fail with std::bad_alloc on errors.
        // The virtual arena is over-allocated by one huge page so the base can
        // be aligned up to HugePageSize below without losing capacity.
        impl =
            std::make_unique<HostMemory::Impl>(AlignUp(backing_size, PageAlignment),
                                               AlignUp(virtual_size, PageAlignment) + HugePageSize);
        backing_base = impl->backing_base;
        virtual_base = impl->virtual_base;

        if (virtual_base) {
            // Ensure the virtual base is aligned to the L2 block size.
            virtual_base = reinterpret_cast<u8*>(
                Common::AlignUp(reinterpret_cast<uintptr_t>(virtual_base), HugePageSize));
            // Offset between the aligned public base and the impl's raw base;
            // added back onto offsets before forwarding to the impl.
            virtual_base_offset = virtual_base - impl->virtual_base;
        }

    } catch (const std::bad_alloc&) {
        // No fastmem: fall back to a plain allocation with no virtual arena.
        LOG_CRITICAL(HW_Memory,
                     "Fastmem unavailable, falling back to VirtualBuffer for memory allocation");
        fallback_buffer = std::make_unique<Common::VirtualBuffer<u8>>(backing_size);
        backing_base = fallback_buffer->data();
        virtual_base = nullptr;
    }
}
|
2021-06-05 11:23:25 +02:00
|
|
|
|
|
|
|
// Defaulted out-of-line, where Impl is a complete type (required for the
// unique_ptr<Impl> member's destructor/move to instantiate).
HostMemory::~HostMemory() = default;

HostMemory::HostMemory(HostMemory&&) noexcept = default;

HostMemory& HostMemory::operator=(HostMemory&&) noexcept = default;
|
|
|
|
|
2023-11-17 20:43:15 +01:00
|
|
|
void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length,
|
|
|
|
MemoryPermission perms) {
|
2021-06-05 11:23:25 +02:00
|
|
|
ASSERT(virtual_offset % PageAlignment == 0);
|
|
|
|
ASSERT(host_offset % PageAlignment == 0);
|
|
|
|
ASSERT(length % PageAlignment == 0);
|
2021-06-05 11:47:08 +02:00
|
|
|
ASSERT(virtual_offset + length <= virtual_size);
|
|
|
|
ASSERT(host_offset + length <= backing_size);
|
2021-06-11 11:47:23 +02:00
|
|
|
if (length == 0 || !virtual_base || !impl) {
|
2021-06-05 11:23:25 +02:00
|
|
|
return;
|
|
|
|
}
|
2023-11-17 20:43:15 +01:00
|
|
|
impl->Map(virtual_offset + virtual_base_offset, host_offset, length, perms);
|
2021-06-05 11:23:25 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void HostMemory::Unmap(size_t virtual_offset, size_t length) {
|
|
|
|
ASSERT(virtual_offset % PageAlignment == 0);
|
|
|
|
ASSERT(length % PageAlignment == 0);
|
2021-06-05 11:47:08 +02:00
|
|
|
ASSERT(virtual_offset + length <= virtual_size);
|
2021-06-11 11:47:23 +02:00
|
|
|
if (length == 0 || !virtual_base || !impl) {
|
2021-06-05 11:23:25 +02:00
|
|
|
return;
|
|
|
|
}
|
2021-06-05 11:47:08 +02:00
|
|
|
impl->Unmap(virtual_offset + virtual_base_offset, length);
|
2021-06-05 11:23:25 +02:00
|
|
|
}
|
|
|
|
|
2023-11-17 20:43:15 +01:00
|
|
|
void HostMemory::Protect(size_t virtual_offset, size_t length, bool read, bool write,
|
|
|
|
bool execute) {
|
2021-06-05 11:23:25 +02:00
|
|
|
ASSERT(virtual_offset % PageAlignment == 0);
|
|
|
|
ASSERT(length % PageAlignment == 0);
|
2021-06-05 11:47:08 +02:00
|
|
|
ASSERT(virtual_offset + length <= virtual_size);
|
2021-06-11 11:47:23 +02:00
|
|
|
if (length == 0 || !virtual_base || !impl) {
|
2021-06-05 11:23:25 +02:00
|
|
|
return;
|
|
|
|
}
|
2023-11-17 20:43:15 +01:00
|
|
|
impl->Protect(virtual_offset + virtual_base_offset, length, read, write, execute);
|
2021-06-05 11:23:25 +02:00
|
|
|
}
|
|
|
|
|
2023-11-17 19:57:39 +01:00
|
|
|
void HostMemory::EnableDirectMappedAddress() {
    if (impl) {
        impl->EnableDirectMappedAddress();
        // After switching, callers pass absolute host addresses as offsets, so
        // widen the bound checked by Map/Unmap/Protect to cover the arena's
        // absolute end. NOTE(review): presumably virtual_base here is the old
        // arena base being folded into virtual_size — confirm against callers.
        virtual_size += reinterpret_cast<uintptr_t>(virtual_base);
    }
}
|
|
|
|
|
2021-06-05 11:23:25 +02:00
|
|
|
} // namespace Common
|