// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include "common/alignment.h"
#include "common/assert.h"
#include "common/literals.h"
#include "common/scope_exit.h"
#include "core/core.h"
#include "core/hle/kernel/k_address_space_info.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_system_control.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
#include "core/memory.h"

namespace Kernel {

namespace {

using namespace Common::Literals;

constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) {
    switch (as_type) {
    case FileSys::ProgramAddressSpaceType::Is32Bit:
    case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
        return 32;
    case FileSys::ProgramAddressSpaceType::Is36Bit:
        return 36;
    case FileSys::ProgramAddressSpaceType::Is39Bit:
        return 39;
    default:
        ASSERT(false);
        return {};
    }
}

} // namespace

KPageTable::KPageTable(Core::System& system_)
    : general_lock{system_.Kernel()}, map_physical_memory_lock{system_.Kernel()}, system{system_} {}

KPageTable::~KPageTable() = default;

Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
                                        VAddr code_addr, std::size_t code_size,
                                        KMemoryBlockSlabManager* mem_block_slab_manager,
                                        KMemoryManager::Pool pool) {
    const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) {
        return KAddressSpaceInfo::GetAddressSpaceStart(address_space_width, type);
    };
    const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) {
        return KAddressSpaceInfo::GetAddressSpaceSize(address_space_width, type);
    };

    // Set our width and heap/alias sizes
    address_space_width = GetAddressSpaceWidthFromType(as_type);
    const VAddr start = 0;
    const VAddr end{1ULL << address_space_width};
    std::size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
    std::size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};

    ASSERT(code_addr < code_addr + code_size);
    ASSERT(code_addr + code_size - 1 <= end - 1);

    // Adjust heap/alias size if we don't have an alias region
    if (as_type == FileSys::ProgramAddressSpaceType::Is32BitNoMap) {
        heap_region_size += alias_region_size;
        alias_region_size = 0;
    }

    // Set code regions and determine remaining
    constexpr std::size_t RegionAlignment{2_MiB};
    VAddr process_code_start{};
    VAddr process_code_end{};
    std::size_t stack_region_size{};
    std::size_t kernel_map_region_size{};

    if (address_space_width == 39) {
        alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias);
        heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap);
        stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack);
        kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
        code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit);
        code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit);
        alias_code_region_start = code_region_start;
        alias_code_region_end = code_region_end;
        process_code_start = Common::AlignDown(code_addr, RegionAlignment);
        process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment);
    } else {
        stack_region_size = 0;
        kernel_map_region_size = 0;
        code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall);
        code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall);
        stack_region_start = code_region_start;
        alias_code_region_start = code_region_start;
        alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) +
                                GetSpaceSize(KAddressSpaceInfo::Type::MapLarge);
        stack_region_end = code_region_end;
        kernel_map_region_start = code_region_start;
        kernel_map_region_end = code_region_end;
        process_code_start = code_region_start;
        process_code_end = code_region_end;
    }
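
    // For example (hypothetical values): an image loaded at code_addr = 0x0800'4000 with
    // code_size = 0x3'0000 would, on a 39-bit address space with RegionAlignment = 2_MiB, give
    //   process_code_start = Common::AlignDown(0x0800'4000, 2_MiB) = 0x0800'0000
    //   process_code_end   = Common::AlignUp(0x0803'4000, 2_MiB)   = 0x0820'0000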

    // Set other basic fields
    is_aslr_enabled = enable_aslr;
    address_space_start = start;
    address_space_end = end;
    is_kernel = false;
    memory_block_slab_manager = mem_block_slab_manager;

    // Determine the region we can place our undetermineds in
    VAddr alloc_start{};
    std::size_t alloc_size{};
    if ((process_code_start - code_region_start) >= (end - process_code_end)) {
        alloc_start = code_region_start;
        alloc_size = process_code_start - code_region_start;
    } else {
        alloc_start = process_code_end;
        alloc_size = end - process_code_end;
    }
    const std::size_t needed_size{
        (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)};
    if (alloc_size < needed_size) {
        ASSERT(false);
        return ResultOutOfMemory;
    }

    const std::size_t remaining_size{alloc_size - needed_size};
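
    // ASLR note: each of the alias/heap/stack/kernel-map regions first receives an independent
    // RegionAlignment-aligned random offset within the leftover space. The pairwise shifts below
    // then slide each region past every region whose random offset was smaller, so the regions
    // end up packed back-to-back in the order of their draws; this always fits because
    // needed_size was already carved out of alloc_size above.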

    // Determine random placements for each region
    std::size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{};
    if (enable_aslr) {
        alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
                    RegionAlignment;
        heap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
                   RegionAlignment;
        stack_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
                    RegionAlignment;
        kmap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
                   RegionAlignment;
    }

    // Setup heap and alias regions
    alias_region_start = alloc_start + alias_rnd;
    alias_region_end = alias_region_start + alias_region_size;
    heap_region_start = alloc_start + heap_rnd;
    heap_region_end = heap_region_start + heap_region_size;

    if (alias_rnd <= heap_rnd) {
        heap_region_start += alias_region_size;
        heap_region_end += alias_region_size;
    } else {
        alias_region_start += heap_region_size;
        alias_region_end += heap_region_size;
    }

    // Setup stack region
    if (stack_region_size) {
        stack_region_start = alloc_start + stack_rnd;
        stack_region_end = stack_region_start + stack_region_size;

        if (alias_rnd < stack_rnd) {
            stack_region_start += alias_region_size;
            stack_region_end += alias_region_size;
        } else {
            alias_region_start += stack_region_size;
            alias_region_end += stack_region_size;
        }

        if (heap_rnd < stack_rnd) {
            stack_region_start += heap_region_size;
            stack_region_end += heap_region_size;
        } else {
            heap_region_start += stack_region_size;
            heap_region_end += stack_region_size;
        }
    }

    // Setup kernel map region
    if (kernel_map_region_size) {
        kernel_map_region_start = alloc_start + kmap_rnd;
        kernel_map_region_end = kernel_map_region_start + kernel_map_region_size;

        if (alias_rnd < kmap_rnd) {
            kernel_map_region_start += alias_region_size;
            kernel_map_region_end += alias_region_size;
        } else {
            alias_region_start += kernel_map_region_size;
            alias_region_end += kernel_map_region_size;
        }

        if (heap_rnd < kmap_rnd) {
            kernel_map_region_start += heap_region_size;
            kernel_map_region_end += heap_region_size;
        } else {
            heap_region_start += kernel_map_region_size;
            heap_region_end += kernel_map_region_size;
        }

        if (stack_region_size) {
            if (stack_rnd < kmap_rnd) {
                kernel_map_region_start += stack_region_size;
                kernel_map_region_end += stack_region_size;
            } else {
                stack_region_start += kernel_map_region_size;
                stack_region_end += kernel_map_region_size;
            }
        }
    }

    // Set heap members
    current_heap_end = heap_region_start;
    max_heap_size = 0;
    max_physical_memory_size = 0;

    // Ensure that the regions we selected are inside our address space
    auto IsInAddressSpace = [&](VAddr addr) {
        return address_space_start <= addr && addr <= address_space_end;
    };
    ASSERT(IsInAddressSpace(alias_region_start));
    ASSERT(IsInAddressSpace(alias_region_end));
    ASSERT(IsInAddressSpace(heap_region_start));
    ASSERT(IsInAddressSpace(heap_region_end));
    ASSERT(IsInAddressSpace(stack_region_start));
    ASSERT(IsInAddressSpace(stack_region_end));
    ASSERT(IsInAddressSpace(kernel_map_region_start));
    ASSERT(IsInAddressSpace(kernel_map_region_end));

    // Ensure that we selected regions that don't overlap
    const VAddr alias_start{alias_region_start};
    const VAddr alias_last{alias_region_end - 1};
    const VAddr heap_start{heap_region_start};
    const VAddr heap_last{heap_region_end - 1};
    const VAddr stack_start{stack_region_start};
    const VAddr stack_last{stack_region_end - 1};
    const VAddr kmap_start{kernel_map_region_start};
    const VAddr kmap_last{kernel_map_region_end - 1};
    ASSERT(alias_last < heap_start || heap_last < alias_start);
    ASSERT(alias_last < stack_start || stack_last < alias_start);
    ASSERT(alias_last < kmap_start || kmap_last < alias_start);
    ASSERT(heap_last < stack_start || stack_last < heap_start);
    ASSERT(heap_last < kmap_start || kmap_last < heap_start);

    current_heap_end = heap_region_start;
    max_heap_size = 0;
    mapped_physical_memory_size = 0;
    memory_pool = pool;

    page_table_impl.Resize(address_space_width, PageBits);

    return memory_block_manager.Initialize(address_space_start, address_space_end,
                                           memory_block_slab_manager);
}

void KPageTable::Finalize() {
    memory_block_manager.Finalize(memory_block_slab_manager, [&](VAddr addr, u64 size) {
        system.Memory().UnmapRegion(page_table_impl, addr, size);
    });
}

Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state,
                                  KMemoryPermission perm) {
    const u64 size{num_pages * PageSize};

    // Validate the mapping request.
    R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);

    // Lock the table.
    KScopedLightLock lk(general_lock);

    // Verify that the destination memory is unmapped.
    R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
                                 KMemoryPermission::None, KMemoryPermission::None,
                                 KMemoryAttribute::None, KMemoryAttribute::None));

    // Create an update allocator.
    Result allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                 memory_block_slab_manager);

    // Allocate and open.
    KPageGroup pg;
    R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
        &pg, num_pages,
        KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, allocation_option)));

    R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup));

    // Update the blocks.
    memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
                                KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
                                KMemoryBlockDisableMergeAttribute::None);

    return ResultSuccess;
}
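
// Note on the update-allocator pattern used above and throughout this file: a
// KMemoryBlockManagerUpdateAllocator pre-allocates the worst-case number of KMemoryBlock nodes
// before the page-table operation is performed, so the subsequent bookkeeping Update() calls
// cannot fail and leave the block manager inconsistent with the actual mappings.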

Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
    // Validate the mapping request.
    R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
             ResultInvalidMemoryRegion);

    // Lock the table.
    KScopedLightLock lk(general_lock);

    // Verify that the source memory is normal heap.
    KMemoryState src_state{};
    KMemoryPermission src_perm{};
    std::size_t num_src_allocator_blocks{};
    R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks,
                                 src_address, size, KMemoryState::All, KMemoryState::Normal,
                                 KMemoryPermission::All, KMemoryPermission::UserReadWrite,
                                 KMemoryAttribute::All, KMemoryAttribute::None));

    // Verify that the destination memory is unmapped.
    std::size_t num_dst_allocator_blocks{};
    R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All,
                                 KMemoryState::Free, KMemoryPermission::None,
                                 KMemoryPermission::None, KMemoryAttribute::None,
                                 KMemoryAttribute::None));

    // Create an update allocator for the source.
    Result src_allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator src_allocator(
        std::addressof(src_allocator_result), memory_block_slab_manager, num_src_allocator_blocks);
    R_TRY(src_allocator_result);

    // Create an update allocator for the destination.
    Result dst_allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator dst_allocator(
        std::addressof(dst_allocator_result), memory_block_slab_manager, num_dst_allocator_blocks);
    R_TRY(dst_allocator_result);

    // Map the code memory.
    {
        // Determine the number of pages being operated on.
        const std::size_t num_pages = size / PageSize;

        // Create page groups for the memory being mapped.
        KPageGroup pg;
        AddRegionToPages(src_address, num_pages, pg);

        // Reprotect the source as kernel-read/not mapped.
        const auto new_perm = static_cast<KMemoryPermission>(KMemoryPermission::KernelRead |
                                                             KMemoryPermission::NotMapped);
        R_TRY(Operate(src_address, num_pages, new_perm, OperationType::ChangePermissions));

        // Ensure that we unprotect the source pages on failure.
        auto unprot_guard = SCOPE_GUARD({
            ASSERT(this->Operate(src_address, num_pages, src_perm, OperationType::ChangePermissions)
                       .IsSuccess());
        });

        // Map the alias pages.
        R_TRY(MapPages(dst_address, pg, new_perm));

        // We successfully mapped the alias pages, so we don't need to unprotect the src pages on
        // failure.
        unprot_guard.Cancel();

        // Apply the memory block updates.
        memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
                                    src_state, new_perm, KMemoryAttribute::Locked,
                                    KMemoryBlockDisableMergeAttribute::Locked,
                                    KMemoryBlockDisableMergeAttribute::None);
        memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
                                    KMemoryState::AliasCode, new_perm, KMemoryAttribute::None,
                                    KMemoryBlockDisableMergeAttribute::Normal,
                                    KMemoryBlockDisableMergeAttribute::None);
    }

    return ResultSuccess;
}
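
// UnmapCodeMemory() below undoes a MapCodeMemory() pairing: the alias is unmapped, the source
// heap pages become user read-write again, and if any unmapped page carried the code flag the
// instruction cache is invalidated according to the caller's ICacheInvalidationStrategy (either
// just the affected range, or the entire cache).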

Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
                                   ICacheInvalidationStrategy icache_invalidation_strategy) {
    // Validate the mapping request.
    R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
             ResultInvalidMemoryRegion);

    // Lock the table.
    KScopedLightLock lk(general_lock);

    // Verify that the source memory is locked normal heap.
    std::size_t num_src_allocator_blocks{};
    R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size,
                                 KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
                                 KMemoryPermission::None, KMemoryAttribute::All,
                                 KMemoryAttribute::Locked));

    // Verify that the destination memory is aliasable code.
    std::size_t num_dst_allocator_blocks{};
    R_TRY(this->CheckMemoryStateContiguous(
        std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
        KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
        KMemoryAttribute::All, KMemoryAttribute::None));

    // Determine whether any pages being unmapped are code.
    bool any_code_pages = false;
    {
        KMemoryBlockManager::const_iterator it = memory_block_manager.FindIterator(dst_address);
        while (true) {
            // Get the memory info.
            const KMemoryInfo info = it->GetMemoryInfo();

            // Check if the memory has code flag.
            if ((info.GetState() & KMemoryState::FlagCode) != KMemoryState::None) {
                any_code_pages = true;
                break;
            }

            // Check if we're done.
            if (dst_address + size - 1 <= info.GetLastAddress()) {
                break;
            }

            // Advance.
            ++it;
        }
    }

    // Ensure that we maintain the instruction cache.
    bool reprotected_pages = false;
    SCOPE_EXIT({
        if (reprotected_pages && any_code_pages) {
            if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) {
                system.InvalidateCpuInstructionCacheRange(dst_address, size);
            } else {
                system.InvalidateCpuInstructionCaches();
            }
        }
    });

    // Unmap.
    {
        // Determine the number of pages being operated on.
        const std::size_t num_pages = size / PageSize;

        // Create an update allocator for the source.
        Result src_allocator_result{ResultSuccess};
        KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result),
                                                         memory_block_slab_manager,
                                                         num_src_allocator_blocks);
        R_TRY(src_allocator_result);

        // Create an update allocator for the destination.
        Result dst_allocator_result{ResultSuccess};
        KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result),
                                                         memory_block_slab_manager,
                                                         num_dst_allocator_blocks);
        R_TRY(dst_allocator_result);

        // Unmap the aliased copy of the pages.
        R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));

        // Try to set the permissions for the source pages back to what they should be.
        R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
                      OperationType::ChangePermissions));

        // Apply the memory block updates.
        memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
                                    KMemoryState::None, KMemoryPermission::None,
                                    KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
                                    KMemoryBlockDisableMergeAttribute::Normal);
        memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages,
                                    KMemoryState::Normal, KMemoryPermission::UserReadWrite,
                                    KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
                                    KMemoryBlockDisableMergeAttribute::Locked);

        // Note that we reprotected pages.
        reprotected_pages = true;
    }

    return ResultSuccess;
}
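
// A minimal usage sketch for FindFreeArea() (hypothetical values): asking for num_pages = 0x10
// within a region of region_num_pages = 0x100, with PageSize alignment, zero offset and
// guard_pages = 1, returns the first address whose range, padded by the requested guard pages,
// collides with no existing mapping, or 0 when no such gap exists.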

VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
                               std::size_t num_pages, std::size_t alignment, std::size_t offset,
                               std::size_t guard_pages) {
    VAddr address = 0;

    if (num_pages <= region_num_pages) {
        if (this->IsAslrEnabled()) {
            UNIMPLEMENTED();
        }

        // Find the first free area.
        if (address == 0) {
            address = memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages,
                                                        alignment, offset, guard_pages);
        }
    }

    return address;
}
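
// MakePageGroup() walks the hardware page table and coalesces physically-contiguous runs into
// KPageGroup blocks. The traversal may enter mid-block, which is why the initial cur_size
// subtracts cur_addr's offset within its block (cur_addr & (block_size - 1)), and the final
// block is trimmed when the walk overshoots the requested size.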

Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
    ASSERT(this->IsLockedByCurrentThread());

    const size_t size = num_pages * PageSize;

    // We're making a new group, not adding to an existing one.
    R_UNLESS(pg.Empty(), ResultInvalidCurrentMemory);

    // Begin traversal.
    Common::PageTable::TraversalContext context;
    Common::PageTable::TraversalEntry next_entry;
    R_UNLESS(page_table_impl.BeginTraversal(next_entry, context, addr), ResultInvalidCurrentMemory);

    // Prepare tracking variables.
    PAddr cur_addr = next_entry.phys_addr;
    size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
    size_t tot_size = cur_size;

    // Iterate, adding to group as we go.
    const auto& memory_layout = system.Kernel().MemoryLayout();
    while (tot_size < size) {
        R_UNLESS(page_table_impl.ContinueTraversal(next_entry, context),
                 ResultInvalidCurrentMemory);

        if (next_entry.phys_addr != (cur_addr + cur_size)) {
            const size_t cur_pages = cur_size / PageSize;

            R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
            R_TRY(pg.AddBlock(cur_addr, cur_pages));

            cur_addr = next_entry.phys_addr;
            cur_size = next_entry.block_size;
        } else {
            cur_size += next_entry.block_size;
        }

        tot_size += next_entry.block_size;
    }

    // Ensure we add the right amount for the last block.
    if (tot_size > size) {
        cur_size -= (tot_size - size);
    }

    // Add the last block.
    const size_t cur_pages = cur_size / PageSize;
    R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
    R_TRY(pg.AddBlock(cur_addr, cur_pages));

    return ResultSuccess;
}

bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
    ASSERT(this->IsLockedByCurrentThread());

    const size_t size = num_pages * PageSize;
    const auto& pg = pg_ll.Nodes();
    const auto& memory_layout = system.Kernel().MemoryLayout();

    // Empty groups are necessarily invalid.
    if (pg.empty()) {
        return false;
    }

    // We're going to validate that the group we'd expect is the group we see.
    auto cur_it = pg.begin();
    PAddr cur_block_address = cur_it->GetAddress();
    size_t cur_block_pages = cur_it->GetNumPages();

    auto UpdateCurrentIterator = [&]() {
        if (cur_block_pages == 0) {
            if ((++cur_it) == pg.end()) {
                return false;
            }

            cur_block_address = cur_it->GetAddress();
            cur_block_pages = cur_it->GetNumPages();
        }
        return true;
    };

    // Begin traversal.
    Common::PageTable::TraversalContext context;
    Common::PageTable::TraversalEntry next_entry;
    if (!page_table_impl.BeginTraversal(next_entry, context, addr)) {
        return false;
    }

    // Prepare tracking variables.
    PAddr cur_addr = next_entry.phys_addr;
    size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
    size_t tot_size = cur_size;

    // Iterate, comparing expected to actual.
    while (tot_size < size) {
        if (!page_table_impl.ContinueTraversal(next_entry, context)) {
            return false;
        }

        if (next_entry.phys_addr != (cur_addr + cur_size)) {
            const size_t cur_pages = cur_size / PageSize;

            if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
                return false;
            }

            if (!UpdateCurrentIterator()) {
                return false;
            }

            if (cur_block_address != cur_addr || cur_block_pages < cur_pages) {
                return false;
            }

            cur_block_address += cur_size;
            cur_block_pages -= cur_pages;
            cur_addr = next_entry.phys_addr;
            cur_size = next_entry.block_size;
        } else {
            cur_size += next_entry.block_size;
        }

        tot_size += next_entry.block_size;
    }

    // Ensure we compare the right amount for the last block.
    if (tot_size > size) {
        cur_size -= (tot_size - size);
    }

    if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
        return false;
    }

    if (!UpdateCurrentIterator()) {
        return false;
    }

    return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
}

Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
                                      VAddr src_addr) {
    KScopedLightLock lk(general_lock);

    const std::size_t num_pages{size / PageSize};

    // Check that the memory is mapped in the destination process.
    size_t num_allocator_blocks;
    R_TRY(CheckMemoryState(&num_allocator_blocks, dst_addr, size, KMemoryState::All,
                           KMemoryState::SharedCode, KMemoryPermission::UserReadWrite,
                           KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
                           KMemoryAttribute::None));

    // Check that the memory is mapped in the source process.
    R_TRY(src_page_table.CheckMemoryState(src_addr, size, KMemoryState::FlagCanMapProcess,
                                          KMemoryState::FlagCanMapProcess, KMemoryPermission::None,
                                          KMemoryPermission::None, KMemoryAttribute::All,
                                          KMemoryAttribute::None));

    // Create an update allocator.
    Result allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                 memory_block_slab_manager, num_allocator_blocks);
    R_TRY(allocator_result);

    CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));

    // Apply the memory block update.
    memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages, KMemoryState::Free,
                                KMemoryPermission::None, KMemoryAttribute::None,
                                KMemoryBlockDisableMergeAttribute::None,
                                KMemoryBlockDisableMergeAttribute::Normal);

    system.InvalidateCpuInstructionCaches();

    return ResultSuccess;
}
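
// MapPhysicalMemory() is an optimistic retry loop: the currently-mapped size is measured under
// the table lock, the lock is dropped while backing pages are reserved and allocated, and the
// measurement is repeated after re-locking. If another thread changed the mappings in between,
// the freshly-allocated pages are discarded and the whole process restarts.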

Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
    // Lock the physical memory lock.
    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);

    // Calculate the last address for convenience.
    const VAddr last_address = address + size - 1;

    // Define iteration variables.
    VAddr cur_address;
    std::size_t mapped_size;

    // The entire mapping process can be retried.
    while (true) {
        // Check if the memory is already mapped.
        {
            // Lock the table.
            KScopedLightLock lk(general_lock);

            // Iterate over the memory.
            cur_address = address;
            mapped_size = 0;

            auto it = memory_block_manager.FindIterator(cur_address);
            while (true) {
                // Check that the iterator is valid.
                ASSERT(it != memory_block_manager.end());

                // Get the memory info.
                const KMemoryInfo info = it->GetMemoryInfo();

                // Check if we're done.
                if (last_address <= info.GetLastAddress()) {
                    if (info.GetState() != KMemoryState::Free) {
                        mapped_size += (last_address + 1 - cur_address);
                    }
                    break;
                }

                // Track the memory if it's mapped.
                if (info.GetState() != KMemoryState::Free) {
                    mapped_size += VAddr(info.GetEndAddress()) - cur_address;
                }

                // Advance.
                cur_address = info.GetEndAddress();
                ++it;
            }

            // If the size mapped is the size requested, we've nothing to do.
            R_SUCCEED_IF(size == mapped_size);
        }

        // Allocate and map the memory.
        {
            // Reserve the memory from the process resource limit.
            KScopedResourceReservation memory_reservation(
                system.Kernel().CurrentProcess()->GetResourceLimit(),
                LimitableResource::PhysicalMemory, size - mapped_size);
            R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

            // Allocate pages for the new memory.
            KPageGroup pg;
            R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
                &pg, (size - mapped_size) / PageSize,
                KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));

            // Map the memory.
            {
                // Lock the table.
                KScopedLightLock lk(general_lock);

                size_t num_allocator_blocks = 0;

                // Verify that nobody has mapped memory since we first checked.
                {
                    // Iterate over the memory.
                    size_t checked_mapped_size = 0;
                    cur_address = address;

                    auto it = memory_block_manager.FindIterator(cur_address);
                    while (true) {
                        // Check that the iterator is valid.
                        ASSERT(it != memory_block_manager.end());

                        // Get the memory info.
                        const KMemoryInfo info = it->GetMemoryInfo();

                        const bool is_free = info.GetState() == KMemoryState::Free;
                        if (is_free) {
                            if (info.GetAddress() < address) {
                                ++num_allocator_blocks;
                            }
                            if (last_address < info.GetLastAddress()) {
                                ++num_allocator_blocks;
                            }
                        }

                        // Check if we're done.
                        if (last_address <= info.GetLastAddress()) {
                            if (!is_free) {
                                checked_mapped_size += (last_address + 1 - cur_address);
                            }
                            break;
                        }

                        // Track the memory if it's mapped.
                        if (!is_free) {
                            checked_mapped_size += VAddr(info.GetEndAddress()) - cur_address;
                        }

                        // Advance.
                        cur_address = info.GetEndAddress();
                        ++it;
                    }

                    // If the size now isn't what it was before, somebody mapped or unmapped
                    // concurrently. If this happened, retry.
                    if (mapped_size != checked_mapped_size) {
                        continue;
                    }
                }

                // Create an update allocator.
                ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
                Result allocator_result{ResultSuccess};
                KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                             memory_block_slab_manager,
                                                             num_allocator_blocks);
                R_TRY(allocator_result);

                // Reset the current tracking address, and make sure we clean up on failure.
                cur_address = address;
                auto unmap_guard = detail::ScopeExit([&] {
                    if (cur_address > address) {
                        const VAddr last_unmap_address = cur_address - 1;

                        // Iterate, unmapping the pages.
                        cur_address = address;

                        auto it = memory_block_manager.FindIterator(cur_address);
                        while (true) {
                            // Check that the iterator is valid.
                            ASSERT(it != memory_block_manager.end());

                            // Get the memory info.
                            const KMemoryInfo info = it->GetMemoryInfo();

                            // If the memory state is free, we mapped it and need to unmap it.
                            if (info.GetState() == KMemoryState::Free) {
                                // Determine the range to unmap.
                                const size_t cur_pages =
                                    std::min(VAddr(info.GetEndAddress()) - cur_address,
                                             last_unmap_address + 1 - cur_address) /
                                    PageSize;

                                // Unmap.
                                ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
                                               OperationType::Unmap)
                                           .IsSuccess());
                            }

                            // Check if we're done.
                            if (last_unmap_address <= info.GetLastAddress()) {
                                break;
                            }

                            // Advance.
                            cur_address = info.GetEndAddress();
                            ++it;
                        }
                    }
                });

                // Iterate over the memory.
                auto pg_it = pg.Nodes().begin();
                PAddr pg_phys_addr = pg_it->GetAddress();
                size_t pg_pages = pg_it->GetNumPages();

                auto it = memory_block_manager.FindIterator(cur_address);
                while (true) {
                    // Check that the iterator is valid.
                    ASSERT(it != memory_block_manager.end());

                    // Get the memory info.
                    const KMemoryInfo info = it->GetMemoryInfo();

                    // If it's unmapped, we need to map it.
                    if (info.GetState() == KMemoryState::Free) {
                        // Determine the range to map.
                        size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
                                                    last_address + 1 - cur_address) /
                                           PageSize;

                        // While we have pages to map, map them.
                        while (map_pages > 0) {
                            // Check if we're at the end of the physical block.
                            if (pg_pages == 0) {
                                // Ensure there are more pages to map.
                                ASSERT(pg_it != pg.Nodes().end());

                                // Advance our physical block.
                                ++pg_it;
                                pg_phys_addr = pg_it->GetAddress();
                                pg_pages = pg_it->GetNumPages();
                            }

                            // Map whatever we can.
                            const size_t cur_pages = std::min(pg_pages, map_pages);
                            R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
                                          OperationType::Map, pg_phys_addr));

                            // Advance.
                            cur_address += cur_pages * PageSize;
                            map_pages -= cur_pages;

                            pg_phys_addr += cur_pages * PageSize;
                            pg_pages -= cur_pages;
                        }
                    }

                    // Check if we're done.
                    if (last_address <= info.GetLastAddress()) {
                        break;
                    }

                    // Advance.
                    cur_address = info.GetEndAddress();
                    ++it;
                }

                // We succeeded, so commit the memory reservation.
                memory_reservation.Commit();

                // Increase our tracked mapped size.
                mapped_physical_memory_size += (size - mapped_size);

                // Update the relevant memory blocks.
                memory_block_manager.UpdateIfMatch(
                    std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
                    KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
                    KMemoryPermission::UserReadWrite, KMemoryAttribute::None);

                // Cancel our guard.
                unmap_guard.Cancel();

                return ResultSuccess;
            }
        }
    }
}
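
// UnmapPhysicalMemory() rebuilds the page group for the region directly from a page-table
// traversal, merging entries whose physical addresses are contiguous, before unmapping. The
// remap_guard then uses that same group to restore the original mappings if any step fails
// part-way through.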

Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
    // Lock the physical memory lock.
    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);

    // Lock the table.
    KScopedLightLock lk(general_lock);

    // Calculate the last address for convenience.
    const VAddr last_address = address + size - 1;

    // Define iteration variables.
    VAddr cur_address = 0;
    std::size_t mapped_size = 0;
    std::size_t num_allocator_blocks = 0;

    // Check if the memory is mapped.
    {
        // Iterate over the memory.
        cur_address = address;
        mapped_size = 0;

        auto it = memory_block_manager.FindIterator(cur_address);
        while (true) {
            // Check that the iterator is valid.
            ASSERT(it != memory_block_manager.end());

            // Get the memory info.
            const KMemoryInfo info = it->GetMemoryInfo();

            // Verify the memory's state.
            const bool is_normal = info.GetState() == KMemoryState::Normal &&
                                   info.GetAttribute() == KMemoryAttribute::None;
            const bool is_free = info.GetState() == KMemoryState::Free;
            R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory);

            if (is_normal) {
                R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory);

                if (info.GetAddress() < address) {
                    ++num_allocator_blocks;
                }
                if (last_address < info.GetLastAddress()) {
                    ++num_allocator_blocks;
                }
            }

            // Check if we're done.
            if (last_address <= info.GetLastAddress()) {
                if (is_normal) {
                    mapped_size += (last_address + 1 - cur_address);
                }
                break;
            }

            // Track the memory if it's mapped.
            if (is_normal) {
                mapped_size += VAddr(info.GetEndAddress()) - cur_address;
            }

            // Advance.
            cur_address = info.GetEndAddress();
            ++it;
        }

        // If there's nothing mapped, we've nothing to do.
        R_SUCCEED_IF(mapped_size == 0);
    }

    // Make a page group for the unmap region.
    KPageGroup pg;
    {
        auto& impl = this->PageTableImpl();

        // Begin traversal.
        Common::PageTable::TraversalContext context;
        Common::PageTable::TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0};
        bool cur_valid = false;
        Common::PageTable::TraversalEntry next_entry;
        bool next_valid = false;
        size_t tot_size = 0;

        cur_address = address;
        next_valid = impl.BeginTraversal(next_entry, context, cur_address);
        next_entry.block_size =
            (next_entry.block_size - (next_entry.phys_addr & (next_entry.block_size - 1)));

        // Iterate, building the group.
        while (true) {
            if ((!next_valid && !cur_valid) ||
                (next_valid && cur_valid &&
                 next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
                cur_entry.block_size += next_entry.block_size;
            } else {
                if (cur_valid) {
                    // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
                    R_TRY(pg.AddBlock(cur_entry.phys_addr, cur_entry.block_size / PageSize));
                }

                // Update tracking variables.
                tot_size += cur_entry.block_size;
                cur_entry = next_entry;
                cur_valid = next_valid;
            }

            if (cur_entry.block_size + tot_size >= size) {
                break;
            }

            next_valid = impl.ContinueTraversal(next_entry, context);
        }

        // Add the last block.
        if (cur_valid) {
            // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
            R_TRY(pg.AddBlock(cur_entry.phys_addr, (size - tot_size) / PageSize));
        }
    }
    ASSERT(pg.GetNumPages() == mapped_size / PageSize);

    // Create an update allocator.
    ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
    Result allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                 memory_block_slab_manager, num_allocator_blocks);
    R_TRY(allocator_result);

    // Reset the current tracking address, and make sure we clean up on failure.
    cur_address = address;
    auto remap_guard = detail::ScopeExit([&] {
        if (cur_address > address) {
            const VAddr last_map_address = cur_address - 1;
            cur_address = address;

            // Iterate over the memory we unmapped.
            auto it = memory_block_manager.FindIterator(cur_address);
            auto pg_it = pg.Nodes().begin();
            PAddr pg_phys_addr = pg_it->GetAddress();
            size_t pg_pages = pg_it->GetNumPages();

            while (true) {
                // Get the memory info for the pages we unmapped, convert to property.
                const KMemoryInfo info = it->GetMemoryInfo();

                // If the memory is normal, we unmapped it and need to re-map it.
                if (info.GetState() == KMemoryState::Normal) {
                    // Determine the range to map.
                    size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
                                                last_map_address + 1 - cur_address) /
                                       PageSize;

                    // While we have pages to map, map them.
                    while (map_pages > 0) {
                        // Check if we're at the end of the physical block.
                        if (pg_pages == 0) {
                            // Ensure there are more pages to map.
                            ASSERT(pg_it != pg.Nodes().end());

                            // Advance our physical block.
                            ++pg_it;
                            pg_phys_addr = pg_it->GetAddress();
                            pg_pages = pg_it->GetNumPages();
                        }

                        // Map whatever we can.
                        const size_t cur_pages = std::min(pg_pages, map_pages);
                        ASSERT(this->Operate(cur_address, cur_pages, info.GetPermission(),
                                             OperationType::Map, pg_phys_addr) == ResultSuccess);

                        // Advance.
                        cur_address += cur_pages * PageSize;
                        map_pages -= cur_pages;

                        pg_phys_addr += cur_pages * PageSize;
                        pg_pages -= cur_pages;
                    }
                }

                // Check if we're done.
                if (last_map_address <= info.GetLastAddress()) {
                    break;
                }

                // Advance.
                ++it;
            }
        }
    });

    // Iterate over the memory, unmapping as we go.
    auto it = memory_block_manager.FindIterator(cur_address);
    while (true) {
        // Check that the iterator is valid.
        ASSERT(it != memory_block_manager.end());

        // Get the memory info.
        const KMemoryInfo info = it->GetMemoryInfo();

        // If the memory state is normal, we need to unmap it.
        if (info.GetState() == KMemoryState::Normal) {
            // Determine the range to unmap.
            const size_t cur_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
                                              last_address + 1 - cur_address) /
                                     PageSize;

            // Unmap.
            R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap));
        }

        // Check if we're done.
        if (last_address <= info.GetLastAddress()) {
            break;
        }

        // Advance.
        cur_address = info.GetEndAddress();
        ++it;
    }

    // Release the memory resource.
    mapped_physical_memory_size -= mapped_size;
    auto process{system.Kernel().CurrentProcess()};
    process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);

    // Update memory blocks.
    memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
                                KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None,
                                KMemoryBlockDisableMergeAttribute::None,
                                KMemoryBlockDisableMergeAttribute::None);

    // TODO(bunnei): This is a workaround until the next set of changes, where we add reference
    // counting for mapped pages. Until then, we must manually close the reference to the page
    // group.
    system.Kernel().MemoryManager().Close(pg);

    // We succeeded.
    remap_guard.Cancel();

    return ResultSuccess;
}
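
// MapMemory() backs the stack-aliasing path: the source pages (which must be FlagCanAlias) are
// reprotected kernel-read/not-mapped and marked Locked, while the same physical pages are
// mapped user read-write at the destination as KMemoryState::Stack.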

Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
    // Lock the table.
    KScopedLightLock lk(general_lock);

    // Validate that the source address's state is valid.
    KMemoryState src_state;
    size_t num_src_allocator_blocks;
    R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr,
                                 std::addressof(num_src_allocator_blocks), src_address, size,
                                 KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
                                 KMemoryPermission::All, KMemoryPermission::UserReadWrite,
                                 KMemoryAttribute::All, KMemoryAttribute::None));

    // Validate that the dst address's state is valid.
    size_t num_dst_allocator_blocks;
    R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
                                 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
                                 KMemoryPermission::None, KMemoryAttribute::None,
                                 KMemoryAttribute::None));

    // Create an update allocator for the source.
    Result src_allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator src_allocator(
        std::addressof(src_allocator_result), memory_block_slab_manager, num_src_allocator_blocks);
    R_TRY(src_allocator_result);

    // Create an update allocator for the destination.
    Result dst_allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator dst_allocator(
        std::addressof(dst_allocator_result), memory_block_slab_manager, num_dst_allocator_blocks);
    R_TRY(dst_allocator_result);

    // Map the memory.
    KPageGroup page_linked_list;
    const std::size_t num_pages{size / PageSize};
    const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(
        KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
    const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked;

    AddRegionToPages(src_address, num_pages, page_linked_list);
    {
        // Reprotect the source as kernel-read/not mapped.
        auto block_guard = detail::ScopeExit([&] {
            Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
                    OperationType::ChangePermissions);
        });
        R_TRY(Operate(src_address, num_pages, new_src_perm, OperationType::ChangePermissions));
        R_TRY(MapPages(dst_address, page_linked_list, KMemoryPermission::UserReadWrite));

        block_guard.Cancel();
    }

    // Apply the memory block updates.
    memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
                                new_src_perm, new_src_attr,
                                KMemoryBlockDisableMergeAttribute::Locked,
                                KMemoryBlockDisableMergeAttribute::None);
    memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
                                KMemoryState::Stack, KMemoryPermission::UserReadWrite,
                                KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
                                KMemoryBlockDisableMergeAttribute::None);

    return ResultSuccess;
}
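
// UnmapMemory() undoes MapMemory(): it requires that the destination still maps exactly the
// same physical pages as the locked source (dst_pages.IsEqual(src_pages)); otherwise the pair
// cannot have come from a matching MapMemory() call and ResultInvalidMemoryRegion is returned.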

Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
    // Lock the table.
    KScopedLightLock lk(general_lock);

    // Validate that the source address's state is valid.
    KMemoryState src_state;
    size_t num_src_allocator_blocks;
    R_TRY(this->CheckMemoryState(
        std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks),
        src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias,
        KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead,
        KMemoryAttribute::All, KMemoryAttribute::Locked));

    // Validate that the dst address's state is valid.
    KMemoryPermission dst_perm;
    size_t num_dst_allocator_blocks;
    R_TRY(this->CheckMemoryState(
        nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks),
        dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None,
        KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));

    // Create an update allocator for the source.
    Result src_allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator src_allocator(
        std::addressof(src_allocator_result), memory_block_slab_manager, num_src_allocator_blocks);
    R_TRY(src_allocator_result);

    // Create an update allocator for the destination.
    Result dst_allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator dst_allocator(
        std::addressof(dst_allocator_result), memory_block_slab_manager, num_dst_allocator_blocks);
    R_TRY(dst_allocator_result);

    KPageGroup src_pages;
    KPageGroup dst_pages;
    const std::size_t num_pages{size / PageSize};

    AddRegionToPages(src_address, num_pages, src_pages);
    AddRegionToPages(dst_address, num_pages, dst_pages);

    if (!dst_pages.IsEqual(src_pages)) {
        return ResultInvalidMemoryRegion;
    }

    {
        auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });

        R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
        R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
                      OperationType::ChangePermissions));

        block_guard.Cancel();
    }

    // Apply the memory block updates.
    memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state,
                                KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
                                KMemoryBlockDisableMergeAttribute::None,
                                KMemoryBlockDisableMergeAttribute::Locked);
    memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages,
                                KMemoryState::None, KMemoryPermission::None, KMemoryAttribute::None,
                                KMemoryBlockDisableMergeAttribute::None,
                                KMemoryBlockDisableMergeAttribute::Normal);

    return ResultSuccess;
}
2022-06-26 06:15:31 +02:00
|
|
|
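// Maps the blocks of a page group contiguously starting at addr. The caller must hold the
// table lock; if any block fails to map, everything mapped so far is unmapped again.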
Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
                            KMemoryPermission perm) {
    ASSERT(this->IsLockedByCurrentThread());

    VAddr cur_addr{addr};

    for (const auto& node : page_linked_list.Nodes()) {
        if (const auto result{
                Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
            result.IsError()) {
            // Roll back the pages mapped so far, from addr up to cur_addr.
            const std::size_t num_pages{(cur_addr - addr) / PageSize};

            ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)
                       .IsSuccess());

            return result;
        }

        cur_addr += node.GetNumPages() * PageSize;
    }

    return ResultSuccess;
}

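// Maps a page group into a currently-free range and records the new state and permission in
// the memory block manager.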
Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state,
                            KMemoryPermission perm) {
    // Check that the map is in range.
    const std::size_t num_pages{page_linked_list.GetNumPages()};
    const std::size_t size{num_pages * PageSize};
    R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);

    // Lock the table.
    KScopedLightLock lk(general_lock);

    // Check the memory state.
    R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
                                 KMemoryPermission::None, KMemoryPermission::None,
                                 KMemoryAttribute::None, KMemoryAttribute::None));

    // Create an update allocator.
    Result allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                 memory_block_slab_manager);

    // Map the pages.
    R_TRY(MapPages(address, page_linked_list, perm));

    // Update the blocks.
    memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm,
                                KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
                                KMemoryBlockDisableMergeAttribute::None);

    return ResultSuccess;
}

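// Searches the given region for a free, suitably aligned area, maps num_pages there, and
// reports the chosen address via out_addr. Only mapping a provided physical address
// (is_pa_valid) is currently implemented.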
Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
                            PAddr phys_addr, bool is_pa_valid, VAddr region_start,
                            std::size_t region_num_pages, KMemoryState state,
                            KMemoryPermission perm) {
    ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);

    // Ensure this is a valid map request.
    R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
             ResultInvalidCurrentMemory);
    R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);

    // Lock the table.
    KScopedLightLock lk(general_lock);

    // Find a random address to map at.
    VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
                                    this->GetNumGuardPages());
    R_UNLESS(addr != 0, ResultOutOfMemory);
    ASSERT(Common::IsAligned(addr, alignment));
    ASSERT(this->CanContain(addr, num_pages * PageSize, state));
    ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
                                  KMemoryPermission::None, KMemoryPermission::None,
                                  KMemoryAttribute::None, KMemoryAttribute::None)
               .IsSuccess());

    // Create an update allocator.
    Result allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                 memory_block_slab_manager);

    // Perform mapping operation.
    if (is_pa_valid) {
        R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr));
    } else {
        UNIMPLEMENTED();
    }

    // Update the blocks.
    memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm,
                                KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
                                KMemoryBlockDisableMergeAttribute::None);

    // We successfully mapped the pages.
    *out_addr = addr;
    return ResultSuccess;
}

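// Unmaps the blocks of a page group contiguously starting at addr. The caller must hold the
// table lock.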
Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
    ASSERT(this->IsLockedByCurrentThread());

    VAddr cur_addr{addr};

    for (const auto& node : page_linked_list.Nodes()) {
        if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
                                      OperationType::Unmap)};
            result.IsError()) {
            return result;
        }

        cur_addr += node.GetNumPages() * PageSize;
    }

    return ResultSuccess;
}

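// Unmaps a page group after verifying that the whole range matches the expected memory
// state, then marks the range Free in the block manager.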
Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) {
    // Check that the unmap is in range.
    const std::size_t num_pages{page_linked_list.GetNumPages()};
    const std::size_t size{num_pages * PageSize};
    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);

    // Lock the table.
    KScopedLightLock lk(general_lock);

    // Check the memory state.
    size_t num_allocator_blocks;
    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
                                 KMemoryState::All, state, KMemoryPermission::None,
                                 KMemoryPermission::None, KMemoryAttribute::All,
                                 KMemoryAttribute::None));

    // Create an update allocator.
    Result allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                 memory_block_slab_manager, num_allocator_blocks);
    R_TRY(allocator_result);

    // Perform the unmap.
    R_TRY(UnmapPages(address, page_linked_list));

    // Update the blocks.
    memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
                                KMemoryPermission::None, KMemoryAttribute::None,
                                KMemoryBlockDisableMergeAttribute::None,
                                KMemoryBlockDisableMergeAttribute::Normal);

    return ResultSuccess;
}

Result KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) {
    // Check that the unmap is in range.
    const std::size_t size = num_pages * PageSize;
    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);

    // Lock the table.
    KScopedLightLock lk(general_lock);

    // Check the memory state.
    std::size_t num_allocator_blocks{};
    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
                                 KMemoryState::All, state, KMemoryPermission::None,
                                 KMemoryPermission::None, KMemoryAttribute::All,
                                 KMemoryAttribute::None));

    // Create an update allocator.
    Result allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                 memory_block_slab_manager, num_allocator_blocks);
    R_TRY(allocator_result);

    // Perform the unmap.
    R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap));

    // Update the blocks.
    memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free,
                                KMemoryPermission::None, KMemoryAttribute::None,
                                KMemoryBlockDisableMergeAttribute::None,
                                KMemoryBlockDisableMergeAttribute::Normal);

    return ResultSuccess;
}

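// Creates (and opens) a page group describing the physical pages backing the given range,
// provided the range is reference counted and matches the supplied state/perm/attr masks.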
Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
                                        KMemoryState state_mask, KMemoryState state,
                                        KMemoryPermission perm_mask, KMemoryPermission perm,
                                        KMemoryAttribute attr_mask, KMemoryAttribute attr) {
    // Ensure that the page group isn't null.
    ASSERT(out != nullptr);

    // Make sure that the region we're mapping is valid for the table.
    const size_t size = num_pages * PageSize;
    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);

    // Lock the table.
    KScopedLightLock lk(general_lock);

    // Check if state allows us to create the group.
    R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
                                 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
                                 attr_mask, attr));

    // Create a new page group for the region.
    R_TRY(this->MakePageGroup(*out, address, num_pages));

    return ResultSuccess;
}

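// Reprotects a code region. Making code writable transitions Code/AliasCode into the
// corresponding *Data state, and newly executable pages get their instruction cache
// invalidated.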
Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
                                              Svc::MemoryPermission svc_perm) {
    const size_t num_pages = size / PageSize;

    // Lock the table.
    KScopedLightLock lk(general_lock);

    // Verify we can change the memory permission.
    KMemoryState old_state;
    KMemoryPermission old_perm;
    size_t num_allocator_blocks;
    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
                                 std::addressof(num_allocator_blocks), addr, size,
                                 KMemoryState::FlagCode, KMemoryState::FlagCode,
                                 KMemoryPermission::None, KMemoryPermission::None,
                                 KMemoryAttribute::All, KMemoryAttribute::None));

    // Determine new perm/state.
    const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
    KMemoryState new_state = old_state;
    const bool is_w = (new_perm & KMemoryPermission::UserWrite) == KMemoryPermission::UserWrite;
    const bool is_x = (new_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
    const bool was_x =
        (old_perm & KMemoryPermission::UserExecute) == KMemoryPermission::UserExecute;
    ASSERT(!(is_w && is_x));

    if (is_w) {
        switch (old_state) {
        case KMemoryState::Code:
            new_state = KMemoryState::CodeData;
            break;
        case KMemoryState::AliasCode:
            new_state = KMemoryState::AliasCodeData;
            break;
        default:
            ASSERT(false);
        }
    }

    // Succeed if there's nothing to do.
    R_SUCCEED_IF(old_perm == new_perm && old_state == new_state);

    // Create an update allocator.
    Result allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                 memory_block_slab_manager, num_allocator_blocks);
    R_TRY(allocator_result);

    // Perform mapping operation.
    const auto operation =
        was_x ? OperationType::ChangePermissionsAndRefresh : OperationType::ChangePermissions;
    R_TRY(Operate(addr, num_pages, new_perm, operation));

    // Update the blocks.
    memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm,
                                KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
                                KMemoryBlockDisableMergeAttribute::None);

    // Ensure cache coherency, if we're setting pages as executable.
    if (is_x) {
        system.InvalidateCpuInstructionCacheRange(addr, size);
    }

    return ResultSuccess;
}

KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) {
    KScopedLightLock lk(general_lock);

    return memory_block_manager.FindBlock(addr)->GetMemoryInfo();
}

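// Queries the block containing addr; addresses outside the address space are reported as a
// single Inaccessible region covering everything above address_space_end.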
KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
    if (!Contains(addr, 1)) {
        return {
            .m_address = address_space_end,
            .m_size = 0 - address_space_end,
            .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible),
            .m_device_disable_merge_left_count = 0,
            .m_device_disable_merge_right_count = 0,
            .m_ipc_lock_count = 0,
            .m_device_use_count = 0,
            .m_ipc_disable_merge_count = 0,
            .m_permission = KMemoryPermission::None,
            .m_attribute = KMemoryAttribute::None,
            .m_original_permission = KMemoryPermission::None,
            .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None,
        };
    }

    return QueryInfoImpl(addr);
}

Result KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
                                       Svc::MemoryPermission svc_perm) {
    const size_t num_pages = size / PageSize;

    // Lock the table.
    KScopedLightLock lk(general_lock);

    // Verify we can change the memory permission.
    KMemoryState old_state;
    KMemoryPermission old_perm;
    size_t num_allocator_blocks;
    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr,
                                 std::addressof(num_allocator_blocks), addr, size,
                                 KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect,
                                 KMemoryPermission::None, KMemoryPermission::None,
                                 KMemoryAttribute::All, KMemoryAttribute::None));

    // Determine new perm.
    const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
    R_SUCCEED_IF(old_perm == new_perm);

    // Create an update allocator.
    Result allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                 memory_block_slab_manager, num_allocator_blocks);
    R_TRY(allocator_result);

    // Perform mapping operation.
    R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));

    // Update the blocks.
    memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
                                KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
                                KMemoryBlockDisableMergeAttribute::None);

    return ResultSuccess;
}

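// Updates the attribute bits selected by mask on a range whose state permits attribute
// changes, refreshing the underlying mappings afterwards.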
Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr) {
    const size_t num_pages = size / PageSize;
    ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) ==
           KMemoryAttribute::SetMask);

    // Lock the table.
    KScopedLightLock lk(general_lock);

    // Verify we can change the memory attribute.
    KMemoryState old_state;
    KMemoryPermission old_perm;
    KMemoryAttribute old_attr;
    size_t num_allocator_blocks;
    constexpr auto AttributeTestMask =
        ~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared);
    R_TRY(this->CheckMemoryState(
        std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr),
        std::addressof(num_allocator_blocks), addr, size, KMemoryState::FlagCanChangeAttribute,
        KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
        AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));

    // Create an update allocator.
    Result allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                 memory_block_slab_manager, num_allocator_blocks);
    R_TRY(allocator_result);

    // Determine the new attribute.
    const KMemoryAttribute new_attr =
        static_cast<KMemoryAttribute>(((old_attr & static_cast<KMemoryAttribute>(~mask)) |
                                       static_cast<KMemoryAttribute>(attr & mask)));

    // Perform operation, propagating any failure.
    R_TRY(this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh));

    // Update the blocks.
    memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm,
                                new_attr, KMemoryBlockDisableMergeAttribute::None,
                                KMemoryBlockDisableMergeAttribute::None);

    return ResultSuccess;
}

Result KPageTable::SetMaxHeapSize(std::size_t size) {
    // Lock the table.
    KScopedLightLock lk(general_lock);

    // Only process page tables are allowed to set heap size.
    ASSERT(!this->IsKernel());

    max_heap_size = size;

    return ResultSuccess;
}

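// Resizes the heap. Shrinking unmaps the tail of the heap and releases its memory back to
// the resource limit; growing reserves, allocates, zeroes, and maps new pages at the current
// heap end. Either way, *out receives heap_region_start.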
Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
    // Lock the physical memory mutex.
    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);

    // Try to perform a reduction in heap, instead of an extension.
    VAddr cur_address{};
    std::size_t allocation_size{};
    {
        // Lock the table.
        KScopedLightLock lk(general_lock);

        // Validate that setting heap size is possible at all.
        R_UNLESS(!is_kernel, ResultOutOfMemory);
        R_UNLESS(size <= static_cast<std::size_t>(heap_region_end - heap_region_start),
                 ResultOutOfMemory);
        R_UNLESS(size <= max_heap_size, ResultOutOfMemory);

        if (size < GetHeapSize()) {
            // The size being requested is less than the current size, so we need to free the
            // end of the heap.

            // Validate memory state.
            std::size_t num_allocator_blocks;
            R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks),
                                         heap_region_start + size, GetHeapSize() - size,
                                         KMemoryState::All, KMemoryState::Normal,
                                         KMemoryPermission::All, KMemoryPermission::UserReadWrite,
                                         KMemoryAttribute::All, KMemoryAttribute::None));

            // Create an update allocator.
            Result allocator_result{ResultSuccess};
            KMemoryBlockManagerUpdateAllocator allocator(
                std::addressof(allocator_result), memory_block_slab_manager, num_allocator_blocks);
            R_TRY(allocator_result);

            // Unmap the end of the heap.
            const auto num_pages = (GetHeapSize() - size) / PageSize;
            R_TRY(Operate(heap_region_start + size, num_pages, KMemoryPermission::None,
                          OperationType::Unmap));

            // Release the memory from the resource limit.
            system.Kernel().CurrentProcess()->GetResourceLimit()->Release(
                LimitableResource::PhysicalMemory, num_pages * PageSize);

            // Apply the memory block update.
            memory_block_manager.Update(std::addressof(allocator), heap_region_start + size,
                                        num_pages, KMemoryState::Free, KMemoryPermission::None,
                                        KMemoryAttribute::None,
                                        KMemoryBlockDisableMergeAttribute::None,
                                        size == 0 ? KMemoryBlockDisableMergeAttribute::Normal
                                                  : KMemoryBlockDisableMergeAttribute::None);

            // Update the current heap end.
            current_heap_end = heap_region_start + size;

            // Set the output.
            *out = heap_region_start;
            return ResultSuccess;
        } else if (size == GetHeapSize()) {
            // The size requested is exactly the current size.
            *out = heap_region_start;
            return ResultSuccess;
        } else {
            // We have to allocate memory. Determine how much to allocate and where while the
            // table is locked.
            cur_address = current_heap_end;
            allocation_size = size - GetHeapSize();
        }
    }

    // Reserve memory for the heap extension.
    KScopedResourceReservation memory_reservation(
        system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
        allocation_size);
    R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

    // Allocate pages for the heap extension.
    KPageGroup pg;
    R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
        &pg, allocation_size / PageSize,
        KMemoryManager::EncodeOption(memory_pool, allocation_option)));

    // Clear all the newly allocated pages.
    for (const auto& it : pg.Nodes()) {
        std::memset(system.DeviceMemory().GetPointer<void>(it.GetAddress()), heap_fill_value,
                    it.GetSize());
    }

    // Map the pages.
    {
        // Lock the table.
        KScopedLightLock lk(general_lock);

        // Ensure that the heap hasn't changed since we began executing.
        ASSERT(cur_address == current_heap_end);

        // Check the memory state.
        std::size_t num_allocator_blocks{};
        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), current_heap_end,
                                     allocation_size, KMemoryState::All, KMemoryState::Free,
                                     KMemoryPermission::None, KMemoryPermission::None,
                                     KMemoryAttribute::None, KMemoryAttribute::None));

        // Create an update allocator.
        Result allocator_result{ResultSuccess};
        KMemoryBlockManagerUpdateAllocator allocator(
            std::addressof(allocator_result), memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);

        // Map the pages.
        const auto num_pages = allocation_size / PageSize;
        R_TRY(Operate(current_heap_end, num_pages, pg, OperationType::MapGroup));

        // Clear all the newly allocated pages.
        for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
            std::memset(system.Memory().GetPointer(current_heap_end + (cur_page * PageSize)), 0,
                        PageSize);
        }

        // We succeeded, so commit our memory reservation.
        memory_reservation.Commit();

        // Apply the memory block update.
        memory_block_manager.Update(
            std::addressof(allocator), current_heap_end, num_pages, KMemoryState::Normal,
            KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
            heap_region_start == current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal
                                                  : KMemoryBlockDisableMergeAttribute::None,
            KMemoryBlockDisableMergeAttribute::None);

        // Update the current heap end.
        current_heap_end = heap_region_start + size;

        // Set the output.
        *out = heap_region_start;
        return ResultSuccess;
    }
}

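// Allocates a free virtual range in the given region and either maps it directly to map_addr
// (is_map_only) or backs it with newly allocated physical pages.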
ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
                                                  bool is_map_only, VAddr region_start,
                                                  std::size_t region_num_pages, KMemoryState state,
                                                  KMemoryPermission perm, PAddr map_addr) {
    KScopedLightLock lk(general_lock);

    if (!CanContain(region_start, region_num_pages * PageSize, state)) {
        return ResultInvalidCurrentMemory;
    }

    if (region_num_pages <= needed_num_pages) {
        return ResultOutOfMemory;
    }

    const VAddr addr{
        AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)};
    if (!addr) {
        return ResultOutOfMemory;
    }

    // Create an update allocator.
    Result allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                 memory_block_slab_manager);

    if (is_map_only) {
        R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
    } else {
        KPageGroup page_group;
        R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
            &page_group, needed_num_pages,
            KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));
        R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
    }

    // Update the blocks.
    memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm,
                                KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
                                KMemoryBlockDisableMergeAttribute::None);

    return addr;
}

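// Releases a device-shared lock on the range: every page must be device-mappable and
// currently DeviceShared, and the blocks are unshared via KMemoryBlock::UnshareToDevice.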
Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, std::size_t size) {
    // Lightly validate the range before doing anything else.
    const size_t num_pages = size / PageSize;
    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);

    // Lock the table.
    KScopedLightLock lk(general_lock);

    // Check the memory state.
    size_t num_allocator_blocks;
    R_TRY(this->CheckMemoryStateContiguous(
        std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap,
        KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None,
        KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));

    // Create an update allocator.
    Result allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                 memory_block_slab_manager, num_allocator_blocks);
    R_TRY(allocator_result);

    // Update the memory blocks.
    memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
                                    &KMemoryBlock::UnshareToDevice, KMemoryPermission::None);

    return ResultSuccess;
}

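// Code-memory lock/unlock pair: locking reprotects the range to kernel-read/write (not
// user-mapped) and sets the Locked attribute; unlocking restores user-read/write and clears
// it, validating the caller's page group along the way.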
Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size) {
    return this->LockMemoryAndOpen(
        out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
        KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
        KMemoryAttribute::None,
        static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
                                       KMemoryPermission::KernelReadWrite),
        KMemoryAttribute::Locked);
}

Result KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg) {
    return this->UnlockMemory(
        addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
        KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
        KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg);
}

bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
    auto start_ptr = system.DeviceMemory().GetPointer<u8>(addr);
    for (u64 offset{}; offset < size; offset += PageSize) {
        if (start_ptr != system.DeviceMemory().GetPointer<u8>(addr + offset)) {
            return false;
        }
        start_ptr += PageSize;
    }
    return true;
}

void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
                                  KPageGroup& page_linked_list) {
    VAddr addr{start};
    while (addr < start + (num_pages * PageSize)) {
        const PAddr paddr{GetPhysicalAddr(addr)};
        ASSERT(paddr != 0);
        page_linked_list.AddBlock(paddr, 1);
        addr += PageSize;
    }
}

VAddr KPageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_pages,
                                        u64 needed_num_pages, std::size_t align) {
    if (is_aslr_enabled) {
        UNIMPLEMENTED();
    }
    return memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0,
                                             IsKernel() ? 1 : 4);
}

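// The Operate overloads apply mapping changes to the underlying page table implementation.
// Callers must hold the table lock and are responsible for all state validation.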
Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group,
                           OperationType operation) {
    ASSERT(this->IsLockedByCurrentThread());

    ASSERT(Common::IsAligned(addr, PageSize));
    ASSERT(num_pages > 0);
    ASSERT(num_pages == page_group.GetNumPages());

    for (const auto& node : page_group.Nodes()) {
        const std::size_t size{node.GetNumPages() * PageSize};

        switch (operation) {
        case OperationType::MapGroup:
            system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress());
            break;
        default:
            ASSERT(false);
        }

        addr += size;
    }

    return ResultSuccess;
}

Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
                           OperationType operation, PAddr map_addr) {
    ASSERT(this->IsLockedByCurrentThread());

    ASSERT(num_pages > 0);
    ASSERT(Common::IsAligned(addr, PageSize));
    ASSERT(ContainsPages(addr, num_pages));

    switch (operation) {
    case OperationType::Unmap:
        system.Memory().UnmapRegion(page_table_impl, addr, num_pages * PageSize);
        break;
    case OperationType::Map: {
        ASSERT(map_addr);
        ASSERT(Common::IsAligned(map_addr, PageSize));
        system.Memory().MapMemoryRegion(page_table_impl, addr, num_pages * PageSize, map_addr);
        break;
    }
    case OperationType::ChangePermissions:
    case OperationType::ChangePermissionsAndRefresh:
        break;
    default:
        ASSERT(false);
    }
    return ResultSuccess;
}

VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
    switch (state) {
    case KMemoryState::Free:
    case KMemoryState::Kernel:
        return address_space_start;
    case KMemoryState::Normal:
        return heap_region_start;
    case KMemoryState::Ipc:
    case KMemoryState::NonSecureIpc:
    case KMemoryState::NonDeviceIpc:
        return alias_region_start;
    case KMemoryState::Stack:
        return stack_region_start;
    case KMemoryState::Static:
    case KMemoryState::ThreadLocal:
        return kernel_map_region_start;
    case KMemoryState::Io:
    case KMemoryState::Shared:
    case KMemoryState::AliasCode:
    case KMemoryState::AliasCodeData:
    case KMemoryState::Transfered:
    case KMemoryState::SharedTransfered:
    case KMemoryState::SharedCode:
    case KMemoryState::GeneratedCode:
    case KMemoryState::CodeOut:
    case KMemoryState::Coverage:
        return alias_code_region_start;
    case KMemoryState::Code:
    case KMemoryState::CodeData:
        return code_region_start;
    default:
        UNREACHABLE();
    }
}

std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
    switch (state) {
    case KMemoryState::Free:
    case KMemoryState::Kernel:
        return address_space_end - address_space_start;
    case KMemoryState::Normal:
        return heap_region_end - heap_region_start;
    case KMemoryState::Ipc:
    case KMemoryState::NonSecureIpc:
    case KMemoryState::NonDeviceIpc:
        return alias_region_end - alias_region_start;
    case KMemoryState::Stack:
        return stack_region_end - stack_region_start;
    case KMemoryState::Static:
    case KMemoryState::ThreadLocal:
        return kernel_map_region_end - kernel_map_region_start;
    case KMemoryState::Io:
    case KMemoryState::Shared:
    case KMemoryState::AliasCode:
    case KMemoryState::AliasCodeData:
    case KMemoryState::Transfered:
    case KMemoryState::SharedTransfered:
    case KMemoryState::SharedCode:
    case KMemoryState::GeneratedCode:
    case KMemoryState::CodeOut:
    case KMemoryState::Coverage:
        return alias_code_region_end - alias_code_region_start;
    case KMemoryState::Code:
    case KMemoryState::CodeData:
        return code_region_end - code_region_start;
    default:
        UNREACHABLE();
    }
}

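// A mapping of the given state may only be placed inside that state's region, and for most
// states must additionally avoid the heap and alias regions.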
bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const {
    const VAddr end = addr + size;
    const VAddr last = end - 1;

    const VAddr region_start = this->GetRegionAddress(state);
    const size_t region_size = this->GetRegionSize(state);

    const bool is_in_region =
        region_start <= addr && addr < end && last <= region_start + region_size - 1;
    const bool is_in_heap = !(end <= heap_region_start || heap_region_end <= addr ||
                              heap_region_start == heap_region_end);
    const bool is_in_alias = !(end <= alias_region_start || alias_region_end <= addr ||
                               alias_region_start == alias_region_end);
    switch (state) {
    case KMemoryState::Free:
    case KMemoryState::Kernel:
        return is_in_region;
    case KMemoryState::Io:
    case KMemoryState::Static:
    case KMemoryState::Code:
    case KMemoryState::CodeData:
    case KMemoryState::Shared:
    case KMemoryState::AliasCode:
    case KMemoryState::AliasCodeData:
    case KMemoryState::Stack:
    case KMemoryState::ThreadLocal:
    case KMemoryState::Transfered:
    case KMemoryState::SharedTransfered:
    case KMemoryState::SharedCode:
    case KMemoryState::GeneratedCode:
    case KMemoryState::CodeOut:
    case KMemoryState::Coverage:
        return is_in_region && !is_in_heap && !is_in_alias;
    case KMemoryState::Normal:
        ASSERT(is_in_heap);
        return is_in_region && !is_in_alias;
    case KMemoryState::Ipc:
    case KMemoryState::NonSecureIpc:
    case KMemoryState::NonDeviceIpc:
        ASSERT(is_in_alias);
        return is_in_region && !is_in_heap;
    default:
        return false;
    }
}

Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
                                    KMemoryState state, KMemoryPermission perm_mask,
                                    KMemoryPermission perm, KMemoryAttribute attr_mask,
                                    KMemoryAttribute attr) const {
    // Validate the states match expectation.
    R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory);
    R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory);
    R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory);

    return ResultSuccess;
}

Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
                                              std::size_t size, KMemoryState state_mask,
                                              KMemoryState state, KMemoryPermission perm_mask,
                                              KMemoryPermission perm, KMemoryAttribute attr_mask,
                                              KMemoryAttribute attr) const {
    ASSERT(this->IsLockedByCurrentThread());

    // Get information about the first block.
    const VAddr last_addr = addr + size - 1;
    KMemoryBlockManager::const_iterator it = memory_block_manager.FindIterator(addr);
    KMemoryInfo info = it->GetMemoryInfo();

    // If the start address isn't aligned, we need a block.
    const size_t blocks_for_start_align =
        (Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0;

    while (true) {
        // Validate against the provided masks.
        R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));

        // Break once we're done.
        if (last_addr <= info.GetLastAddress()) {
            break;
        }

        // Advance our iterator.
        it++;
        ASSERT(it != memory_block_manager.cend());
        info = it->GetMemoryInfo();
    }

    // If the end address isn't aligned, we need a block.
    const size_t blocks_for_end_align =
        (Common::AlignUp(addr + size, PageSize) != info.GetEndAddress()) ? 1 : 0;

    if (out_blocks_needed != nullptr) {
        *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
    }

    return ResultSuccess;
}

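// Full CheckMemoryState: in addition to the mask checks, requires every block in the range
// to share one state/permission/attribute combination (modulo ignore_attr), optionally
// returning that combination and the number of blocks an update allocator will need.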
Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
                                    KMemoryAttribute* out_attr, std::size_t* out_blocks_needed,
                                    VAddr addr, std::size_t size, KMemoryState state_mask,
                                    KMemoryState state, KMemoryPermission perm_mask,
                                    KMemoryPermission perm, KMemoryAttribute attr_mask,
                                    KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
    ASSERT(this->IsLockedByCurrentThread());

    // Get information about the first block.
    const VAddr last_addr = addr + size - 1;
    KMemoryBlockManager::const_iterator it = memory_block_manager.FindIterator(addr);
    KMemoryInfo info = it->GetMemoryInfo();

    // If the start address isn't aligned, we need a block.
    const size_t blocks_for_start_align =
        (Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0;

    // Validate all blocks in the range have correct state.
    const KMemoryState first_state = info.m_state;
    const KMemoryPermission first_perm = info.m_permission;
    const KMemoryAttribute first_attr = info.m_attribute;
    while (true) {
        // Validate the current block.
        R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory);
        R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory);
        R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr),
                 ResultInvalidCurrentMemory);

        // Validate against the provided masks.
        R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));

        // Break once we're done.
        if (last_addr <= info.GetLastAddress()) {
            break;
        }

        // Advance our iterator.
        it++;
        ASSERT(it != memory_block_manager.cend());
        info = it->GetMemoryInfo();
    }

    // If the end address isn't aligned, we need a block.
    const size_t blocks_for_end_align =
        (Common::AlignUp(addr + size, PageSize) != info.GetEndAddress()) ? 1 : 0;

    // Write output state.
    if (out_state != nullptr) {
        *out_state = first_state;
    }
    if (out_perm != nullptr) {
        *out_perm = first_perm;
    }
    if (out_attr != nullptr) {
        *out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr);
    }
    if (out_blocks_needed != nullptr) {
        *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
    }
    return ResultSuccess;
}

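// Common implementation for locking a range (e.g. LockForCodeMemory): validates the state,
// optionally produces the backing page group and physical address, applies new_perm, and
// tags the blocks with lock_attr.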
Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
                                     KMemoryState state_mask, KMemoryState state,
                                     KMemoryPermission perm_mask, KMemoryPermission perm,
                                     KMemoryAttribute attr_mask, KMemoryAttribute attr,
                                     KMemoryPermission new_perm, KMemoryAttribute lock_attr) {
    // Validate basic preconditions.
    ASSERT((lock_attr & attr) == KMemoryAttribute::None);
    ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
           KMemoryAttribute::None);

    // Validate the lock request.
    const size_t num_pages = size / PageSize;
    R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);

    // Lock the table.
    KScopedLightLock lk(general_lock);

    // Check that the output page group is empty, if it exists.
    if (out_pg) {
        ASSERT(out_pg->GetNumPages() == 0);
    }

    // Check the state.
    KMemoryState old_state{};
    KMemoryPermission old_perm{};
    KMemoryAttribute old_attr{};
    size_t num_allocator_blocks{};
    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
                                 std::addressof(old_attr), std::addressof(num_allocator_blocks),
                                 addr, size, state_mask | KMemoryState::FlagReferenceCounted,
                                 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
                                 attr_mask, attr));

    // Get the physical address, if we're supposed to.
    if (out_paddr != nullptr) {
        ASSERT(this->GetPhysicalAddressLocked(out_paddr, addr));
    }

    // Make the page group, if we're supposed to.
    if (out_pg != nullptr) {
        R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
    }

    // Create an update allocator.
    Result allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                 memory_block_slab_manager, num_allocator_blocks);
    R_TRY(allocator_result);

    // Decide on new perm and attr.
    new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
    KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr);

    // Update permission, if we need to.
    if (new_perm != old_perm) {
        R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
    }

    // Apply the memory block updates.
    memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
                                new_attr, KMemoryBlockDisableMergeAttribute::Locked,
                                KMemoryBlockDisableMergeAttribute::None);

    return ResultSuccess;
}

Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
                                KMemoryState state, KMemoryPermission perm_mask,
                                KMemoryPermission perm, KMemoryAttribute attr_mask,
                                KMemoryAttribute attr, KMemoryPermission new_perm,
                                KMemoryAttribute lock_attr, const KPageGroup* pg) {
    // Validate basic preconditions.
    ASSERT((attr_mask & lock_attr) == lock_attr);
    ASSERT((attr & lock_attr) == lock_attr);

    // Validate the unlock request.
    const size_t num_pages = size / PageSize;
    R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);

    // Lock the table.
    KScopedLightLock lk(general_lock);

    // Check the state.
    KMemoryState old_state{};
    KMemoryPermission old_perm{};
    KMemoryAttribute old_attr{};
    size_t num_allocator_blocks{};
    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
                                 std::addressof(old_attr), std::addressof(num_allocator_blocks),
                                 addr, size, state_mask | KMemoryState::FlagReferenceCounted,
                                 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
                                 attr_mask, attr));

    // Check the page group.
    if (pg != nullptr) {
        R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion);
    }

    // Decide on new perm and attr.
    new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
    KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr);

    // Create an update allocator.
    Result allocator_result{ResultSuccess};
    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                 memory_block_slab_manager, num_allocator_blocks);
    R_TRY(allocator_result);

    // Update permission, if we need to.
    if (new_perm != old_perm) {
        R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
    }

    // Apply the memory block updates.
    memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm,
                                new_attr, KMemoryBlockDisableMergeAttribute::None,
                                KMemoryBlockDisableMergeAttribute::Locked);

    return ResultSuccess;
}

} // namespace Kernel