Kernel/Memory: Give each Process its own page table.

The loader is now in charge of installing the newly created process's page table as the active one for the remainder of the loading process.
Subv 2017-07-21 21:17:57 -05:00
parent 5d0a1e7efd
commit 6d2734a074
9 changed files with 93 additions and 87 deletions
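
In short: each Kernel::Process owns a VMManager, and the VMManager now owns a Memory::PageTable, replacing the static main_page_table singleton. A condensed sketch of the relationship the hunks below introduce (all names are taken from this commit; surrounding code elided):

    // vm_manager.h: the address space now carries its own page table
    class VMManager final {
    public:
        Memory::PageTable page_table; // per-process, initialized in Reset()
    };

    // loader/*.cpp: after creating the process, install its table as the
    // currently active one
    Kernel::g_current_process = Kernel::Process::Create(std::move(codeset));
    Memory::current_page_table = &Kernel::g_current_process->vm_manager.page_table;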

src/core/core.cpp

@@ -137,7 +137,6 @@ void System::Reschedule() {
 }
 
 System::ResultStatus System::Init(EmuWindow* emu_window, u32 system_mode) {
-    Memory::InitMemoryMap();
     LOG_DEBUG(HW_Memory, "initialized OK");
 
     if (Settings::values.use_cpu_jit) {

src/core/hle/kernel/vm_manager.cpp

@@ -56,6 +56,10 @@ void VMManager::Reset() {
     initial_vma.size = MAX_ADDRESS;
     vma_map.emplace(initial_vma.base, initial_vma);
 
+    page_table.pointers.fill(nullptr);
+    page_table.attributes.fill(Memory::PageType::Unmapped);
+    page_table.cached_res_count.fill(0);
+
     UpdatePageTableForVMA(initial_vma);
 }
@@ -328,16 +332,17 @@ VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) {
 void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
     switch (vma.type) {
     case VMAType::Free:
-        Memory::UnmapRegion(vma.base, vma.size);
+        Memory::UnmapRegion(page_table, vma.base, vma.size);
         break;
     case VMAType::AllocatedMemoryBlock:
-        Memory::MapMemoryRegion(vma.base, vma.size, vma.backing_block->data() + vma.offset);
+        Memory::MapMemoryRegion(page_table, vma.base, vma.size,
+                                vma.backing_block->data() + vma.offset);
         break;
     case VMAType::BackingMemory:
-        Memory::MapMemoryRegion(vma.base, vma.size, vma.backing_memory);
+        Memory::MapMemoryRegion(page_table, vma.base, vma.size, vma.backing_memory);
         break;
     case VMAType::MMIO:
-        Memory::MapIoRegion(vma.base, vma.size, vma.mmio_handler);
+        Memory::MapIoRegion(page_table, vma.base, vma.size, vma.mmio_handler);
         break;
     }
 }
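
The fills added to VMManager::Reset above take over the job of the removed Memory::InitMemoryMap() (see the core.cpp and memory.cpp hunks), so every fresh address space starts out fully unmapped:

    // Same effect InitMemoryMap() had on the old global table, now per instance:
    page_table.pointers.fill(nullptr);                      // no backing pointers
    page_table.attributes.fill(Memory::PageType::Unmapped); // accesses fault
    page_table.cached_res_count.fill(0);                    // no cached resources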

src/core/hle/kernel/vm_manager.h

@@ -9,6 +9,7 @@
 #include <vector>
 #include "common/common_types.h"
 #include "core/hle/result.h"
+#include "core/memory.h"
 #include "core/mmio.h"
 
 namespace Kernel {
@@ -102,7 +103,6 @@ struct VirtualMemoryArea {
  * - http://duartes.org/gustavo/blog/post/page-cache-the-affair-between-memory-and-files/
  */
 class VMManager final {
-    // TODO(yuriks): Make page tables switchable to support multiple VMManagers
 public:
     /**
      * The maximum amount of address space managed by the kernel. Addresses above this are never
@@ -184,6 +184,10 @@
     /// Dumps the address space layout to the log, for debugging
     void LogLayout(Log::Level log_level) const;
 
+    /// Each VMManager has its own page table, which is set as the main one when the owning process
+    /// is scheduled.
+    Memory::PageTable page_table;
+
 private:
     using VMAIter = decltype(vma_map)::iterator;
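
The comment on the new page_table member says the table becomes the main one when the owning process is scheduled; this commit only wires that up in the loaders. A hypothetical scheduler-side switch (not part of this commit, using only names the diff introduces) would amount to a single pointer swap:

    // Hypothetical context-switch hook: repointing one global swaps the whole
    // emulated address space over to the incoming process's mappings.
    void SwitchToProcess(Kernel::Process* next) {
        Kernel::g_current_process = next;
        Memory::current_page_table = &next->vm_manager.page_table;
    }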

src/core/loader/3dsx.cpp

@@ -270,6 +270,7 @@ ResultStatus AppLoader_THREEDSX::Load() {
     Kernel::g_current_process = Kernel::Process::Create(std::move(codeset));
     Kernel::g_current_process->svc_access_mask.set();
     Kernel::g_current_process->address_mappings = default_address_mappings;
+    Memory::current_page_table = &Kernel::g_current_process->vm_manager.page_table;
 
     // Attach the default resource limit (APPLICATION) to the process
     Kernel::g_current_process->resource_limit =

src/core/loader/elf.cpp

@@ -397,6 +397,7 @@ ResultStatus AppLoader_ELF::Load() {
     Kernel::g_current_process = Kernel::Process::Create(std::move(codeset));
     Kernel::g_current_process->svc_access_mask.set();
     Kernel::g_current_process->address_mappings = default_address_mappings;
+    Memory::current_page_table = &Kernel::g_current_process->vm_manager.page_table;
 
     // Attach the default resource limit (APPLICATION) to the process
     Kernel::g_current_process->resource_limit =

src/core/loader/ncch.cpp

@@ -172,6 +172,7 @@ ResultStatus AppLoader_NCCH::LoadExec() {
     codeset->memory = std::make_shared<std::vector<u8>>(std::move(code));
     Kernel::g_current_process = Kernel::Process::Create(std::move(codeset));
+    Memory::current_page_table = &Kernel::g_current_process->vm_manager.page_table;
 
     // Attach a resource limit to the process based on the resource limit category
     Kernel::g_current_process->resource_limit =

src/core/memory.cpp

@@ -11,75 +11,18 @@
+#include "core/hle/kernel/process.h"
 #include "core/memory.h"
 #include "core/memory_setup.h"
 #include "core/mmio.h"
 #include "video_core/renderer_base.h"
 #include "video_core/video_core.h"
 
 namespace Memory {
 
-enum class PageType {
-    /// Page is unmapped and should cause an access error.
-    Unmapped,
-    /// Page is mapped to regular memory. This is the only type you can get pointers to.
-    Memory,
-    /// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and
-    /// invalidation
-    RasterizerCachedMemory,
-    /// Page is mapped to a I/O region. Writing and reading to this page is handled by functions.
-    Special,
-    /// Page is mapped to a I/O region, but also needs to check for rasterizer cache flushing and
-    /// invalidation
-    RasterizerCachedSpecial,
-};
-
-struct SpecialRegion {
-    VAddr base;
-    u32 size;
-    MMIORegionPointer handler;
-};
-
-/**
- * A (reasonably) fast way of allowing switchable and remappable process address spaces. It loosely
- * mimics the way a real CPU page table works, but instead is optimized for minimal decoding and
- * fetching requirements when accessing. In the usual case of an access to regular memory, it only
- * requires an indexed fetch and a check for NULL.
- */
-struct PageTable {
-    /**
-     * Array of memory pointers backing each page. An entry can only be non-null if the
-     * corresponding entry in the `attributes` array is of type `Memory`.
-     */
-    std::array<u8*, PAGE_TABLE_NUM_ENTRIES> pointers;
-
-    /**
-     * Contains MMIO handlers that back memory regions whose entries in the `attribute` array is of
-     * type `Special`.
-     */
-    std::vector<SpecialRegion> special_regions;
-
-    /**
-     * Array of fine grained page attributes. If it is set to any value other than `Memory`, then
-     * the corresponding entry in `pointers` MUST be set to null.
-     */
-    std::array<PageType, PAGE_TABLE_NUM_ENTRIES> attributes;
-
-    /**
-     * Indicates the number of externally cached resources touching a page that should be
-     * flushed before the memory is accessed
-     */
-    std::array<u8, PAGE_TABLE_NUM_ENTRIES> cached_res_count;
-};
-
-/// Singular page table used for the singleton process
-static PageTable main_page_table;
-/// Currently active page table
-static PageTable* current_page_table = &main_page_table;
+PageTable* current_page_table = nullptr;
 
 std::array<u8*, PAGE_TABLE_NUM_ENTRIES>* GetCurrentPageTablePointers() {
     return &current_page_table->pointers;
 }
 
-static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
+static void MapPages(PageTable& page_table, u32 base, u32 size, u8* memory, PageType type) {
     LOG_DEBUG(HW_Memory, "Mapping %p onto %08X-%08X", memory, base * PAGE_SIZE,
               (base + size) * PAGE_SIZE);
@@ -90,9 +33,9 @@ static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
     while (base != end) {
         ASSERT_MSG(base < PAGE_TABLE_NUM_ENTRIES, "out of range mapping at %08X", base);
 
-        current_page_table->attributes[base] = type;
-        current_page_table->pointers[base] = memory;
-        current_page_table->cached_res_count[base] = 0;
+        page_table.attributes[base] = type;
+        page_table.pointers[base] = memory;
+        page_table.cached_res_count[base] = 0;
 
         base += 1;
         if (memory != nullptr)
@@ -100,30 +43,24 @@ static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
     }
 }
 
-void InitMemoryMap() {
-    main_page_table.pointers.fill(nullptr);
-    main_page_table.attributes.fill(PageType::Unmapped);
-    main_page_table.cached_res_count.fill(0);
-}
-
-void MapMemoryRegion(VAddr base, u32 size, u8* target) {
+void MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, u8* target) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
-    MapPages(base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
+    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
 }
 
-void MapIoRegion(VAddr base, u32 size, MMIORegionPointer mmio_handler) {
+void MapIoRegion(PageTable& page_table, VAddr base, u32 size, MMIORegionPointer mmio_handler) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
-    MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
+    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
 
-    current_page_table->special_regions.emplace_back(SpecialRegion{base, size, mmio_handler});
+    page_table.special_regions.emplace_back(SpecialRegion{base, size, mmio_handler});
 }
 
-void UnmapRegion(VAddr base, u32 size) {
+void UnmapRegion(PageTable& page_table, VAddr base, u32 size) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
-    MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
+    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
 }
 
 /**
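
Note the changed default in the hunks above: current_page_table now starts out as nullptr rather than pointing at the static main_page_table, so any emulated memory access issued before a loader installs a process's table would dereference null. An illustrative guard (not present in the commit) makes the new invariant explicit:

    // Previously main_page_table always existed; now a page table only exists
    // once a process has been created and installed by a loader.
    ASSERT_MSG(current_page_table != nullptr, "memory access with no active page table");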

src/core/memory.h

@@ -7,8 +7,10 @@
 
 #include <array>
 #include <cstddef>
 #include <string>
+#include <vector>
 #include <boost/optional.hpp>
 #include "common/common_types.h"
+#include "core/mmio.h"
 
 namespace Memory {
@@ -21,6 +23,59 @@ const u32 PAGE_MASK = PAGE_SIZE - 1;
 const int PAGE_BITS = 12;
 const size_t PAGE_TABLE_NUM_ENTRIES = 1 << (32 - PAGE_BITS);
 
+enum class PageType {
+    /// Page is unmapped and should cause an access error.
+    Unmapped,
+    /// Page is mapped to regular memory. This is the only type you can get pointers to.
+    Memory,
+    /// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and
+    /// invalidation
+    RasterizerCachedMemory,
+    /// Page is mapped to a I/O region. Writing and reading to this page is handled by functions.
+    Special,
+    /// Page is mapped to a I/O region, but also needs to check for rasterizer cache flushing and
+    /// invalidation
+    RasterizerCachedSpecial,
+};
+
+struct SpecialRegion {
+    VAddr base;
+    u32 size;
+    MMIORegionPointer handler;
+};
+
+/**
+ * A (reasonably) fast way of allowing switchable and remappable process address spaces. It loosely
+ * mimics the way a real CPU page table works, but instead is optimized for minimal decoding and
+ * fetching requirements when accessing. In the usual case of an access to regular memory, it only
+ * requires an indexed fetch and a check for NULL.
+ */
+struct PageTable {
+    /**
+     * Array of memory pointers backing each page. An entry can only be non-null if the
+     * corresponding entry in the `attributes` array is of type `Memory`.
+     */
+    std::array<u8*, PAGE_TABLE_NUM_ENTRIES> pointers;
+
+    /**
+     * Contains MMIO handlers that back memory regions whose entries in the `attribute` array is of
+     * type `Special`.
+     */
+    std::vector<SpecialRegion> special_regions;
+
+    /**
+     * Array of fine grained page attributes. If it is set to any value other than `Memory`, then
+     * the corresponding entry in `pointers` MUST be set to null.
+     */
+    std::array<PageType, PAGE_TABLE_NUM_ENTRIES> attributes;
+
+    /**
+     * Indicates the number of externally cached resources touching a page that should be
+     * flushed before the memory is accessed
+     */
+    std::array<u8, PAGE_TABLE_NUM_ENTRIES> cached_res_count;
+};
+
 /// Physical memory regions as seen from the ARM11
 enum : PAddr {
     /// IO register area
@@ -126,6 +181,9 @@
     NEW_LINEAR_HEAP_VADDR_END = NEW_LINEAR_HEAP_VADDR + NEW_LINEAR_HEAP_SIZE,
 };
 
+/// Currently active page table
+extern PageTable* current_page_table;
+
 bool IsValidVirtualAddress(const VAddr addr);
 bool IsValidPhysicalAddress(const PAddr addr);
 
@@ -209,4 +267,4 @@ void RasterizerFlushVirtualRegion(VAddr start, u32 size, FlushMode mode);
  * retrieve the current page table for that purpose.
  */
 std::array<u8*, PAGE_TABLE_NUM_ENTRIES>* GetCurrentPageTablePointers();
-}
+} // namespace Memory
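
The doc comment moved into memory.h spells out the fast path: a regular memory access costs one indexed fetch plus a null check. A simplified read helper shows the shape (sketch only; the real accessors also dispatch on attributes[] for Special and RasterizerCached pages):

    u8 Read8(VAddr vaddr) {
        u8* page = current_page_table->pointers[vaddr >> PAGE_BITS];
        if (page != nullptr)
            return page[vaddr & PAGE_MASK]; // common case: regular memory
        // Unmapped/Special/RasterizerCached* pages consult attributes[] and
        // special_regions; elided in this sketch
        return 0;
    }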

src/core/memory_setup.h

@@ -9,24 +9,24 @@
 namespace Memory {
 
-void InitMemoryMap();
-
 /**
  * Maps an allocated buffer onto a region of the emulated process address space.
  *
+ * @param page_table The page table of the emulated process.
  * @param base The address to start mapping at. Must be page-aligned.
  * @param size The amount of bytes to map. Must be page-aligned.
  * @param target Buffer with the memory backing the mapping. Must be of length at least `size`.
 */
-void MapMemoryRegion(VAddr base, u32 size, u8* target);
+void MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, u8* target);
 
 /**
  * Maps a region of the emulated process address space as a IO region.
+ * @param page_table The page table of the emulated process.
  * @param base The address to start mapping at. Must be page-aligned.
 * @param size The amount of bytes to map. Must be page-aligned.
  * @param mmio_handler The handler that backs the mapping.
 */
-void MapIoRegion(VAddr base, u32 size, MMIORegionPointer mmio_handler);
+void MapIoRegion(PageTable& page_table, VAddr base, u32 size, MMIORegionPointer mmio_handler);
 
-void UnmapRegion(VAddr base, u32 size);
+void UnmapRegion(PageTable& page_table, VAddr base, u32 size);
 
 }
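
With the explicit PageTable& parameter, callers of the setup API now name the address space they are editing instead of mutating a hidden global. Usage as VMManager::UpdatePageTableForVMA does it above (condensed; `table` is a hypothetical local standing in for the owning manager's member):

    Memory::PageTable& table = vm_manager.page_table;
    Memory::MapMemoryRegion(table, vma.base, vma.size, vma.backing_memory);
    Memory::MapIoRegion(table, vma.base, vma.size, vma.mmio_handler);
    Memory::UnmapRegion(table, vma.base, vma.size);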