Merge pull request #4500 from wwylele/sync-cache

Memory: sync rasterizer cache mark to fix LLE applet crash
Weiyi Wang 2018-12-19 19:45:48 -05:00 committed by GitHub
commit 5c75974909
13 changed files with 158 additions and 98 deletions

@@ -418,7 +418,6 @@ add_library(core STATIC
     loader/smdh.h
     memory.cpp
     memory.h
-    memory_setup.h
     mmio.h
     movie.cpp
     movie.h

@@ -25,7 +25,6 @@
 #include "core/hle/service/sm/sm.h"
 #include "core/hw/hw.h"
 #include "core/loader/loader.h"
-#include "core/memory_setup.h"
 #include "core/movie.h"
 #ifdef ENABLE_SCRIPTING
 #include "core/rpc/rpc_server.h"

@@ -276,12 +276,10 @@ private:
 public: // HACK: this is temporary exposed for tests,
         // due to WIP kernel refactor causing desync state in memory
+    std::unique_ptr<Memory::MemorySystem> memory;
     std::unique_ptr<Kernel::KernelSystem> kernel;
     std::unique_ptr<Timing> timing;
-    /// Memory system
-    std::unique_ptr<Memory::MemorySystem> memory;
 
 private:
     static System s_instance;
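
The member reordering above is load-bearing: C++ constructs non-static members in declaration order and destroys them in reverse, and with this commit the kernel's processes unregister their page tables from `memory` on destruction (see the `Process` destructor below), so `memory` has to outlive `kernel`. A minimal illustrative sketch of that rule, with stand-in types rather than the real `System` class:

```cpp
#include <memory>

// Stand-in types, not the real Citra classes.
namespace Memory { struct MemorySystem {}; }
namespace Kernel { struct KernelSystem {}; }

struct System {
    // Declared first: constructed first, destroyed last. Anything torn down as part of
    // `kernel`'s destruction (e.g. processes unregistering their page tables) can still
    // safely reach into `memory`.
    std::unique_ptr<Memory::MemorySystem> memory;
    std::unique_ptr<Kernel::KernelSystem> kernel; // destroyed before `memory`
};
```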

@@ -19,7 +19,6 @@
 #include "core/hle/kernel/vm_manager.h"
 #include "core/hle/result.h"
 #include "core/memory.h"
-#include "core/memory_setup.h"
 
 ////////////////////////////////////////////////////////////////////////////////////////////////////

@@ -402,8 +402,13 @@ ResultCode Process::Unmap(VAddr target, VAddr source, u32 size, VMAPermission pe
 }
 
 Kernel::Process::Process(KernelSystem& kernel)
-    : Object(kernel), handle_table(kernel), kernel(kernel) {}
+    : Object(kernel), handle_table(kernel), kernel(kernel), vm_manager(kernel.memory) {
+    kernel.memory.RegisterPageTable(&vm_manager.page_table);
+}
 
-Kernel::Process::~Process() {}
+Kernel::Process::~Process() {
+    kernel.memory.UnregisterPageTable(&vm_manager.page_table);
+}
 
 SharedPtr<Process> KernelSystem::GetProcessById(u32 process_id) const {
     auto itr = std::find_if(
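
Registering in the constructor and unregistering in the destructor ties the page-table registration to the process's lifetime, so the memory system's list of tables can never hold a dangling pointer. A stripped-down sketch of that RAII pairing (illustrative names only; the real classes carry much more state):

```cpp
#include <algorithm>
#include <vector>

struct PageTable {};

class MemorySystem {
public:
    void RegisterPageTable(PageTable* table) { tables.push_back(table); }
    void UnregisterPageTable(PageTable* table) {
        tables.erase(std::find(tables.begin(), tables.end(), table));
    }

private:
    std::vector<PageTable*> tables; // one entry per live process
};

class Process {
public:
    explicit Process(MemorySystem& memory) : memory(memory) {
        memory.RegisterPageTable(&page_table); // registered for the whole process lifetime
    }
    ~Process() {
        memory.UnregisterPageTable(&page_table); // removed before the table goes away
    }

private:
    MemorySystem& memory;
    PageTable page_table;
};
```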

@@ -8,7 +8,6 @@
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/vm_manager.h"
 #include "core/memory.h"
-#include "core/memory_setup.h"
 #include "core/mmio.h"
 
 namespace Kernel {
@@ -37,7 +36,7 @@ bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
     return true;
 }
 
-VMManager::VMManager() {
+VMManager::VMManager(Memory::MemorySystem& memory) : memory(memory) {
     Reset();
 }
@@ -351,13 +350,13 @@ VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) {
 void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
     switch (vma.type) {
     case VMAType::Free:
-        Memory::UnmapRegion(page_table, vma.base, vma.size);
+        memory.UnmapRegion(page_table, vma.base, vma.size);
         break;
     case VMAType::BackingMemory:
-        Memory::MapMemoryRegion(page_table, vma.base, vma.size, vma.backing_memory);
+        memory.MapMemoryRegion(page_table, vma.base, vma.size, vma.backing_memory);
         break;
     case VMAType::MMIO:
-        Memory::MapIoRegion(page_table, vma.base, vma.size, vma.mmio_handler);
+        memory.MapIoRegion(page_table, vma.base, vma.size, vma.mmio_handler);
         break;
     }
 }

@@ -113,7 +113,7 @@ public:
     std::map<VAddr, VirtualMemoryArea> vma_map;
     using VMAHandle = decltype(vma_map)::const_iterator;
 
-    VMManager();
+    explicit VMManager(Memory::MemorySystem& memory);
     ~VMManager();
 
     /// Clears the address space map, re-initializing with a single free area.
@@ -227,5 +227,7 @@ private:
     /// Updates the pages corresponding to this VMA so they match the VMA's attributes.
     void UpdatePageTableForVMA(const VirtualMemoryArea& vma);
+
+    Memory::MemorySystem& memory;
 };
 
 } // namespace Kernel

@@ -327,10 +327,13 @@ ResultCode AppletManager::PrepareToStartLibraryApplet(AppletId applet_id) {
     // There are some problems with LLE applets. The rasterizer cache gets out of sync
     // when the applet is closed. To avoid breaking applications because of the issue,
     // we are going to disable loading LLE applets before further fixes are done.
-    // auto process = NS::LaunchTitle(FS::MediaType::NAND, GetTitleIdForApplet(applet_id,
-    // region_value)); if (process) {
-    //     return RESULT_SUCCESS;
-    // }
+    auto cfg = Service::CFG::GetModule(system);
+    u32 region_value = cfg->GetRegionValue();
+    auto process =
+        NS::LaunchTitle(FS::MediaType::NAND, GetTitleIdForApplet(applet_id, region_value));
+    if (process) {
+        return RESULT_SUCCESS;
+    }
 
     // If we weren't able to load the native applet title, try to fallback to an HLE implementation.
     auto applet = HLE::Applets::Applet::Get(applet_id);
@@ -354,10 +357,13 @@ ResultCode AppletManager::PreloadLibraryApplet(AppletId applet_id) {
     // There are some problems with LLE applets. The rasterizer cache gets out of sync
     // when the applet is closed. To avoid breaking applications because of the issue,
     // we are going to disable loading LLE applets before further fixes are done.
-    // auto process = NS::LaunchTitle(FS::MediaType::NAND, GetTitleIdForApplet(applet_id,
-    // region_value)); if (process) {
-    //     return RESULT_SUCCESS;
-    // }
+    auto cfg = Service::CFG::GetModule(system);
+    u32 region_value = cfg->GetRegionValue();
+    auto process =
+        NS::LaunchTitle(FS::MediaType::NAND, GetTitleIdForApplet(applet_id, region_value));
+    if (process) {
+        return RESULT_SUCCESS;
+    }
 
     // If we weren't able to load the native applet title, try to fallback to an HLE implementation.
     auto applet = HLE::Applets::Applet::Get(applet_id);

@@ -15,12 +15,45 @@
 #include "core/hle/kernel/process.h"
 #include "core/hle/lock.h"
 #include "core/memory.h"
-#include "core/memory_setup.h"
 #include "video_core/renderer_base.h"
 #include "video_core/video_core.h"
 
 namespace Memory {
 
+class RasterizerCacheMarker {
+public:
+    void Mark(VAddr addr, bool cached) {
+        bool* p = At(addr);
+        if (p)
+            *p = cached;
+    }
+
+    bool IsCached(VAddr addr) {
+        bool* p = At(addr);
+        if (p)
+            return *p;
+        return false;
+    }
+
+private:
+    bool* At(VAddr addr) {
+        if (addr >= VRAM_VADDR && addr < VRAM_VADDR_END) {
+            return &vram[(addr - VRAM_VADDR) / PAGE_SIZE];
+        }
+        if (addr >= LINEAR_HEAP_VADDR && addr < LINEAR_HEAP_VADDR_END) {
+            return &linear_heap[(addr - LINEAR_HEAP_VADDR) / PAGE_SIZE];
+        }
+        if (addr >= NEW_LINEAR_HEAP_VADDR && addr < NEW_LINEAR_HEAP_VADDR_END) {
+            return &new_linear_heap[(addr - NEW_LINEAR_HEAP_VADDR) / PAGE_SIZE];
+        }
+        return nullptr;
+    }
+
+    std::array<bool, VRAM_SIZE / PAGE_SIZE> vram{};
+    std::array<bool, LINEAR_HEAP_SIZE / PAGE_SIZE> linear_heap{};
+    std::array<bool, NEW_LINEAR_HEAP_SIZE / PAGE_SIZE> new_linear_heap{};
+};
+
 class MemorySystem::Impl {
 public:
     Impl() {
@@ -36,6 +69,8 @@ public:
     std::unique_ptr<u8[]> n3ds_extra_ram = std::make_unique<u8[]>(Memory::N3DS_EXTRA_RAM_SIZE);
 
     PageTable* current_page_table = nullptr;
+    RasterizerCacheMarker cache_marker;
+    std::vector<PageTable*> page_table_list;
 };
 
 MemorySystem::MemorySystem() : impl(std::make_unique<Impl>()) {}
@@ -52,7 +87,7 @@ PageTable* MemorySystem::GetCurrentPageTable() const {
     return impl->current_page_table;
 }
 
-static void MapPages(PageTable& page_table, u32 base, u32 size, u8* memory, PageType type) {
+void MemorySystem::MapPages(PageTable& page_table, u32 base, u32 size, u8* memory, PageType type) {
     LOG_DEBUG(HW_Memory, "Mapping {} onto {:08X}-{:08X}", (void*)memory, base * PAGE_SIZE,
               (base + size) * PAGE_SIZE);
@@ -66,19 +101,26 @@ static void MapPages(PageTable& page_table, u32 base, u32 size, u8* memory, Page
         page_table.attributes[base] = type;
         page_table.pointers[base] = memory;
 
+        // If the memory to map is already rasterizer-cached, mark the page
+        if (type == PageType::Memory && impl->cache_marker.IsCached(base * PAGE_SIZE)) {
+            page_table.attributes[base] = PageType::RasterizerCachedMemory;
+            page_table.pointers[base] = nullptr;
+        }
+
         base += 1;
         if (memory != nullptr)
             memory += PAGE_SIZE;
     }
 }
 
-void MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, u8* target) {
+void MemorySystem::MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, u8* target) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:08X}", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:08X}", base);
     MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
 }
 
-void MapIoRegion(PageTable& page_table, VAddr base, u32 size, MMIORegionPointer mmio_handler) {
+void MemorySystem::MapIoRegion(PageTable& page_table, VAddr base, u32 size,
+                               MMIORegionPointer mmio_handler) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:08X}", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:08X}", base);
     MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
@@ -86,7 +128,7 @@ void MapIoRegion(PageTable& page_table, VAddr base, u32 size, MMIORegionPointer
     page_table.special_regions.emplace_back(SpecialRegion{base, size, mmio_handler});
 }
 
-void UnmapRegion(PageTable& page_table, VAddr base, u32 size) {
+void MemorySystem::UnmapRegion(PageTable& page_table, VAddr base, u32 size) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:08X}", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:08X}", base);
     MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
@@ -105,6 +147,15 @@ u8* MemorySystem::GetPointerForRasterizerCache(VAddr addr) {
     UNREACHABLE();
 }
 
+void MemorySystem::RegisterPageTable(PageTable* page_table) {
+    impl->page_table_list.push_back(page_table);
+}
+
+void MemorySystem::UnregisterPageTable(PageTable* page_table) {
+    impl->page_table_list.erase(
+        std::find(impl->page_table_list.begin(), impl->page_table_list.end(), page_table));
+}
+
 /**
  * This function should only be called for virtual addreses with attribute `PageType::Special`.
  */
@@ -324,18 +375,20 @@ void MemorySystem::RasterizerMarkRegionCached(PAddr start, u32 size, bool cached
     for (unsigned i = 0; i < num_pages; ++i, paddr += PAGE_SIZE) {
         for (VAddr vaddr : PhysicalToVirtualAddressForRasterizer(paddr)) {
-            PageType& page_type = impl->current_page_table->attributes[vaddr >> PAGE_BITS];
+            impl->cache_marker.Mark(vaddr, cached);
+            for (PageTable* page_table : impl->page_table_list) {
+                PageType& page_type = page_table->attributes[vaddr >> PAGE_BITS];
 
                 if (cached) {
                     // Switch page type to cached if now cached
                     switch (page_type) {
                     case PageType::Unmapped:
-                        // It is not necessary for a process to have this region mapped into its address
-                        // space, for example, a system module need not have a VRAM mapping.
+                        // It is not necessary for a process to have this region mapped into its
+                        // address space, for example, a system module need not have a VRAM mapping.
                         break;
                     case PageType::Memory:
                         page_type = PageType::RasterizerCachedMemory;
-                        impl->current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
+                        page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
                         break;
                     default:
                         UNREACHABLE();
@@ -344,12 +397,12 @@ void MemorySystem::RasterizerMarkRegionCached(PAddr start, u32 size, bool cached
                     // Switch page type to uncached if now uncached
                     switch (page_type) {
                     case PageType::Unmapped:
-                        // It is not necessary for a process to have this region mapped into its address
-                        // space, for example, a system module need not have a VRAM mapping.
+                        // It is not necessary for a process to have this region mapped into its
+                        // address space, for example, a system module need not have a VRAM mapping.
                         break;
                     case PageType::RasterizerCachedMemory: {
                         page_type = PageType::Memory;
-                        impl->current_page_table->pointers[vaddr >> PAGE_BITS] =
+                        page_table->pointers[vaddr >> PAGE_BITS] =
                             GetPointerForRasterizerCache(vaddr & ~PAGE_MASK);
                         break;
                     }
@@ -360,6 +413,7 @@ void MemorySystem::RasterizerMarkRegionCached(PAddr start, u32 size, bool cached
             }
         }
     }
+    }
 
 void RasterizerFlushRegion(PAddr start, u32 size) {
     if (VideoCore::g_renderer == nullptr) {
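
Taken together, the hunks above close the gap that crashed LLE applets: `RasterizerMarkRegionCached` now updates every registered page table (not just the current process's) and records the state in `cache_marker`, while `MapPages` consults the marker so that pages mapped *after* a region was marked cached still come up as `RasterizerCachedMemory`. A hedged sketch of the ordering problem this solves, assuming the types and constants from the existing `core/memory.h` (illustrative code, not part of the commit):

```cpp
#include <memory>
#include <vector>
#include "core/memory.h" // MemorySystem, PageTable, PageType, PAGE_SIZE, VRAM_PADDR, VRAM_VADDR

// Hypothetical sequence showing why the marker matters.
void CacheMarkOrderingSketch() {
    Memory::MemorySystem memory;

    // 1. The rasterizer caches a VRAM page while no page table for the applet exists yet.
    memory.RasterizerMarkRegionCached(Memory::VRAM_PADDR, Memory::PAGE_SIZE, true);

    // 2. An LLE applet process is created afterwards and maps VRAM into its own page table.
    //    Previously these pages came up as plain PageType::Memory and bypassed the cache;
    //    now MapPages consults the marker and flags them RasterizerCachedMemory immediately.
    auto applet_table = std::make_unique<Memory::PageTable>(); // too large for the stack
    applet_table->pointers.fill(nullptr);
    applet_table->attributes.fill(Memory::PageType::Unmapped);
    memory.RegisterPageTable(applet_table.get());

    std::vector<u8> backing(Memory::PAGE_SIZE); // illustrative backing buffer
    memory.MapMemoryRegion(*applet_table, Memory::VRAM_VADDR, Memory::PAGE_SIZE, backing.data());

    // 3. When the region is unmarked, every registered page table is switched back,
    //    not just whichever page table happens to be current.
    memory.RasterizerMarkRegionCached(Memory::VRAM_PADDR, Memory::PAGE_SIZE, false);

    memory.UnregisterPageTable(applet_table.get());
}
```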

@@ -214,6 +214,27 @@ public:
     MemorySystem();
     ~MemorySystem();
 
+    /**
+     * Maps an allocated buffer onto a region of the emulated process address space.
+     *
+     * @param page_table The page table of the emulated process.
+     * @param base The address to start mapping at. Must be page-aligned.
+     * @param size The amount of bytes to map. Must be page-aligned.
+     * @param target Buffer with the memory backing the mapping. Must be of length at least `size`.
+     */
+    void MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, u8* target);
+
+    /**
+     * Maps a region of the emulated process address space as a IO region.
+     * @param page_table The page table of the emulated process.
+     * @param base The address to start mapping at. Must be page-aligned.
+     * @param size The amount of bytes to map. Must be page-aligned.
+     * @param mmio_handler The handler that backs the mapping.
+     */
+    void MapIoRegion(PageTable& page_table, VAddr base, u32 size, MMIORegionPointer mmio_handler);
+
+    void UnmapRegion(PageTable& page_table, VAddr base, u32 size);
+
     /// Currently active page table
     void SetCurrentPageTable(PageTable* page_table);
     PageTable* GetCurrentPageTable() const;
@@ -260,6 +281,12 @@ public:
     */
     void RasterizerMarkRegionCached(PAddr start, u32 size, bool cached);
 
+    /// Registers page table for rasterizer cache marking
+    void RegisterPageTable(PageTable* page_table);
+
+    /// Unregisters page table for rasterizer cache marking
+    void UnregisterPageTable(PageTable* page_table);
+
 private:
     template <typename T>
     T Read(const VAddr vaddr);
@@ -275,6 +302,8 @@ private:
     */
     u8* GetPointerForRasterizerCache(VAddr addr);
 
+    void MapPages(PageTable& page_table, u32 base, u32 size, u8* memory, PageType type);
+
     class Impl;
     std::unique_ptr<Impl> impl;
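
With the mapping functions now declared on `MemorySystem`, a caller needs an instance plus a page table. A small usage sketch under those assumptions (illustrative only, not from the commit; the constants and types come from the existing `core/memory.h`):

```cpp
#include <memory>
#include <vector>
#include "core/memory.h" // MemorySystem, PageTable, PageType, PAGE_SIZE, HEAP_VADDR

// Map a page-aligned buffer into a standalone page table, then unmap it again.
void MapUnmapSketch() {
    Memory::MemorySystem memory;
    auto table = std::make_unique<Memory::PageTable>(); // PageTable is too big for the stack
    table->pointers.fill(nullptr);
    table->attributes.fill(Memory::PageType::Unmapped);

    std::vector<u8> backing(Memory::PAGE_SIZE * 4); // base and size must be page-aligned
    memory.MapMemoryRegion(*table, Memory::HEAP_VADDR, static_cast<u32>(backing.size()),
                           backing.data());

    // ... emulated accesses through this table now hit `backing` ...

    memory.UnmapRegion(*table, Memory::HEAP_VADDR, static_cast<u32>(backing.size()));
}
```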

@@ -1,32 +0,0 @@
-// Copyright 2015 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include "common/common_types.h"
-#include "core/mmio.h"
-
-namespace Memory {
-
-/**
- * Maps an allocated buffer onto a region of the emulated process address space.
- *
- * @param page_table The page table of the emulated process.
- * @param base The address to start mapping at. Must be page-aligned.
- * @param size The amount of bytes to map. Must be page-aligned.
- * @param target Buffer with the memory backing the mapping. Must be of length at least `size`.
- */
-void MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, u8* target);
-
-/**
- * Maps a region of the emulated process address space as a IO region.
- * @param page_table The page table of the emulated process.
- * @param base The address to start mapping at. Must be page-aligned.
- * @param size The amount of bytes to map. Must be page-aligned.
- * @param mmio_handler The handler that backs the mapping.
- */
-void MapIoRegion(PageTable& page_table, VAddr base, u32 size, MMIORegionPointer mmio_handler);
-
-void UnmapRegion(PageTable& page_table, VAddr base, u32 size);
-
-} // namespace Memory

@@ -6,7 +6,6 @@
 #include "core/core_timing.h"
 #include "core/hle/kernel/process.h"
 #include "core/memory.h"
-#include "core/memory_setup.h"
 #include "tests/core/arm/arm_test_common.h"
 
 namespace ArmTests {
@@ -31,15 +30,17 @@ TestEnvironment::TestEnvironment(bool mutable_memory_)
     page_table->pointers.fill(nullptr);
     page_table->attributes.fill(Memory::PageType::Unmapped);
 
-    Memory::MapIoRegion(*page_table, 0x00000000, 0x80000000, test_memory);
-    Memory::MapIoRegion(*page_table, 0x80000000, 0x80000000, test_memory);
+    memory.MapIoRegion(*page_table, 0x00000000, 0x80000000, test_memory);
+    memory.MapIoRegion(*page_table, 0x80000000, 0x80000000, test_memory);
 
     memory.SetCurrentPageTable(page_table);
 }
 
 TestEnvironment::~TestEnvironment() {
-    Memory::UnmapRegion(*page_table, 0x80000000, 0x80000000);
-    Memory::UnmapRegion(*page_table, 0x00000000, 0x80000000);
+    Memory::MemorySystem& memory = *Core::System::GetInstance().memory;
+    memory.UnmapRegion(*page_table, 0x80000000, 0x80000000);
+    memory.UnmapRegion(*page_table, 0x00000000, 0x80000000);
+
+    Core::System::GetInstance().kernel.reset();
 }
 
 void TestEnvironment::SetMemory64(VAddr vaddr, u64 value) {

@@ -11,9 +11,10 @@
 TEST_CASE("Memory Basics", "[kernel][memory]") {
     auto block = std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE);
+    Memory::MemorySystem memory;
 
     SECTION("mapping memory") {
         // Because of the PageTable, Kernel::VMManager is too big to be created on the stack.
-        auto manager = std::make_unique<Kernel::VMManager>();
+        auto manager = std::make_unique<Kernel::VMManager>(memory);
         auto result = manager->MapBackingMemory(Memory::HEAP_VADDR, block->data(), block->size(),
                                                 Kernel::MemoryState::Private);
         REQUIRE(result.Code() == RESULT_SUCCESS);
@@ -28,7 +29,7 @@ TEST_CASE("Memory Basics", "[kernel][memory]") {
     SECTION("unmapping memory") {
         // Because of the PageTable, Kernel::VMManager is too big to be created on the stack.
-        auto manager = std::make_unique<Kernel::VMManager>();
+        auto manager = std::make_unique<Kernel::VMManager>(memory);
         auto result = manager->MapBackingMemory(Memory::HEAP_VADDR, block->data(), block->size(),
                                                 Kernel::MemoryState::Private);
         REQUIRE(result.Code() == RESULT_SUCCESS);
@@ -44,7 +45,7 @@ TEST_CASE("Memory Basics", "[kernel][memory]") {
     SECTION("changing memory permissions") {
         // Because of the PageTable, Kernel::VMManager is too big to be created on the stack.
-        auto manager = std::make_unique<Kernel::VMManager>();
+        auto manager = std::make_unique<Kernel::VMManager>(memory);
         auto result = manager->MapBackingMemory(Memory::HEAP_VADDR, block->data(), block->size(),
                                                 Kernel::MemoryState::Private);
         REQUIRE(result.Code() == RESULT_SUCCESS);
@@ -63,7 +64,7 @@ TEST_CASE("Memory Basics", "[kernel][memory]") {
     SECTION("changing memory state") {
         // Because of the PageTable, Kernel::VMManager is too big to be created on the stack.
-        auto manager = std::make_unique<Kernel::VMManager>();
+        auto manager = std::make_unique<Kernel::VMManager>(memory);
         auto result = manager->MapBackingMemory(Memory::HEAP_VADDR, block->data(), block->size(),
                                                 Kernel::MemoryState::Private);
         REQUIRE(result.Code() == RESULT_SUCCESS);