mirror of https://git.suyu.dev/suyu/suyu.git (synced 2024-12-23 00:40:58 +01:00)

core: hle: kernel: k_memory_manager: Refresh.

This commit is contained in: parent 32d7faafa8, commit ba21ba0c5c

4 changed files with 461 additions and 370 deletions
src/core/hle/kernel/k_memory_manager.cpp

@@ -29,43 +29,44 @@ constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
    } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) {
        return KMemoryManager::Pool::SystemNonSecure;
    } else {
        ASSERT_MSG(false, "InvalidMemoryRegionType for conversion to Pool");
        return {};
        UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool");
    }
}

} // namespace

KMemoryManager::KMemoryManager(Core::System& system_)
    : system{system_}, pool_locks{
          KLightLock{system_.Kernel()},
          KLightLock{system_.Kernel()},
          KLightLock{system_.Kernel()},
          KLightLock{system_.Kernel()},
      } {}
KMemoryManager::KMemoryManager(Core::System& system)
    : m_system{system}, m_memory_layout{system.Kernel().MemoryLayout()},
      m_pool_locks{
          KLightLock{system.Kernel()},
          KLightLock{system.Kernel()},
          KLightLock{system.Kernel()},
          KLightLock{system.Kernel()},
      } {}

void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) {

    // Clear the management region to zero.
    const VAddr management_region_end = management_region + management_region_size;
    // std::memset(GetVoidPointer(management_region), 0, management_region_size);

    // Reset our manager count.
    num_managers = 0;
    m_num_managers = 0;

    // Traverse the virtual memory layout tree, initializing each manager as appropriate.
    while (num_managers != MaxManagerCount) {
    while (m_num_managers != MaxManagerCount) {
        // Locate the region that should initialize the current manager.
        PAddr region_address = 0;
        size_t region_size = 0;
        Pool region_pool = Pool::Count;
        for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
        for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
            // We only care about regions that we need to create managers for.
            if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
                continue;
            }

            // We want to initialize the managers in order.
            if (it.GetAttributes() != num_managers) {
            if (it.GetAttributes() != m_num_managers) {
                continue;
            }

@@ -97,8 +98,8 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
        }

        // Initialize a new manager for the region.
        Impl* manager = std::addressof(managers[num_managers++]);
        ASSERT(num_managers <= managers.size());
        Impl* manager = std::addressof(m_managers[m_num_managers++]);
        ASSERT(m_num_managers <= m_managers.size());

        const size_t cur_size = manager->Initialize(region_address, region_size, management_region,
                                                    management_region_end, region_pool);

@@ -107,13 +108,13 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio

        // Insert the manager into the pool list.
        const auto region_pool_index = static_cast<u32>(region_pool);
        if (pool_managers_tail[region_pool_index] == nullptr) {
            pool_managers_head[region_pool_index] = manager;
        if (m_pool_managers_tail[region_pool_index] == nullptr) {
            m_pool_managers_head[region_pool_index] = manager;
        } else {
            pool_managers_tail[region_pool_index]->SetNext(manager);
            manager->SetPrev(pool_managers_tail[region_pool_index]);
            m_pool_managers_tail[region_pool_index]->SetNext(manager);
            manager->SetPrev(m_pool_managers_tail[region_pool_index]);
        }
        pool_managers_tail[region_pool_index] = manager;
        m_pool_managers_tail[region_pool_index] = manager;
    }

    // Free each region to its corresponding heap.

@@ -121,11 +122,10 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
    const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress();
    const PAddr ini_end = ini_start + InitialProcessBinarySizeMax;
    const PAddr ini_last = ini_end - 1;
    for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
    for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
        if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
            // Get the manager for the region.
            auto index = it.GetAttributes();
            auto& manager = managers[index];
            auto& manager = m_managers[it.GetAttributes()];

            const PAddr cur_start = it.GetAddress();
            const PAddr cur_last = it.GetLastAddress();

@@ -162,11 +162,19 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
    }

    // Update the used size for all managers.
    for (size_t i = 0; i < num_managers; ++i) {
        managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);
    for (size_t i = 0; i < m_num_managers; ++i) {
        m_managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);
    }
}

Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) {
    UNREACHABLE();
}

void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) {
    UNREACHABLE();
}

PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {
    // Early return if we're allocating no pages.
    if (num_pages == 0) {

@@ -175,7 +183,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p

    // Lock the pool that we're allocating from.
    const auto [pool, dir] = DecodeOption(option);
    KScopedLightLock lk(pool_locks[static_cast<std::size_t>(pool)]);
    KScopedLightLock lk(m_pool_locks[static_cast<std::size_t>(pool)]);

    // Choose a heap based on our page size request.
    const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages);

@@ -185,7 +193,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
    PAddr allocated_block = 0;
    for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr;
         chosen_manager = this->GetNextManager(chosen_manager, dir)) {
        allocated_block = chosen_manager->AllocateBlock(heap_index, true);
        allocated_block = chosen_manager->AllocateAligned(heap_index, num_pages, align_pages);
        if (allocated_block != 0) {
            break;
        }

@@ -196,10 +204,9 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
        return 0;
    }

    // If we allocated more than we need, free some.
    const size_t allocated_pages = KPageHeap::GetBlockNumPages(heap_index);
    if (allocated_pages > num_pages) {
        chosen_manager->Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
    // Maintain the optimized memory bitmap, if we should.
    if (m_has_optimized_process[static_cast<size_t>(pool)]) {
        UNIMPLEMENTED();
    }

    // Open the first reference to the pages.

@@ -209,20 +216,21 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
}
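For orientation, the u32 option decoded at the top of AllocateAndOpenContinuous packs a Pool and a Direction together. A minimal, hypothetical call site, assuming the EncodeOption helper declared in the header below (the page counts are illustrative only, not taken from this commit):

    // Allocate 16 contiguous pages, 4-page aligned, from the front of the
    // Application pool. A return of 0 signals failure, matching the
    // "return 0" path above.
    const u32 option = KMemoryManager::EncodeOption(KMemoryManager::Pool::Application,
                                                    KMemoryManager::Direction::FromFront);
    const PAddr block = system.Kernel().MemoryManager().AllocateAndOpenContinuous(
        /*num_pages=*/16, /*align_pages=*/4, option);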
Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool,
                                             Direction dir, bool random) {
                                             Direction dir, bool unoptimized, bool random) {
    // Choose a heap based on our page size request.
    const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
    R_UNLESS(0 <= heap_index, ResultOutOfMemory);

    // Ensure that we don't leave anything un-freed.
    auto group_guard = SCOPE_GUARD({
    ON_RESULT_FAILURE {
        for (const auto& it : out->Nodes()) {
            auto& manager = this->GetManager(system.Kernel().MemoryLayout(), it.GetAddress());
            const size_t num_pages_to_free =
            auto& manager = this->GetManager(it.GetAddress());
            const size_t node_num_pages =
                std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
            manager.Free(it.GetAddress(), num_pages_to_free);
            manager.Free(it.GetAddress(), node_num_pages);
        }
    });
        out->Finalize();
    };

    // Keep allocating until we've allocated all our pages.
    for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) {

@@ -236,12 +244,17 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
            break;
        }

        // Safely add it to our group.
        {
            auto block_guard =
                SCOPE_GUARD({ cur_manager->Free(allocated_block, pages_per_alloc); });
            R_TRY(out->AddBlock(allocated_block, pages_per_alloc));
            block_guard.Cancel();
        // Ensure we don't leak the block if we fail.
        ON_RESULT_FAILURE_2 {
            cur_manager->Free(allocated_block, pages_per_alloc);
        };

        // Add the block to our group.
        R_TRY(out->AddBlock(allocated_block, pages_per_alloc));

        // Maintain the optimized memory bitmap, if we should.
        if (unoptimized) {
            UNIMPLEMENTED();
        }

        num_pages -= pages_per_alloc;

@@ -253,8 +266,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
    R_UNLESS(num_pages == 0, ResultOutOfMemory);

    // We succeeded!
    group_guard.Cancel();
    return ResultSuccess;
    R_SUCCEED();
}
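The guard rewrite in AllocatePageGroupImpl trades SCOPE_GUARD objects that must be Cancel()ed on success for result-scoped cleanup: an ON_RESULT_FAILURE / ON_RESULT_FAILURE_2 body runs only when the function exits with a failing Result, so the success path simply executes R_SUCCEED(). A rough standalone model of that behavior, with invented names (the emulator's real macros are generated differently):

    // Sketch: cleanup bound to a tracked Result instead of an explicit Cancel().
    template <typename F>
    class OnResultFailure {
    public:
        OnResultFailure(const Result& r, F&& f) : m_result{r}, m_func{std::forward<F>(f)} {}
        ~OnResultFailure() {
            if (!m_result.IsSuccess()) {
                m_func(); // fires only on failure paths
            }
        }

    private:
        const Result& m_result;
        F m_func;
    };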
Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option) {

@@ -266,10 +278,11 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op

    // Lock the pool that we're allocating from.
    const auto [pool, dir] = DecodeOption(option);
    KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]);
    KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);

    // Allocate the page group.
    R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false));
    R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir,
                                      m_has_optimized_process[static_cast<size_t>(pool)], true));

    // Open the first reference to the pages.
    for (const auto& block : out->Nodes()) {

@@ -277,7 +290,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
        size_t remaining_pages = block.GetNumPages();
        while (remaining_pages > 0) {
            // Get the manager for the current address.
            auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address);
            auto& manager = this->GetManager(cur_address);

            // Process part or all of the block.
            const size_t cur_pages =

@@ -290,11 +303,11 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
        }
    }

    return ResultSuccess;
    R_SUCCEED();
}

Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option,
                                                 u64 process_id, u8 fill_pattern) {
Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option,
                                          u64 process_id, u8 fill_pattern) {
    ASSERT(out != nullptr);
    ASSERT(out->GetNumPages() == 0);

@@ -302,83 +315,89 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag
    const auto [pool, dir] = DecodeOption(option);

    // Allocate the memory.
    bool optimized;
    {
        // Lock the pool that we're allocating from.
        KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]);
        KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);

        // Check if we have an optimized process.
        const bool has_optimized = m_has_optimized_process[static_cast<size_t>(pool)];
        const bool is_optimized = m_optimized_process_ids[static_cast<size_t>(pool)] == process_id;

        // Allocate the page group.
        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false));
        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized,
                                          false));

        // Open the first reference to the pages.
        // Set whether we should optimize.
        optimized = has_optimized && is_optimized;
    }

    // Perform optimized memory tracking, if we should.
    if (optimized) {
        // Iterate over the allocated blocks.
        for (const auto& block : out->Nodes()) {
            PAddr cur_address = block.GetAddress();
            size_t remaining_pages = block.GetNumPages();
            while (remaining_pages > 0) {
                // Get the manager for the current address.
                auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address);
            // Get the block extents.
            const PAddr block_address = block.GetAddress();
            const size_t block_pages = block.GetNumPages();

                // Process part or all of the block.
                const size_t cur_pages =
                    std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
                manager.OpenFirst(cur_address, cur_pages);
            // If it has no pages, we don't need to do anything.
            if (block_pages == 0) {
                continue;
            }

                // Advance.
                cur_address += cur_pages * PageSize;
                remaining_pages -= cur_pages;
            // Fill all the pages that we need to fill.
            bool any_new = false;
            {
                PAddr cur_address = block_address;
                size_t remaining_pages = block_pages;
                while (remaining_pages > 0) {
                    // Get the manager for the current address.
                    auto& manager = this->GetManager(cur_address);

                    // Process part or all of the block.
                    const size_t cur_pages =
                        std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
                    any_new =
                        manager.ProcessOptimizedAllocation(cur_address, cur_pages, fill_pattern);

                    // Advance.
                    cur_address += cur_pages * PageSize;
                    remaining_pages -= cur_pages;
                }
            }

            // If there are new pages, update tracking for the allocation.
            if (any_new) {
                // Update tracking for the allocation.
                PAddr cur_address = block_address;
                size_t remaining_pages = block_pages;
                while (remaining_pages > 0) {
                    // Get the manager for the current address.
                    auto& manager = this->GetManager(cur_address);

                    // Lock the pool for the manager.
                    KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);

                    // Track some or all of the current pages.
                    const size_t cur_pages =
                        std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
                    manager.TrackOptimizedAllocation(cur_address, cur_pages);

                    // Advance.
                    cur_address += cur_pages * PageSize;
                    remaining_pages -= cur_pages;
                }
            }
        }
    }

    // Set all the allocated memory.
    for (const auto& block : out->Nodes()) {
        std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
                    block.GetSize());
    }

    return ResultSuccess;
}

void KMemoryManager::Open(PAddr address, size_t num_pages) {
    // Repeatedly open references until we've done so for all pages.
    while (num_pages) {
        auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address);
        const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));

        {
            KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]);
            manager.Open(address, cur_pages);
    } else {
        // Set all the allocated memory.
        for (const auto& block : out->Nodes()) {
            std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
                        block.GetSize());
        }

        num_pages -= cur_pages;
        address += cur_pages * PageSize;
    }
}

void KMemoryManager::Close(PAddr address, size_t num_pages) {
    // Repeatedly close references until we've done so for all pages.
    while (num_pages) {
        auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address);
        const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));

        {
            KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]);
            manager.Close(address, cur_pages);
        }

        num_pages -= cur_pages;
        address += cur_pages * PageSize;
    }
}

void KMemoryManager::Close(const KPageGroup& pg) {
    for (const auto& node : pg.Nodes()) {
        Close(node.GetAddress(), node.GetNumPages());
    }
}
void KMemoryManager::Open(const KPageGroup& pg) {
    for (const auto& node : pg.Nodes()) {
        Open(node.GetAddress(), node.GetNumPages());
    }
    R_SUCCEED();
}
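Because the viewer interleaves the removed and added bodies above, here is a condensed outline of the refreshed AllocateForProcess, reconstructed from the fragments above for readability (not verbatim source):

    // 1. Decode pool/direction from the option.
    // 2. Under the pool lock, read has_optimized / is_optimized for the pool and
    //    call AllocatePageGroupImpl(out, num_pages, pool, dir,
    //                               has_optimized && !is_optimized, false).
    // 3. If this process is the pool's optimized process, walk each block twice:
    //    ProcessOptimizedAllocation() fills pages and reports whether any were
    //    newly touched, then TrackOptimizedAllocation() records them under the
    //    owning manager's pool lock.
    // 4. Otherwise, memset each block to fill_pattern via m_system.DeviceMemory().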
size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management,

@@ -394,18 +413,31 @@ size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr manage
    ASSERT(Common::IsAligned(total_management_size, PageSize));

    // Setup region.
    pool = p;
    management_region = management;
    page_reference_counts.resize(
    m_pool = p;
    m_management_region = management;
    m_page_reference_counts.resize(
        Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize);
    ASSERT(Common::IsAligned(management_region, PageSize));
    ASSERT(Common::IsAligned(m_management_region, PageSize));

    // Initialize the manager's KPageHeap.
    heap.Initialize(address, size, management + manager_size, page_heap_size);
    m_heap.Initialize(address, size, management + manager_size, page_heap_size);

    return total_management_size;
}

void KMemoryManager::Impl::TrackUnoptimizedAllocation(PAddr block, size_t num_pages) {
    UNREACHABLE();
}

void KMemoryManager::Impl::TrackOptimizedAllocation(PAddr block, size_t num_pages) {
    UNREACHABLE();
}

bool KMemoryManager::Impl::ProcessOptimizedAllocation(PAddr block, size_t num_pages,
                                                      u8 fill_pattern) {
    UNREACHABLE();
}

size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) {
    const size_t ref_count_size = (region_size / PageSize) * sizeof(u16);
    const size_t optimize_map_size =
src/core/hle/kernel/k_memory_manager.h

@@ -21,11 +21,8 @@ namespace Kernel {

class KPageGroup;

class KMemoryManager final {
class KMemoryManager {
public:
    YUZU_NON_COPYABLE(KMemoryManager);
    YUZU_NON_MOVEABLE(KMemoryManager);

    enum class Pool : u32 {
        Application = 0,
        Applet = 1,

@@ -45,16 +42,85 @@ public:
    enum class Direction : u32 {
        FromFront = 0,
        FromBack = 1,

        Shift = 0,
        Mask = (0xF << Shift),
    };

    explicit KMemoryManager(Core::System& system_);
    static constexpr size_t MaxManagerCount = 10;

    explicit KMemoryManager(Core::System& system);

    void Initialize(VAddr management_region, size_t management_region_size);

    constexpr size_t GetSize(Pool pool) const {
    Result InitializeOptimizedMemory(u64 process_id, Pool pool);
    void FinalizeOptimizedMemory(u64 process_id, Pool pool);

    PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
    Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
    Result AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
                              u8 fill_pattern);

    Pool GetPool(PAddr address) const {
        return this->GetManager(address).GetPool();
    }

    void Open(PAddr address, size_t num_pages) {
        // Repeatedly open references until we've done so for all pages.
        while (num_pages) {
            auto& manager = this->GetManager(address);
            const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));

            {
                KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
                manager.Open(address, cur_pages);
            }

            num_pages -= cur_pages;
            address += cur_pages * PageSize;
        }
    }

    void OpenFirst(PAddr address, size_t num_pages) {
        // Repeatedly open references until we've done so for all pages.
        while (num_pages) {
            auto& manager = this->GetManager(address);
            const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));

            {
                KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
                manager.OpenFirst(address, cur_pages);
            }

            num_pages -= cur_pages;
            address += cur_pages * PageSize;
        }
    }

    void Close(PAddr address, size_t num_pages) {
        // Repeatedly close references until we've done so for all pages.
        while (num_pages) {
            auto& manager = this->GetManager(address);
            const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));

            {
                KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
                manager.Close(address, cur_pages);
            }

            num_pages -= cur_pages;
            address += cur_pages * PageSize;
        }
    }

    size_t GetSize() {
        size_t total = 0;
        for (size_t i = 0; i < m_num_managers; i++) {
            total += m_managers[i].GetSize();
        }
        return total;
    }

    size_t GetSize(Pool pool) {
        constexpr Direction GetSizeDirection = Direction::FromFront;
        size_t total = 0;
        for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr;

@@ -64,18 +130,36 @@ public:
        return total;
    }

    PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
    Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
    Result AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
                                     u8 fill_pattern);
    size_t GetFreeSize() {
        size_t total = 0;
        for (size_t i = 0; i < m_num_managers; i++) {
            KScopedLightLock lk(m_pool_locks[static_cast<size_t>(m_managers[i].GetPool())]);
            total += m_managers[i].GetFreeSize();
        }
        return total;
    }

    static constexpr size_t MaxManagerCount = 10;
    size_t GetFreeSize(Pool pool) {
        KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);

    void Close(PAddr address, size_t num_pages);
    void Close(const KPageGroup& pg);
        constexpr Direction GetSizeDirection = Direction::FromFront;
        size_t total = 0;
        for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr;
             manager = this->GetNextManager(manager, GetSizeDirection)) {
            total += manager->GetFreeSize();
        }
        return total;
    }

    void Open(PAddr address, size_t num_pages);
    void Open(const KPageGroup& pg);
    void DumpFreeList(Pool pool) {
        KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);

        constexpr Direction DumpDirection = Direction::FromFront;
        for (auto* manager = this->GetFirstManager(pool, DumpDirection); manager != nullptr;
             manager = this->GetNextManager(manager, DumpDirection)) {
            manager->DumpFreeList();
        }
    }

public:
    static size_t CalculateManagementOverheadSize(size_t region_size) {

@@ -88,14 +172,13 @@ public:
    }

    static constexpr Pool GetPool(u32 option) {
        return static_cast<Pool>((static_cast<u32>(option) & static_cast<u32>(Pool::Mask)) >>
        return static_cast<Pool>((option & static_cast<u32>(Pool::Mask)) >>
                                 static_cast<u32>(Pool::Shift));
    }

    static constexpr Direction GetDirection(u32 option) {
        return static_cast<Direction>(
            (static_cast<u32>(option) & static_cast<u32>(Direction::Mask)) >>
            static_cast<u32>(Direction::Shift));
        return static_cast<Direction>((option & static_cast<u32>(Direction::Mask)) >>
                                      static_cast<u32>(Direction::Shift));
    }

    static constexpr std::tuple<Pool, Direction> DecodeOption(u32 option) {

@@ -103,74 +186,88 @@ public:
    }
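GetPool and GetDirection above recover the packed fields by masking and shifting. A small round-trip sketch; EncodeOption's body is not shown in this diff, so the encoder here is an assumption that it simply ORs the shifted fields together:

    constexpr u32 EncodeOptionSketch(KMemoryManager::Pool pool, KMemoryManager::Direction dir) {
        return (static_cast<u32>(pool) << static_cast<u32>(KMemoryManager::Pool::Shift)) |
               (static_cast<u32>(dir) << static_cast<u32>(KMemoryManager::Direction::Shift));
    }
    // DecodeOption(EncodeOptionSketch(p, d)) yields {p, d} again, because each
    // mask selects exactly the bits its field was shifted into.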
private:
    class Impl final {
    class Impl {
    public:
        YUZU_NON_COPYABLE(Impl);
        YUZU_NON_MOVEABLE(Impl);
        static size_t CalculateManagementOverheadSize(size_t region_size);

        static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) {
            return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
                    Common::BitSize<u64>()) *
                   sizeof(u64);
        }

    public:
        Impl() = default;
        ~Impl() = default;

        size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end,
                          Pool p);

        VAddr AllocateBlock(s32 index, bool random) {
            return heap.AllocateBlock(index, random);
        PAddr AllocateBlock(s32 index, bool random) {
            return m_heap.AllocateBlock(index, random);
        }

        void Free(VAddr addr, size_t num_pages) {
            heap.Free(addr, num_pages);
        PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
            return m_heap.AllocateAligned(index, num_pages, align_pages);
        }
        void Free(PAddr addr, size_t num_pages) {
            m_heap.Free(addr, num_pages);
        }

        void SetInitialUsedHeapSize(size_t reserved_size) {
            heap.SetInitialUsedSize(reserved_size);
            m_heap.SetInitialUsedSize(reserved_size);
        }

        void InitializeOptimizedMemory() {
            UNIMPLEMENTED();
        }

        void TrackUnoptimizedAllocation(PAddr block, size_t num_pages);
        void TrackOptimizedAllocation(PAddr block, size_t num_pages);

        bool ProcessOptimizedAllocation(PAddr block, size_t num_pages, u8 fill_pattern);

        constexpr Pool GetPool() const {
            return pool;
            return m_pool;
        }

        constexpr size_t GetSize() const {
            return heap.GetSize();
            return m_heap.GetSize();
        }
        constexpr PAddr GetEndAddress() const {
            return m_heap.GetEndAddress();
        }

        constexpr VAddr GetAddress() const {
            return heap.GetAddress();
        size_t GetFreeSize() const {
            return m_heap.GetFreeSize();
        }

        constexpr VAddr GetEndAddress() const {
            return heap.GetEndAddress();
        void DumpFreeList() const {
            UNIMPLEMENTED();
        }

        constexpr size_t GetPageOffset(PAddr address) const {
            return heap.GetPageOffset(address);
            return m_heap.GetPageOffset(address);
        }

        constexpr size_t GetPageOffsetToEnd(PAddr address) const {
            return heap.GetPageOffsetToEnd(address);
            return m_heap.GetPageOffsetToEnd(address);
        }

        constexpr void SetNext(Impl* n) {
            next = n;
            m_next = n;
        }

        constexpr void SetPrev(Impl* n) {
            prev = n;
            m_prev = n;
        }

        constexpr Impl* GetNext() const {
            return next;
            return m_next;
        }

        constexpr Impl* GetPrev() const {
            return prev;
            return m_prev;
        }

        void OpenFirst(PAddr address, size_t num_pages) {
            size_t index = this->GetPageOffset(address);
            const size_t end = index + num_pages;
            while (index < end) {
                const RefCount ref_count = (++page_reference_counts[index]);
                const RefCount ref_count = (++m_page_reference_counts[index]);
                ASSERT(ref_count == 1);

                index++;

@@ -181,7 +278,7 @@ private:
            size_t index = this->GetPageOffset(address);
            const size_t end = index + num_pages;
            while (index < end) {
                const RefCount ref_count = (++page_reference_counts[index]);
                const RefCount ref_count = (++m_page_reference_counts[index]);
                ASSERT(ref_count > 1);

                index++;

@@ -195,8 +292,8 @@ private:
            size_t free_start = 0;
            size_t free_count = 0;
            while (index < end) {
                ASSERT(page_reference_counts[index] > 0);
                const RefCount ref_count = (--page_reference_counts[index]);
                ASSERT(m_page_reference_counts[index] > 0);
                const RefCount ref_count = (--m_page_reference_counts[index]);

                // Keep track of how many zero refcounts we see in a row, to minimize calls to free.
                if (ref_count == 0) {

@@ -208,7 +305,7 @@ private:
                    }
                } else {
                    if (free_count > 0) {
                        this->Free(heap.GetAddress() + free_start * PageSize, free_count);
                        this->Free(m_heap.GetAddress() + free_start * PageSize, free_count);
                        free_count = 0;
                    }
                }

@@ -217,44 +314,36 @@ private:
            }

            if (free_count > 0) {
                this->Free(heap.GetAddress() + free_start * PageSize, free_count);
                this->Free(m_heap.GetAddress() + free_start * PageSize, free_count);
            }
        }
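The Close path above batches consecutive newly-zero refcounts into a single Free call. A self-contained sketch of that run-length idea, using a plain refcount vector and a free_pages callback (both names are illustrative, not the emulator's API):

    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <vector>

    // Decrement per-page refcounts over [first, first + count) and hand maximal
    // runs of newly-zero pages to free_pages in one call each, mirroring the
    // free_start/free_count bookkeeping in Impl::Close.
    void ClosePages(std::vector<std::uint16_t>& refcounts, std::size_t first, std::size_t count,
                    const std::function<void(std::size_t, std::size_t)>& free_pages) {
        std::size_t free_start = 0;
        std::size_t free_count = 0;
        for (std::size_t index = first; index < first + count; ++index) {
            if (--refcounts[index] == 0) {
                if (free_count == 0) {
                    free_start = index; // start a new freeable run
                }
                ++free_count;
            } else if (free_count > 0) {
                free_pages(free_start, free_count); // flush the finished run
                free_count = 0;
            }
        }
        if (free_count > 0) {
            free_pages(free_start, free_count); // flush the trailing run
        }
    }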
        static size_t CalculateManagementOverheadSize(size_t region_size);

        static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) {
            return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
                    Common::BitSize<u64>()) *
                   sizeof(u64);
        }
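CalculateOptimizedProcessOverheadSize reserves one bit per page, rounded up to whole u64 words. A worked check, assuming the Switch's usual 4 KiB page size (the region size is an arbitrary example):

    constexpr auto pages = (4 * 1024 * 1024) / (4 * 1024); // 1024 pages in a 4 MiB region
    constexpr auto words = (pages + 63) / 64;              // AlignUp(pages, 64) / BitSize<u64>()
    static_assert(words * 8 == 128, "16 u64 words -> 128 bytes of bitmap");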
    private:
        using RefCount = u16;

        KPageHeap heap;
        std::vector<RefCount> page_reference_counts;
        VAddr management_region{};
        Pool pool{};
        Impl* next{};
        Impl* prev{};
        KPageHeap m_heap;
        std::vector<RefCount> m_page_reference_counts;
        VAddr m_management_region{};
        Pool m_pool{};
        Impl* m_next{};
        Impl* m_prev{};
    };

private:
    Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) {
        return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
    Impl& GetManager(PAddr address) {
        return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
    }

    const Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) const {
        return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
    const Impl& GetManager(PAddr address) const {
        return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
    }

    constexpr Impl* GetFirstManager(Pool pool, Direction dir) const {
        return dir == Direction::FromBack ? pool_managers_tail[static_cast<size_t>(pool)]
                                          : pool_managers_head[static_cast<size_t>(pool)];
    constexpr Impl* GetFirstManager(Pool pool, Direction dir) {
        return dir == Direction::FromBack ? m_pool_managers_tail[static_cast<size_t>(pool)]
                                          : m_pool_managers_head[static_cast<size_t>(pool)];
    }

    constexpr Impl* GetNextManager(Impl* cur, Direction dir) const {
    constexpr Impl* GetNextManager(Impl* cur, Direction dir) {
        if (dir == Direction::FromBack) {
            return cur->GetPrev();
        } else {

@@ -263,15 +352,21 @@ private:
    }

    Result AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, Direction dir,
                                 bool random);
                                 bool unoptimized, bool random);

private:
    Core::System& system;
    std::array<KLightLock, static_cast<size_t>(Pool::Count)> pool_locks;
    std::array<Impl*, MaxManagerCount> pool_managers_head{};
    std::array<Impl*, MaxManagerCount> pool_managers_tail{};
    std::array<Impl, MaxManagerCount> managers;
    size_t num_managers{};
    template <typename T>
    using PoolArray = std::array<T, static_cast<size_t>(Pool::Count)>;

    Core::System& m_system;
    const KMemoryLayout& m_memory_layout;
    PoolArray<KLightLock> m_pool_locks;
    std::array<Impl*, MaxManagerCount> m_pool_managers_head{};
    std::array<Impl*, MaxManagerCount> m_pool_managers_tail{};
    std::array<Impl, MaxManagerCount> m_managers;
    size_t m_num_managers{};
    PoolArray<u64> m_optimized_process_ids{};
    PoolArray<bool> m_has_optimized_process{};
};

} // namespace Kernel

src/core/hle/kernel/k_page_table.cpp

@@ -114,7 +114,7 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type

    // Set other basic fields
    m_enable_aslr = enable_aslr;
    m_enable_device_address_space_merge = false;
    m_enable_device_address_space_merge = enable_das_merge;
    m_address_space_start = start;
    m_address_space_end = end;
    m_is_kernel = false;

@@ -219,10 +219,22 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
        }
    }

    // Set heap members
    // Set heap and fill members.
    m_current_heap_end = m_heap_region_start;
    m_max_heap_size = 0;
    m_max_physical_memory_size = 0;
    m_mapped_physical_memory_size = 0;
    m_mapped_unsafe_physical_memory = 0;
    m_mapped_insecure_memory = 0;
    m_mapped_ipc_server_memory = 0;

    m_heap_fill_value = 0;
    m_ipc_fill_value = 0;
    m_stack_fill_value = 0;

    // Set allocation option.
    m_allocate_option =
        KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack
                                                     : KMemoryManager::Direction::FromFront);
    // Ensure that the regions are inside our address space.
    auto IsInAddressSpace = [&](VAddr addr) {

@@ -271,6 +283,16 @@ void KPageTable::Finalize() {
        m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size);
    });

    // Release any insecure mapped memory.
    if (m_mapped_insecure_memory) {
        UNIMPLEMENTED();
    }

    // Release any ipc server memory.
    if (m_mapped_ipc_server_memory) {
        UNIMPLEMENTED();
    }

    // Close the backing page table, as the destructor is not called for guest objects.
    m_page_table_impl.reset();
}

@@ -690,9 +712,20 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& s
    R_SUCCEED();
}

void KPageTable::HACK_OpenPages(PAddr phys_addr, size_t num_pages) {
    m_system.Kernel().MemoryManager().OpenFirst(phys_addr, num_pages);
}

void KPageTable::HACK_ClosePages(VAddr virt_addr, size_t num_pages) {
    for (size_t index = 0; index < num_pages; ++index) {
        const auto paddr = GetPhysicalAddr(virt_addr + (index * PageSize));
        m_system.Kernel().MemoryManager().Close(paddr, 1);
    }
}
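HACK_ClosePages has to translate and close one page at a time because a virtually contiguous range can map physically discontiguous frames. An illustration with made-up addresses (not emulator output):

    // Two adjacent virtual pages may resolve to unrelated physical frames, so a
    // single Close(paddr, num_pages) over the first frame would be wrong:
    //   GetPhysicalAddr(va + 0 * PageSize) == 0x80103000
    //   GetPhysicalAddr(va + 1 * PageSize) == 0x80f2a000  // not 0x80104000
    // Hence the loop closes each page's frame individually.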
Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
    // Lock the physical memory lock.
    KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
    KScopedLightLock phys_lk(m_map_physical_memory_lock);

    // Calculate the last address for convenience.
    const VAddr last_address = address + size - 1;

@@ -746,15 +779,19 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
    {
        // Reserve the memory from the process resource limit.
        KScopedResourceReservation memory_reservation(
            m_system.Kernel().CurrentProcess()->GetResourceLimit(),
            LimitableResource::PhysicalMemory, size - mapped_size);
            m_resource_limit, LimitableResource::PhysicalMemory, size - mapped_size);
        R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

        // Allocate pages for the new memory.
        KPageGroup pg;
        R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess(
            &pg, (size - mapped_size) / PageSize,
            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
        R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
            &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0));

        // If we fail in the next bit (or retry), we need to cleanup the pages.
        // auto pg_guard = SCOPE_GUARD {
        //     pg.OpenFirst();
        //     pg.Close();
        //};

        // Map the memory.
        {

@@ -814,15 +851,24 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {

            // Create an update allocator.
            ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
            Result allocator_result{ResultSuccess};
            Result allocator_result;
            KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                         m_memory_block_slab_manager,
                                                         num_allocator_blocks);
            R_TRY(allocator_result);

            // We're going to perform an update, so create a helper.
            // KScopedPageTableUpdater updater(this);

            // Prepare to iterate over the memory.
            auto pg_it = pg.Nodes().begin();
            PAddr pg_phys_addr = pg_it->GetAddress();
            size_t pg_pages = pg_it->GetNumPages();

            // Reset the current tracking address, and make sure we clean up on failure.
            // pg_guard.Cancel();
            cur_address = address;
            auto unmap_guard = detail::ScopeExit([&] {
            ON_RESULT_FAILURE {
                if (cur_address > address) {
                    const VAddr last_unmap_address = cur_address - 1;

@@ -845,6 +891,9 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                                                  last_unmap_address + 1 - cur_address) /
                                              PageSize;

                        // HACK: Manually close the pages.
                        HACK_ClosePages(cur_address, cur_pages);

                        // Unmap.
                        ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
                                       OperationType::Unmap)

@@ -861,12 +910,17 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                        ++it;
                    }
                }
            });

            // Iterate over the memory.
            auto pg_it = pg.Nodes().begin();
            PAddr pg_phys_addr = pg_it->GetAddress();
            size_t pg_pages = pg_it->GetNumPages();
                // Release any remaining unmapped memory.
                m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
                m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages);
                for (++pg_it; pg_it != pg.Nodes().end(); ++pg_it) {
                    m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(),
                                                                pg_it->GetNumPages());
                    m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(),
                                                            pg_it->GetNumPages());
                }
            };

            auto it = m_memory_block_manager.FindIterator(cur_address);
            while (true) {

@@ -901,6 +955,9 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
                              OperationType::Map, pg_phys_addr));

                // HACK: Manually open the pages.
                HACK_OpenPages(pg_phys_addr, cur_pages);

                // Advance.
                cur_address += cur_pages * PageSize;
                map_pages -= cur_pages;

@@ -932,9 +989,6 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
                                  KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
                                  KMemoryPermission::UserReadWrite, KMemoryAttribute::None);

            // Cancel our guard.
            unmap_guard.Cancel();

            R_SUCCEED();
        }
    }

@@ -943,7 +997,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {

Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
    // Lock the physical memory lock.
    KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
    KScopedLightLock phys_lk(m_map_physical_memory_lock);

    // Lock the table.
    KScopedLightLock lk(m_general_lock);

@@ -952,8 +1006,11 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
    const VAddr last_address = address + size - 1;

    // Define iteration variables.
    VAddr cur_address = 0;
    size_t mapped_size = 0;
    VAddr map_start_address = 0;
    VAddr map_last_address = 0;

    VAddr cur_address;
    size_t mapped_size;
    size_t num_allocator_blocks = 0;

    // Check if the memory is mapped.

@@ -979,27 +1036,27 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
            if (is_normal) {
                R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory);

                if (map_start_address == 0) {
                    map_start_address = cur_address;
                }
                map_last_address =
                    (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address;

                if (info.GetAddress() < address) {
                    ++num_allocator_blocks;
                }
                if (last_address < info.GetLastAddress()) {
                    ++num_allocator_blocks;
                }

                mapped_size += (map_last_address + 1 - cur_address);
            }

            // Check if we're done.
            if (last_address <= info.GetLastAddress()) {
                if (is_normal) {
                    mapped_size += (last_address + 1 - cur_address);
                }
                break;
            }

            // Track the memory if it's mapped.
            if (is_normal) {
                mapped_size += VAddr(info.GetEndAddress()) - cur_address;
            }

            // Advance.
            cur_address = info.GetEndAddress();
            ++it;

@@ -1009,125 +1066,22 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
        R_SUCCEED_IF(mapped_size == 0);
    }

    // Make a page group for the unmap region.
    KPageGroup pg;
    {
        auto& impl = this->PageTableImpl();

        // Begin traversal.
        Common::PageTable::TraversalContext context;
        Common::PageTable::TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0};
        bool cur_valid = false;
        Common::PageTable::TraversalEntry next_entry;
        bool next_valid = false;
        size_t tot_size = 0;

        cur_address = address;
        next_valid = impl.BeginTraversal(next_entry, context, cur_address);
        next_entry.block_size =
            (next_entry.block_size - (next_entry.phys_addr & (next_entry.block_size - 1)));

        // Iterate, building the group.
        while (true) {
            if ((!next_valid && !cur_valid) ||
                (next_valid && cur_valid &&
                 next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
                cur_entry.block_size += next_entry.block_size;
            } else {
                if (cur_valid) {
                    // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
                    R_TRY(pg.AddBlock(cur_entry.phys_addr, cur_entry.block_size / PageSize));
                }

                // Update tracking variables.
                tot_size += cur_entry.block_size;
                cur_entry = next_entry;
                cur_valid = next_valid;
            }

            if (cur_entry.block_size + tot_size >= size) {
                break;
            }

            next_valid = impl.ContinueTraversal(next_entry, context);
        }

        // Add the last block.
        if (cur_valid) {
            // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
            R_TRY(pg.AddBlock(cur_entry.phys_addr, (size - tot_size) / PageSize));
        }
    }
    ASSERT(pg.GetNumPages() == mapped_size / PageSize);
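The traversal loop above coalesces physically contiguous entries before adding blocks to the page group. A compact model of the merge rule it applies, with made-up entry values:

    struct Entry {
        std::uintptr_t phys_addr; // standing in for PAddr
        std::size_t block_size;
    };

    // Extend the current run while the next entry begins exactly where the
    // current one ends; otherwise flush the run as one block and start anew.
    constexpr bool Mergeable(const Entry& cur, const Entry& next) {
        return next.phys_addr == cur.phys_addr + cur.block_size;
    }
    // e.g. {0x1000, 0x2000} then {0x3000, 0x1000} merges into {0x1000, 0x3000};
    // {0x1000, 0x2000} then {0x8000, 0x1000} starts a new block instead.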
    // Create an update allocator.
    ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
    Result allocator_result{ResultSuccess};
    Result allocator_result;
    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                 m_memory_block_slab_manager, num_allocator_blocks);
    R_TRY(allocator_result);

    // We're going to perform an update, so create a helper.
    // KScopedPageTableUpdater updater(this);

    // Separate the mapping.
    R_TRY(Operate(map_start_address, (map_last_address + 1 - map_start_address) / PageSize,
                  KMemoryPermission::None, OperationType::Separate));

    // Reset the current tracking address, and make sure we clean up on failure.
    cur_address = address;
    auto remap_guard = detail::ScopeExit([&] {
        if (cur_address > address) {
            const VAddr last_map_address = cur_address - 1;
            cur_address = address;

            // Iterate over the memory we unmapped.
            auto it = m_memory_block_manager.FindIterator(cur_address);
            auto pg_it = pg.Nodes().begin();
            PAddr pg_phys_addr = pg_it->GetAddress();
            size_t pg_pages = pg_it->GetNumPages();

            while (true) {
                // Get the memory info for the pages we unmapped, convert to property.
                const KMemoryInfo info = it->GetMemoryInfo();

                // If the memory is normal, we unmapped it and need to re-map it.
                if (info.GetState() == KMemoryState::Normal) {
                    // Determine the range to map.
                    size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
                                                last_map_address + 1 - cur_address) /
                                       PageSize;

                    // While we have pages to map, map them.
                    while (map_pages > 0) {
                        // Check if we're at the end of the physical block.
                        if (pg_pages == 0) {
                            // Ensure there are more pages to map.
                            ASSERT(pg_it != pg.Nodes().end());

                            // Advance our physical block.
                            ++pg_it;
                            pg_phys_addr = pg_it->GetAddress();
                            pg_pages = pg_it->GetNumPages();
                        }

                        // Map whatever we can.
                        const size_t cur_pages = std::min(pg_pages, map_pages);
                        ASSERT(this->Operate(cur_address, cur_pages, info.GetPermission(),
                                             OperationType::Map, pg_phys_addr) == ResultSuccess);

                        // Advance.
                        cur_address += cur_pages * PageSize;
                        map_pages -= cur_pages;

                        pg_phys_addr += cur_pages * PageSize;
                        pg_pages -= cur_pages;
                    }
                }

                // Check if we're done.
                if (last_map_address <= info.GetLastAddress()) {
                    break;
                }

                // Advance.
                ++it;
            }
        }
    });

    // Iterate over the memory, unmapping as we go.
    auto it = m_memory_block_manager.FindIterator(cur_address);

@@ -1145,8 +1099,12 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
                                       last_address + 1 - cur_address) /
                                   PageSize;

            // HACK: Manually close the pages.
            HACK_ClosePages(cur_address, cur_pages);

            // Unmap.
            R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap));
            ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)
                       .IsSuccess());
        }

        // Check if we're done.

@@ -1161,8 +1119,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {

    // Release the memory resource.
    m_mapped_physical_memory_size -= mapped_size;
    auto process{m_system.Kernel().CurrentProcess()};
    process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
    m_resource_limit->Release(LimitableResource::PhysicalMemory, mapped_size);

    // Update memory blocks.
    m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,

@@ -1170,14 +1127,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
                                  KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
                                  KMemoryBlockDisableMergeAttribute::None);

    // TODO(bunnei): This is a workaround until the next set of changes, where we add reference
    // counting for mapped pages. Until then, we must manually close the reference to the page
    // group.
    m_system.Kernel().MemoryManager().Close(pg);

    // We succeeded.
    remap_guard.Cancel();

    R_SUCCEED();
}

@@ -1753,8 +1703,7 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
                  OperationType::Unmap));

    // Release the memory from the resource limit.
    m_system.Kernel().CurrentProcess()->GetResourceLimit()->Release(
        LimitableResource::PhysicalMemory, num_pages * PageSize);
    m_resource_limit->Release(LimitableResource::PhysicalMemory, num_pages * PageSize);

    // Apply the memory block update.
    m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size,

@@ -1784,8 +1733,7 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {

    // Reserve memory for the heap extension.
    KScopedResourceReservation memory_reservation(
        m_system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
        allocation_size);
        m_resource_limit, LimitableResource::PhysicalMemory, allocation_size);
    R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

    // Allocate pages for the heap extension.

@@ -1873,7 +1821,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_
        R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
    } else {
        KPageGroup page_group;
        R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess(
        R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
            &page_group, needed_num_pages,
            KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
        R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));

@@ -1887,8 +1835,9 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_
    return addr;
}

Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm,
                                                bool is_aligned) {
Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size,
                                                KMemoryPermission perm, bool is_aligned,
                                                bool check_heap) {
    // Lightly validate the range before doing anything else.
    const size_t num_pages = size / PageSize;
    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);

@@ -1898,15 +1847,18 @@ Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMem

    // Check the memory state.
    const auto test_state =
        (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap);
        (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) |
        (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
    size_t num_allocator_blocks;
    R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, test_state,
    KMemoryState old_state;
    R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr,
                                 std::addressof(num_allocator_blocks), address, size, test_state,
                                 test_state, perm, perm,
                                 KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked,
                                 KMemoryAttribute::None, KMemoryAttribute::DeviceShared));

    // Create an update allocator.
    Result allocator_result{ResultSuccess};
    Result allocator_result;
    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                 m_memory_block_slab_manager, num_allocator_blocks);
    R_TRY(allocator_result);

@@ -1915,10 +1867,13 @@ Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMem
    m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
                                      &KMemoryBlock::ShareToDevice, KMemoryPermission::None);

    // Set whether the locked memory was io.
    *out_is_io = old_state == KMemoryState::Io;

    R_SUCCEED();
}

Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) {
Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap) {
    // Lightly validate the range before doing anything else.
    const size_t num_pages = size / PageSize;
    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);

@@ -1927,16 +1882,16 @@ Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) {
    KScopedLightLock lk(m_general_lock);

    // Check the memory state.
    const auto test_state = KMemoryState::FlagCanDeviceMap |
                            (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
    size_t num_allocator_blocks;
    R_TRY(this->CheckMemoryStateContiguous(
        std::addressof(num_allocator_blocks), address, size,
        KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap,
        KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap,
        std::addressof(num_allocator_blocks), address, size, test_state, test_state,
        KMemoryPermission::None, KMemoryPermission::None,
        KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));

    // Create an update allocator.
    Result allocator_result{ResultSuccess};
    Result allocator_result;
    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
                                                 m_memory_block_slab_manager, num_allocator_blocks);
    R_TRY(allocator_result);

@@ -2070,6 +2025,10 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
        m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
        break;
    }
    case OperationType::Separate: {
        // HACK: Unimplemented.
        break;
    }
    case OperationType::ChangePermissions:
    case OperationType::ChangePermissionsAndRefresh:
        break;

@@ -2105,6 +2064,7 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
    case KMemoryState::GeneratedCode:
    case KMemoryState::CodeOut:
    case KMemoryState::Coverage:
    case KMemoryState::Insecure:
        return m_alias_code_region_start;
    case KMemoryState::Code:
    case KMemoryState::CodeData:

@@ -2140,6 +2100,7 @@ size_t KPageTable::GetRegionSize(KMemoryState state) const {
    case KMemoryState::GeneratedCode:
    case KMemoryState::CodeOut:
    case KMemoryState::Coverage:
    case KMemoryState::Insecure:
        return m_alias_code_region_end - m_alias_code_region_start;
    case KMemoryState::Code:
    case KMemoryState::CodeData:

@@ -2181,6 +2142,7 @@ bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const {
    case KMemoryState::GeneratedCode:
    case KMemoryState::CodeOut:
    case KMemoryState::Coverage:
    case KMemoryState::Insecure:
        return is_in_region && !is_in_heap && !is_in_alias;
    case KMemoryState::Normal:
        ASSERT(is_in_heap);
src/core/hle/service/nvdrv/devices/nvmap.cpp

@@ -126,10 +126,12 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output)
        LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);
        return result;
    }
    bool is_out_io{};
    ASSERT(system.CurrentProcess()
               ->PageTable()
               .LockForMapDeviceAddressSpace(handle_description->address, handle_description->size,
                                             Kernel::KMemoryPermission::None, true)
               .LockForMapDeviceAddressSpace(&is_out_io, handle_description->address,
                                             handle_description->size,
                                             Kernel::KMemoryPermission::None, true, false)
               .IsSuccess());
    std::memcpy(output.data(), &params, sizeof(params));
    return result;