kernel: fix page leak on process termination

This commit is contained in:
Liam 2024-01-07 13:59:48 -05:00
parent f7a3c135e2
commit f2fed21c11
4 changed files with 91 additions and 25 deletions

View file

@ -2,6 +2,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later // SPDX-License-Identifier: GPL-2.0-or-later
#include "common/page_table.h" #include "common/page_table.h"
#include "common/scope_exit.h"
namespace Common { namespace Common {
@ -11,29 +12,10 @@ PageTable::~PageTable() noexcept = default;
bool PageTable::BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context, bool PageTable::BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context,
Common::ProcessAddress address) const { Common::ProcessAddress address) const {
// Setup invalid defaults. out_context->next_offset = GetInteger(address);
out_entry->phys_addr = 0; out_context->next_page = address / page_size;
out_entry->block_size = page_size;
out_context->next_page = 0;
// Validate that we can read the actual entry. return this->ContinueTraversal(out_entry, out_context);
const auto page = address / page_size;
if (page >= backing_addr.size()) {
return false;
}
// Validate that the entry is mapped.
const auto phys_addr = backing_addr[page];
if (phys_addr == 0) {
return false;
}
// Populate the results.
out_entry->phys_addr = phys_addr + GetInteger(address);
out_context->next_page = page + 1;
out_context->next_offset = GetInteger(address) + page_size;
return true;
} }
bool PageTable::ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const { bool PageTable::ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const {
@ -41,6 +23,12 @@ bool PageTable::ContinueTraversal(TraversalEntry* out_entry, TraversalContext* c
out_entry->phys_addr = 0; out_entry->phys_addr = 0;
out_entry->block_size = page_size; out_entry->block_size = page_size;
// Regardless of whether the page was mapped, advance on exit.
SCOPE_EXIT({
context->next_page += 1;
context->next_offset += page_size;
});
// Validate that we can read the actual entry. // Validate that we can read the actual entry.
const auto page = context->next_page; const auto page = context->next_page;
if (page >= backing_addr.size()) { if (page >= backing_addr.size()) {
@ -55,8 +43,6 @@ bool PageTable::ContinueTraversal(TraversalEntry* out_entry, TraversalContext* c
// Populate the results. // Populate the results.
out_entry->phys_addr = phys_addr + context->next_offset; out_entry->phys_addr = phys_addr + context->next_offset;
context->next_page = page + 1;
context->next_offset += page_size;
return true; return true;
} }

View file

@ -431,9 +431,82 @@ Result KPageTableBase::InitializeForProcess(Svc::CreateProcessFlag as_type, bool
m_memory_block_slab_manager)); m_memory_block_slab_manager));
} }
// Releases every physical page still mapped by a terminating process's page
// table, closing the MemoryManager references so the pages return to the free
// pool (this is the page-leak fix this commit describes). Walks the entire
// address space once, coalescing contiguous regions so mm.Close() is invoked
// once per contiguous physical block rather than once per page.
Result KPageTableBase::FinalizeProcess() {
// Only process tables should be finalized.
ASSERT(!this->IsKernel());
// HLE processes don't have memory mapped.
R_SUCCEED_IF(m_impl == nullptr);
// NOTE: Here Nintendo calls an unknown OnFinalize function.
// this->OnFinalize();
// NOTE: Here Nintendo calls a second unknown OnFinalize function.
// this->OnFinalize2();
// Get implementation objects.
auto& impl = this->GetImpl();
auto& mm = m_kernel.MemoryManager();
// Traverse, freeing all pages.
{
// Get the address space size.
const size_t as_size = this->GetAddressSpaceSize();
// Begin the traversal.
TraversalContext context;
// cur_entry accumulates the current run (mapped or unmapped); it starts as an
// empty, invalid run so the first real entry simply replaces it.
TraversalEntry cur_entry = {
.phys_addr = 0,
.block_size = 0,
};
bool cur_valid = false;
TraversalEntry next_entry;
bool next_valid;
// tot_size counts address-space bytes already attributed to finished runs;
// traversal stops once cur_entry plus tot_size covers the whole space.
size_t tot_size = 0;
next_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context),
this->GetAddressSpaceStart());
// Iterate over entries.
while (true) {
// Extend the current run when it continues: either both entries are
// unmapped (we only need their combined size for coverage accounting),
// or both are mapped and physically contiguous.
if ((!next_valid && !cur_valid) ||
(next_valid && cur_valid &&
next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
cur_entry.block_size += next_entry.block_size;
} else {
// The run ended; if it was a mapped, heap-backed region, drop the
// reference count for all of its pages at once.
// NOTE(review): IsHeapPhysicalAddressForFinalize presumably filters to
// memory-manager-owned (refcounted) pages — confirm against its definition.
if (cur_valid && IsHeapPhysicalAddressForFinalize(cur_entry.phys_addr)) {
mm.Close(cur_entry.phys_addr, cur_entry.block_size / PageSize);
}
// Update tracking variables.
tot_size += cur_entry.block_size;
cur_entry = next_entry;
cur_valid = next_valid;
}
// Stop once the finished runs plus the current run span the address space.
if (cur_entry.block_size + tot_size >= as_size) {
break;
}
next_valid =
impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
}
// Handle the last block.
if (cur_valid && IsHeapPhysicalAddressForFinalize(cur_entry.phys_addr)) {
mm.Close(cur_entry.phys_addr, cur_entry.block_size / PageSize);
}
}
R_SUCCEED();
}
void KPageTableBase::Finalize() { void KPageTableBase::Finalize() {
this->FinalizeProcess();
auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) { auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) {
if (Settings::IsFastmemEnabled()) { if (m_impl->fastmem_arena) {
m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size, false); m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size, false);
} }
}; };

View file

@ -241,6 +241,7 @@ public:
KResourceLimit* resource_limit, Core::Memory::Memory& memory, KResourceLimit* resource_limit, Core::Memory::Memory& memory,
KProcessAddress aslr_space_start); KProcessAddress aslr_space_start);
Result FinalizeProcess();
void Finalize(); void Finalize();
bool IsKernel() const { bool IsKernel() const {

View file

@ -171,6 +171,12 @@ void KProcess::Finalize() {
m_resource_limit->Close(); m_resource_limit->Close();
} }
// Clear expensive resources, as the destructor is not called for guest objects.
for (auto& interface : m_arm_interfaces) {
interface.reset();
}
m_exclusive_monitor.reset();
// Perform inherited finalization. // Perform inherited finalization.
KSynchronizationObject::Finalize(); KSynchronizationObject::Finalize();
} }