Merge pull request #8443 from liamwhite/code-mem

kernel: fix KCodeMemory initialization

Commit ec85eac3c9
3 changed files with 118 additions and 26 deletions
@@ -27,23 +27,18 @@ ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr
     auto& page_table = m_owner->PageTable();
 
     // Construct the page group.
-    m_page_group =
-        KPageLinkedList(page_table.GetPhysicalAddr(addr), Common::DivideUp(size, PageSize));
+    m_page_group = {};
 
     // Lock the memory.
-    R_TRY(page_table.LockForCodeMemory(addr, size))
+    R_TRY(page_table.LockForCodeMemory(&m_page_group, addr, size))
 
     // Clear the memory.
-    //
-    // FIXME: this ends up clobbering address ranges outside the scope of the mapping within
-    // guest memory, and is not specifically required if the guest program is correctly
-    // written, so disable until this is further investigated.
-    //
-    // for (const auto& block : m_page_group.Nodes()) {
-    //     std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
-    // }
+    for (const auto& block : m_page_group.Nodes()) {
+        std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
+    }
 
     // Set remaining tracking members.
+    m_owner->Open();
     m_address = addr;
     m_is_initialized = true;
     m_is_owner_mapped = false;
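The hunk above is the heart of the fix: the old code built the page group by hand from page_table.GetPhysicalAddr(addr), which treats the whole virtual range as one physically contiguous block, and the clearing loop had been disabled because such a group can describe memory outside the mapping. The new code starts from an empty group and lets LockForCodeMemory fill it with the blocks that are actually mapped, so the memset over m_page_group.Nodes() touches exactly the locked pages and can be re-enabled. A minimal standalone sketch of the difference (toy types only, not suyu code; Block, Lookup and the addresses are assumptions for illustration):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

using PAddr = std::uint64_t;
constexpr std::size_t page_size = 0x1000;

struct Block {
    PAddr address;
    std::size_t num_pages;
};

// Toy page table: virtual page index -> physical address.
// Pages 0 and 1 are deliberately not physically adjacent.
PAddr Lookup(std::size_t vpage) {
    static const PAddr phys[] = {0x8000'0000, 0x8000'5000, 0x8000'6000};
    return phys[vpage];
}

int main() {
    constexpr std::size_t num_pages = 3;

    // Old approach: describe the range as one contiguous block starting at the
    // physical address of its first page -- wrong for page 1 in this example.
    const Block assumed_contiguous{Lookup(0), num_pages};

    // New approach: walk the mapping and start a new block whenever the next
    // page is not physically adjacent to the end of the current block.
    std::vector<Block> actual_blocks;
    for (std::size_t i = 0; i < num_pages; ++i) {
        const PAddr pa = Lookup(i);
        if (!actual_blocks.empty() &&
            pa == actual_blocks.back().address + actual_blocks.back().num_pages * page_size) {
            ++actual_blocks.back().num_pages;
        } else {
            actual_blocks.push_back({pa, 1});
        }
    }

    std::printf("assumed: 1 block of %zu pages at 0x%llx\n", assumed_contiguous.num_pages,
                static_cast<unsigned long long>(assumed_contiguous.address));
    std::printf("actual:  %zu blocks\n", actual_blocks.size()); // prints 2
}

With a scattered mapping, the single-block description covers pages that were never part of the range, which is exactly the clobbering the old FIXME warned about.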
@@ -57,8 +52,14 @@ void KCodeMemory::Finalize() {
     // Unlock.
     if (!m_is_mapped && !m_is_owner_mapped) {
         const size_t size = m_page_group.GetNumPages() * PageSize;
-        m_owner->PageTable().UnlockForCodeMemory(m_address, size);
+        m_owner->PageTable().UnlockForCodeMemory(m_address, size, m_page_group);
     }
+
+    // Close the page group.
+    m_page_group = {};
+
+    // Close our reference to our owner.
+    m_owner->Close();
 }
 
 ResultCode KCodeMemory::Map(VAddr address, size_t size) {
@@ -118,7 +119,8 @@ ResultCode KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermis
         k_perm = KMemoryPermission::UserReadExecute;
         break;
     default:
-        break;
+        // Already validated by ControlCodeMemory svc
+        UNREACHABLE();
     }
 
     // Map the memory.
@@ -542,6 +542,95 @@ ResultCode KPageTable::MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num
     return ResultSuccess;
 }
 
+bool KPageTable::IsValidPageGroup(const KPageLinkedList& pg_ll, VAddr addr, size_t num_pages) {
+    ASSERT(this->IsLockedByCurrentThread());
+
+    const size_t size = num_pages * PageSize;
+    const auto& pg = pg_ll.Nodes();
+    const auto& memory_layout = system.Kernel().MemoryLayout();
+
+    // Empty groups are necessarily invalid.
+    if (pg.empty()) {
+        return false;
+    }
+
+    // We're going to validate that the group we'd expect is the group we see.
+    auto cur_it = pg.begin();
+    PAddr cur_block_address = cur_it->GetAddress();
+    size_t cur_block_pages = cur_it->GetNumPages();
+
+    auto UpdateCurrentIterator = [&]() {
+        if (cur_block_pages == 0) {
+            if ((++cur_it) == pg.end()) {
+                return false;
+            }
+
+            cur_block_address = cur_it->GetAddress();
+            cur_block_pages = cur_it->GetNumPages();
+        }
+        return true;
+    };
+
+    // Begin traversal.
+    Common::PageTable::TraversalContext context;
+    Common::PageTable::TraversalEntry next_entry;
+    if (!page_table_impl.BeginTraversal(next_entry, context, addr)) {
+        return false;
+    }
+
+    // Prepare tracking variables.
+    PAddr cur_addr = next_entry.phys_addr;
+    size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
+    size_t tot_size = cur_size;
+
+    // Iterate, comparing expected to actual.
+    while (tot_size < size) {
+        if (!page_table_impl.ContinueTraversal(next_entry, context)) {
+            return false;
+        }
+
+        if (next_entry.phys_addr != (cur_addr + cur_size)) {
+            const size_t cur_pages = cur_size / PageSize;
+
+            if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
+                return false;
+            }
+
+            if (!UpdateCurrentIterator()) {
+                return false;
+            }
+
+            if (cur_block_address != cur_addr || cur_block_pages < cur_pages) {
+                return false;
+            }
+
+            cur_block_address += cur_size;
+            cur_block_pages -= cur_pages;
+            cur_addr = next_entry.phys_addr;
+            cur_size = next_entry.block_size;
+        } else {
+            cur_size += next_entry.block_size;
+        }
+
+        tot_size += next_entry.block_size;
+    }
+
+    // Ensure we compare the right amount for the last block.
+    if (tot_size > size) {
+        cur_size -= (tot_size - size);
+    }
+
+    if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
+        return false;
+    }
+
+    if (!UpdateCurrentIterator()) {
+        return false;
+    }
+
+    return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
+}
+
 ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
                                           KPageTable& src_page_table, VAddr src_addr) {
     KScopedLightLock lk(general_lock);
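The newly added IsValidPageGroup walks the process page table over the given virtual range and checks that the physical blocks it encounters are exactly the blocks recorded in the page group. Stripped of the kernel types, the check is roughly the following standalone sketch (Extent, ExpandToPages and IsValidPageGroupToy are illustrative assumptions, not suyu code):

#include <cstddef>
#include <cstdint>
#include <vector>

constexpr std::size_t kPageSize = 0x1000;

struct Extent {
    std::uint64_t address; // physical start of a contiguous run
    std::size_t num_pages; // length of the run in pages
};

// Expand a list of physically contiguous runs into one address per page.
std::vector<std::uint64_t> ExpandToPages(const std::vector<Extent>& extents) {
    std::vector<std::uint64_t> pages;
    for (const Extent& e : extents) {
        for (std::size_t i = 0; i < e.num_pages; ++i) {
            pages.push_back(e.address + i * kPageSize);
        }
    }
    return pages;
}

// Toy equivalent of the check: the pages recorded in the group must be exactly
// the pages the page table currently maps for the range.
bool IsValidPageGroupToy(const std::vector<Extent>& group,
                         const std::vector<Extent>& mapped, std::size_t num_pages) {
    if (group.empty()) {
        return false; // empty groups are necessarily invalid
    }
    const auto expected = ExpandToPages(group);
    const auto actual = ExpandToPages(mapped);
    if (expected.size() != num_pages || actual.size() < num_pages) {
        return false;
    }
    for (std::size_t i = 0; i < num_pages; ++i) {
        if (expected[i] != actual[i]) {
            return false;
        }
    }
    return true;
}

The real implementation avoids materializing a per-page list: it walks the traversal entries and the group nodes in lock-step, coalescing physically contiguous entries and consuming group blocks as it goes, and it additionally rejects blocks that do not lie in heap physical memory.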
@@ -1687,22 +1776,22 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size)
     return ResultSuccess;
 }
 
-ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
+ResultCode KPageTable::LockForCodeMemory(KPageLinkedList* out, VAddr addr, std::size_t size) {
     return this->LockMemoryAndOpen(
-        nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
-        KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
-        KMemoryAttribute::All, KMemoryAttribute::None,
+        out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
+        KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
+        KMemoryAttribute::None,
         static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
                                        KMemoryPermission::KernelReadWrite),
         KMemoryAttribute::Locked);
 }
 
-ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size) {
-    return this->UnlockMemory(addr, size, KMemoryState::FlagCanCodeMemory,
-                              KMemoryState::FlagCanCodeMemory, KMemoryPermission::None,
-                              KMemoryPermission::None, KMemoryAttribute::All,
-                              KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
-                              KMemoryAttribute::Locked, nullptr);
+ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size,
+                                           const KPageLinkedList& pg) {
+    return this->UnlockMemory(
+        addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
+        KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
+        KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg);
 }
 
 ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
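Taken together, the two signatures define a paired contract: LockForCodeMemory reports, through its out parameter, which physical pages it locked, and UnlockForCodeMemory requires that same description back so the unlock path can validate it before releasing the lock. A toy sketch of that contract (ToyPageTable and its members are illustrative assumptions, not suyu code):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

constexpr std::uint64_t kPageSize = 0x1000;

class ToyPageTable {
public:
    // Lock a range and report through `out` which physical pages were locked
    // (the analogue of LockForCodeMemory filling a KPageLinkedList).
    void LockForCodeMemory(std::vector<std::uint64_t>* out, std::uint64_t addr,
                           std::size_t num_pages) {
        out->clear();
        for (std::size_t i = 0; i < num_pages; ++i) {
            const std::uint64_t va = addr + i * kPageSize;
            const std::uint64_t pa = 0x8000'0000 + va; // toy translation
            locked_[va] = pa;
            out->push_back(pa);
        }
    }

    // Unlock only if the caller hands back a description that still matches
    // what is locked (the analogue of UnlockMemory calling IsValidPageGroup).
    bool UnlockForCodeMemory(std::uint64_t addr, std::size_t num_pages,
                             const std::vector<std::uint64_t>& group) {
        if (group.size() != num_pages) {
            return false;
        }
        for (std::size_t i = 0; i < num_pages; ++i) {
            const auto it = locked_.find(addr + i * kPageSize);
            if (it == locked_.end() || it->second != group[i]) {
                return false; // stale or mismatched description: refuse to unlock
            }
        }
        for (std::size_t i = 0; i < num_pages; ++i) {
            locked_.erase(addr + i * kPageSize);
        }
        return true;
    }

private:
    std::map<std::uint64_t, std::uint64_t> locked_; // virtual page -> physical page
};

int main() {
    ToyPageTable table;
    std::vector<std::uint64_t> group;
    table.LockForCodeMemory(&group, 0x10000, 4);
    std::cout << table.UnlockForCodeMemory(0x10000, 4, group) << '\n'; // 1: group matches
    std::cout << table.UnlockForCodeMemory(0x10000, 4, group) << '\n'; // 0: already unlocked
}

In the actual change, KCodeMemory holds the group for the lifetime of the object and hands it back in Finalize, which is what the next hunk of k_page_table validation relies on.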
@@ -2125,7 +2214,7 @@ ResultCode KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_
 
     // Check the page group.
     if (pg != nullptr) {
-        UNIMPLEMENTED_MSG("PageGroup support is unimplemented!");
+        R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion);
     }
 
     // Decide on new perm and attr.
@@ -72,8 +72,8 @@ public:
                                 KMemoryPermission perm, PAddr map_addr = 0);
     ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size);
     ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
-    ResultCode LockForCodeMemory(VAddr addr, std::size_t size);
-    ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size);
+    ResultCode LockForCodeMemory(KPageLinkedList* out, VAddr addr, std::size_t size);
+    ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageLinkedList& pg);
     ResultCode MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
                                     KMemoryState state_mask, KMemoryState state,
                                     KMemoryPermission perm_mask, KMemoryPermission perm,
@@ -178,6 +178,7 @@ private:
                               const KPageLinkedList* pg);
 
     ResultCode MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages);
+    bool IsValidPageGroup(const KPageLinkedList& pg, VAddr addr, size_t num_pages);
 
     bool IsLockedByCurrentThread() const {
        return general_lock.IsLockedByCurrentThread();