From 19291ba4655a9dac59a0222efe0e019b7e56fe3b Mon Sep 17 00:00:00 2001 From: NarcolepticK Date: Tue, 11 Sep 2018 14:34:18 -0400 Subject: [PATCH 1/4] LLE Mapped Buffer: Add unmapping, zero-size, and multiple page handling --- src/core/hle/kernel/ipc.cpp | 49 ++++++++++++++++++++++++-------- src/core/hle/kernel/vm_manager.h | 3 ++ 2 files changed, 40 insertions(+), 12 deletions(-) diff --git a/src/core/hle/kernel/ipc.cpp b/src/core/hle/kernel/ipc.cpp index 8db9d241f..8a6214409 100644 --- a/src/core/hle/kernel/ipc.cpp +++ b/src/core/hle/kernel/ipc.cpp @@ -128,7 +128,11 @@ ResultCode TranslateCommandBuffer(SharedPtr src_thread, SharedPtr> Memory::PAGE_BITS; - ASSERT(num_pages >= 1); + // Skip when the size is zero + if (size == 0) { + i += 1; + break; + } if (reply) { // TODO(Subv): Scan the target's command buffer to make sure that there was a @@ -139,13 +143,23 @@ ResultCode TranslateCommandBuffer(SharedPtr src_thread, SharedPtrvm_manager.UnmapRange( + page_start, num_pages * Memory::PAGE_SIZE); + ASSERT(result == RESULT_SUCCESS); + } else { + const auto vma_iter = src_process->vm_manager.vma_map.find(source_address); + const auto& vma = vma_iter->second; + const VAddr dest_address = vma.originating_buffer_address; + + auto buffer = std::make_shared>(size); + Memory::ReadBlock(*src_process, source_address, buffer->data(), size); + Memory::WriteBlock(*dst_process, dest_address, buffer->data(), size); + ResultCode result = src_process->vm_manager.UnmapRange( page_start, num_pages * Memory::PAGE_SIZE); ASSERT(result == RESULT_SUCCESS); } - ASSERT_MSG(permissions == IPC::MappedBufferPermissions::R, - "Unmapping Write MappedBuffers is unimplemented"); i += 1; break; } @@ -156,19 +170,13 @@ ResultCode TranslateCommandBuffer(SharedPtr src_thread, SharedPtr src_thread, SharedPtr(buffer->size()), Kernel::MemoryState::Shared) .Unwrap(); + } else { + auto buffer = std::make_shared>(num_pages * Memory::PAGE_SIZE); + Memory::ReadBlock(*src_process, source_address, buffer->data() + page_offset, size); + + // Map the pages into the target process' address space. + target_address = + dst_process->vm_manager + .MapMemoryBlockToBase(Memory::IPC_MAPPING_VADDR + Memory::PAGE_SIZE, + Memory::IPC_MAPPING_SIZE - Memory::PAGE_SIZE, buffer, + 0, static_cast(buffer->size()), + Kernel::MemoryState::Shared) + .Unwrap(); } + // Save the original address we copied the buffer from so that we can copy the modified + // buffer back, if needed + auto vma_iter = dst_process->vm_manager.vma_map.find(target_address + page_offset); + auto& vma = vma_iter->second; + vma.originating_buffer_address = source_address; cmd_buf[i++] = target_address + page_offset; break; diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h index 7ac5c3b01..fcb107c06 100644 --- a/src/core/hle/kernel/vm_manager.h +++ b/src/core/hle/kernel/vm_manager.h @@ -86,6 +86,9 @@ struct VirtualMemoryArea { PAddr paddr = 0; Memory::MMIORegionPointer mmio_handler = nullptr; + /// Originating address of the IPC mapped buffer + VAddr originating_buffer_address = 0; + /// Tests if this area can be merged to the right with `next`. 
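
Stepping outside the diff for a moment: the ipc.cpp hunk above derives everything from the mapped-buffer translate descriptor, so it can help to see the arithmetic in isolation. The sketch below is illustrative only and is not part of the patch; the bit layout and the 4 KiB page constants are assumptions based on the commonly documented 3DS IPC format, standing in for Citra's IPC::MappedBufferDescInfo, Memory::PAGE_SIZE and Memory::PAGE_MASK.

    // Minimal sketch, assuming the usual 3DS descriptor packing (size in the upper bits,
    // permissions in bits 1-2) and 4 KiB pages. Illustrative, not Citra code.
    #include <cstdint>

    using u32 = std::uint32_t;
    using VAddr = u32;

    constexpr u32 PAGE_SIZE = 0x1000;
    constexpr u32 PAGE_MASK = PAGE_SIZE - 1;

    struct MappedBufferInfo {
        u32 size;  // requested byte size of the client buffer
        u32 perms; // assumed encoding: 1 = R, 2 = W, 3 = RW
    };

    MappedBufferInfo DecodeMappedBufferDescriptor(u32 descriptor) {
        return MappedBufferInfo{descriptor >> 4, (descriptor >> 1) & 3};
    }

    // Mirrors the patch's AlignUp(size + page_offset, PAGE_SIZE) >> PAGE_BITS computation:
    // the buffer needs one page for every page it touches.
    u32 NumPagesSpanned(VAddr source_address, u32 size) {
        const u32 page_offset = source_address & PAGE_MASK;
        return (size + page_offset + PAGE_SIZE - 1) / PAGE_SIZE;
    }

For instance, a 0x20-byte buffer starting 0x10 bytes before a page boundary spans two pages, while a zero-size buffer at a page-aligned address spans zero pages, which is the case the new zero-size early-out handles instead of tripping ASSERT(num_pages >= 1).
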
bool CanBeMergedWith(const VirtualMemoryArea& next) const; }; From 51d53a62815dfb8fd20655403af58aed357e2637 Mon Sep 17 00:00:00 2001 From: NarcolepticK Date: Sun, 30 Sep 2018 21:21:51 -0400 Subject: [PATCH 2/4] LLE Mapped Buffer: addressed comments --- src/core/hle/kernel/ipc.cpp | 159 +++++++++++++++++-------------- src/core/hle/kernel/vm_manager.h | 3 - src/core/memory.cpp | 52 ++++++++++ src/core/memory.h | 2 + 4 files changed, 140 insertions(+), 76 deletions(-) diff --git a/src/core/hle/kernel/ipc.cpp b/src/core/hle/kernel/ipc.cpp index 8a6214409..070bb6619 100644 --- a/src/core/hle/kernel/ipc.cpp +++ b/src/core/hle/kernel/ipc.cpp @@ -33,6 +33,17 @@ ResultCode TranslateCommandBuffer(SharedPtr src_thread, SharedPtr cmd_buf; Memory::ReadBlock(*src_process, src_address, cmd_buf.data(), command_size * sizeof(u32)); + // Create a copy of the target's command buffer + IPC::Header dst_header; + Memory::ReadBlock(*dst_process, dst_address, &dst_header.raw, sizeof(dst_header.raw)); + + std::size_t dst_untranslated_size = 1u + dst_header.normal_params_size; + std::size_t dst_command_size = dst_untranslated_size + dst_header.translate_params_size; + + std::array dst_cmd_buf; + Memory::ReadBlock(*dst_process, dst_address, dst_cmd_buf.data(), + dst_command_size * sizeof(u32)); + std::size_t i = untranslated_size; while (i < command_size) { u32 descriptor = cmd_buf[i]; @@ -128,36 +139,63 @@ ResultCode TranslateCommandBuffer(SharedPtr src_thread, SharedPtr> Memory::PAGE_BITS; - // Skip when the size is zero + // Skip when the size is zero and num_pages == 0 if (size == 0) { - i += 1; + cmd_buf[i++] = 0; break; } + ASSERT(num_pages >= 1); if (reply) { - // TODO(Subv): Scan the target's command buffer to make sure that there was a - // MappedBuffer descriptor in the original request. The real kernel panics if you - // try to reply with an unsolicited MappedBuffer. + // Scan the target's command buffer for the matching mapped buffer + std::size_t j = dst_untranslated_size; + while (j < dst_command_size) { + u32 desc = dst_cmd_buf[j++]; - // Unmap the buffers. Readonly buffers do not need to be copied over to the target - // process again because they were (presumably) not modified. This behavior is - // consistent with the real kernel. - if (permissions == IPC::MappedBufferPermissions::R) { - ResultCode result = src_process->vm_manager.UnmapRange( - page_start, num_pages * Memory::PAGE_SIZE); - ASSERT(result == RESULT_SUCCESS); - } else { - const auto vma_iter = src_process->vm_manager.vma_map.find(source_address); - const auto& vma = vma_iter->second; - const VAddr dest_address = vma.originating_buffer_address; + if (IPC::GetDescriptorType(desc) == IPC::DescriptorType::MappedBuffer) { + IPC::MappedBufferDescInfo dest_descInfo{desc}; + VAddr dest_address = dst_cmd_buf[j]; - auto buffer = std::make_shared>(size); - Memory::ReadBlock(*src_process, source_address, buffer->data(), size); - Memory::WriteBlock(*dst_process, dest_address, buffer->data(), size); + u32 dest_size = static_cast(dest_descInfo.size); + IPC::MappedBufferPermissions dest_permissions = dest_descInfo.perms; - ResultCode result = src_process->vm_manager.UnmapRange( - page_start, num_pages * Memory::PAGE_SIZE); - ASSERT(result == RESULT_SUCCESS); + if (permissions == dest_permissions && size == dest_size) { + // Readonly buffers do not need to be copied over to the target + // process again because they were (presumably) not modified. This + // behavior is consistent with the real kernel. 
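
An aside on the header arithmetic behind the destination command buffer read near the start of this patch: the first word of a command buffer encodes how many normal and translate parameter words follow, and the total read size is one word for the header plus both counts. The sketch below is illustrative only; the exact bit positions follow the commonly documented 3DS IPC header layout and are an assumption, not something this patch defines (Citra's IPC::Header exposes the same fields).

    // Minimal sketch of the command-size computation, assuming the usual 3DS header layout.
    #include <cstddef>
    #include <cstdint>

    using u32 = std::uint32_t;

    struct IpcHeader {
        u32 raw;
        u32 translate_params_size() const { return raw & 0x3F; }     // assumed bits 0-5
        u32 normal_params_size() const { return (raw >> 6) & 0x3F; } // assumed bits 6-11
        u32 command_id() const { return raw >> 16; }                 // assumed bits 16-31
    };

    // Mirrors: untranslated_size = 1 + normal_params_size;
    //          command_size = untranslated_size + translate_params_size.
    std::size_t CommandSizeInWords(IpcHeader header) {
        return 1u + header.normal_params_size() + header.translate_params_size();
    }
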
+ if (permissions != IPC::MappedBufferPermissions::R) { + // Copy the modified buffer back into the target process + Memory::CopyBlock(*src_process, *dst_process, source_address, + dest_address, size); + } + + // Unmap the Reserved page before the buffer + ResultCode result = src_process->vm_manager.UnmapRange( + page_start - Memory::PAGE_SIZE, Memory::PAGE_SIZE); + ASSERT(result == RESULT_SUCCESS); + + // Unmap the buffer from the source process + result = src_process->vm_manager.UnmapRange( + page_start, num_pages * Memory::PAGE_SIZE); + ASSERT(result == RESULT_SUCCESS); + + // Check if this is the last mapped buffer + VAddr next_reserve = page_start + num_pages * Memory::PAGE_SIZE; + auto& vma = + src_process->vm_manager.FindVMA(next_reserve + Memory::PAGE_SIZE) + ->second; + if (vma.type == VMAType::Free) { + // Unmap the Reserved page after the last buffer + result = src_process->vm_manager.UnmapRange(next_reserve, + Memory::PAGE_SIZE); + ASSERT(result == RESULT_SUCCESS); + } + + break; + } + } + + j += 1; } i += 1; @@ -166,63 +204,38 @@ ResultCode TranslateCommandBuffer(SharedPtr src_thread, SharedPtr bool { - return (address & Memory::PAGE_MASK) == 0; - }; - // TODO(Subv): Perform permission checks. - // TODO(Subv): Leave a page of unmapped memory before the first page and after the last - // page. + // Reserve a page of memory before the mapped buffer + auto reserve_buffer = std::make_shared>(Memory::PAGE_SIZE); + dst_process->vm_manager.MapMemoryBlockToBase( + Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, reserve_buffer, 0, + static_cast(reserve_buffer->size()), Kernel::MemoryState::Reserved); - if (num_pages == 1 && !IsPageAligned(source_address) && - !IsPageAligned(source_address + size)) { - // If the address of the source buffer is not page-aligned or if the buffer doesn't - // fill an entire page, then we have to allocate a page of memory in the target - // process and copy over the data from the input buffer. This allocated buffer will - // be copied back to the source process and deallocated when the server replies to - // the request via ReplyAndReceive. + auto buffer = std::make_shared>(num_pages * Memory::PAGE_SIZE); + Memory::ReadBlock(*src_process, source_address, buffer->data() + page_offset, size); - auto buffer = std::make_shared>(Memory::PAGE_SIZE); - - // Number of bytes until the next page. - std::size_t difference_to_page = - Common::AlignUp(source_address, Memory::PAGE_SIZE) - source_address; - // If the data fits in one page we can just copy the required size instead of the - // entire page. - std::size_t read_size = - num_pages == 1 ? static_cast(size) : difference_to_page; - - Memory::ReadBlock(*src_process, source_address, buffer->data() + page_offset, - read_size); - - // Map the page into the target process' address space. - target_address = - dst_process->vm_manager - .MapMemoryBlockToBase(Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, - buffer, 0, static_cast(buffer->size()), - Kernel::MemoryState::Shared) - .Unwrap(); - } else { - auto buffer = std::make_shared>(num_pages * Memory::PAGE_SIZE); - Memory::ReadBlock(*src_process, source_address, buffer->data() + page_offset, size); - - // Map the pages into the target process' address space. 
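
Stepping outside the diff again: the Reserved pages introduced in this patch act as guard pages, so each mapped buffer in the target's IPC region ends up bracketed by one page below and one page above that are torn down again on reply. A minimal sketch of that layout, using an assumed 4 KiB page size and illustrative names only, not Citra APIs:

    // Describes the span occupied by one mapped buffer plus its two guard pages.
    #include <cstdint>

    using u32 = std::uint32_t;
    using VAddr = u32;

    constexpr u32 PAGE_SIZE = 0x1000;

    struct MappedRegion {
        VAddr guard_before; // reserved page immediately below the buffer (page_start - PAGE_SIZE)
        VAddr buffer_start; // first page actually backing the mapped buffer
        VAddr guard_after;  // reserved page immediately past the buffer
        u32 total_size;     // buffer plus both guard pages, in bytes
    };

    MappedRegion DescribeMappedRegion(VAddr page_start, u32 num_pages) {
        return MappedRegion{
            page_start - PAGE_SIZE,
            page_start,
            page_start + num_pages * PAGE_SIZE,
            (num_pages + 2) * PAGE_SIZE,
        };
    }

Unmapping from guard_before for total_size bytes releases the buffer and both guard pages in a single call, which is the form the later patches in this series settle on.
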
-                target_address =
-                    dst_process->vm_manager
-                        .MapMemoryBlockToBase(Memory::IPC_MAPPING_VADDR + Memory::PAGE_SIZE,
-                                              Memory::IPC_MAPPING_SIZE - Memory::PAGE_SIZE, buffer,
-                                              0, static_cast<u32>(buffer->size()),
-                                              Kernel::MemoryState::Shared)
-                        .Unwrap();
-            }
-            // Save the original address we copied the buffer from so that we can copy the modified
-            // buffer back, if needed
-            auto vma_iter = dst_process->vm_manager.vma_map.find(target_address + page_offset);
-            auto& vma = vma_iter->second;
-            vma.originating_buffer_address = source_address;
+            // Map the page(s) into the target process' address space.
+            target_address = dst_process->vm_manager
+                                 .MapMemoryBlockToBase(
+                                     Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, buffer, 0,
+                                     static_cast<u32>(buffer->size()), Kernel::MemoryState::Shared)
+                                 .Unwrap();
             cmd_buf[i++] = target_address + page_offset;
+
+            // Check if this is the last mapped buffer
+            if (i < command_size) {
+                u32 next_descriptor = cmd_buf[i];
+                if (IPC::GetDescriptorType(next_descriptor) == IPC::DescriptorType::MappedBuffer) {
+                    break;
+                }
+            }
+
+            // Reserve a page of memory after the last mapped buffer
+            dst_process->vm_manager.MapMemoryBlockToBase(
+                Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, reserve_buffer, 0,
+                static_cast<u32>(reserve_buffer->size()), Kernel::MemoryState::Reserved);
             break;
         }
         default:
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index fcb107c06..7ac5c3b01 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -86,9 +86,6 @@ struct VirtualMemoryArea {
     PAddr paddr = 0;
     Memory::MMIORegionPointer mmio_handler = nullptr;
 
-    /// Originating address of the IPC mapped buffer
-    VAddr originating_buffer_address = 0;
-
     /// Tests if this area can be merged to the right with `next`.
bool CanBeMergedWith(const VirtualMemoryArea& next) const; }; diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 26967ad36..70baef93a 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -700,6 +700,58 @@ void CopyBlock(VAddr dest_addr, VAddr src_addr, const std::size_t size) { CopyBlock(*Kernel::g_current_process, dest_addr, src_addr, size); } +void CopyBlock(const Kernel::Process& src_process, const Kernel::Process& dest_process, + VAddr src_addr, VAddr dest_addr, std::size_t size) { + auto& page_table = src_process.vm_manager.page_table; + std::size_t remaining_size = size; + std::size_t page_index = src_addr >> PAGE_BITS; + std::size_t page_offset = src_addr & PAGE_MASK; + + while (remaining_size > 0) { + const std::size_t copy_amount = std::min(PAGE_SIZE - page_offset, remaining_size); + const VAddr current_vaddr = static_cast((page_index << PAGE_BITS) + page_offset); + + switch (page_table.attributes[page_index]) { + case PageType::Unmapped: { + LOG_ERROR(HW_Memory, + "unmapped CopyBlock @ 0x{:08X} (start address = 0x{:08X}, size = {})", + current_vaddr, src_addr, size); + ZeroBlock(dest_process, dest_addr, copy_amount); + break; + } + case PageType::Memory: { + DEBUG_ASSERT(page_table.pointers[page_index]); + const u8* src_ptr = page_table.pointers[page_index] + page_offset; + WriteBlock(dest_process, dest_addr, src_ptr, copy_amount); + break; + } + case PageType::Special: { + MMIORegionPointer handler = GetMMIOHandler(page_table, current_vaddr); + DEBUG_ASSERT(handler); + std::vector buffer(copy_amount); + handler->ReadBlock(current_vaddr, buffer.data(), buffer.size()); + WriteBlock(dest_process, dest_addr, buffer.data(), buffer.size()); + break; + } + case PageType::RasterizerCachedMemory: { + RasterizerFlushVirtualRegion(current_vaddr, static_cast(copy_amount), + FlushMode::Flush); + WriteBlock(dest_process, dest_addr, GetPointerFromVMA(src_process, current_vaddr), + copy_amount); + break; + } + default: + UNREACHABLE(); + } + + page_index++; + page_offset = 0; + dest_addr += static_cast(copy_amount); + src_addr += static_cast(copy_amount); + remaining_size -= copy_amount; + } +} + template <> u8 ReadMMIO(MMIORegionPointer mmio_handler, VAddr addr) { return mmio_handler->Read8(addr); diff --git a/src/core/memory.h b/src/core/memory.h index 73dbe091c..e78754705 100644 --- a/src/core/memory.h +++ b/src/core/memory.h @@ -205,6 +205,8 @@ void ZeroBlock(const Kernel::Process& process, VAddr dest_addr, const std::size_ void ZeroBlock(VAddr dest_addr, const std::size_t size); void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr, std::size_t size); void CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size); +void CopyBlock(const Kernel::Process& src_process, const Kernel::Process& dest_process, + VAddr src_addr, VAddr dest_addr, std::size_t size); u8* GetPointer(VAddr vaddr); From 32aecd42a2c7b011c7061889fa3343b0efe1862a Mon Sep 17 00:00:00 2001 From: NarcolepticK Date: Mon, 1 Oct 2018 21:07:25 -0400 Subject: [PATCH 3/4] LLE Mapped Buffer: Corrected behavior --- src/core/hle/kernel/ipc.cpp | 77 +++++++++++++++---------------------- 1 file changed, 30 insertions(+), 47 deletions(-) diff --git a/src/core/hle/kernel/ipc.cpp b/src/core/hle/kernel/ipc.cpp index 070bb6619..9875fc68a 100644 --- a/src/core/hle/kernel/ipc.cpp +++ b/src/core/hle/kernel/ipc.cpp @@ -39,6 +39,7 @@ ResultCode TranslateCommandBuffer(SharedPtr src_thread, SharedPtr dst_cmd_buf; Memory::ReadBlock(*dst_process, dst_address, dst_cmd_buf.data(), @@ -148,54 +149,44 @@ 
ResultCode TranslateCommandBuffer(SharedPtr src_thread, SharedPtr(dest_descInfo.size); IPC::MappedBufferPermissions dest_permissions = dest_descInfo.perms; - if (permissions == dest_permissions && size == dest_size) { - // Readonly buffers do not need to be copied over to the target - // process again because they were (presumably) not modified. This - // behavior is consistent with the real kernel. - if (permissions != IPC::MappedBufferPermissions::R) { - // Copy the modified buffer back into the target process - Memory::CopyBlock(*src_process, *dst_process, source_address, - dest_address, size); - } - - // Unmap the Reserved page before the buffer - ResultCode result = src_process->vm_manager.UnmapRange( - page_start - Memory::PAGE_SIZE, Memory::PAGE_SIZE); - ASSERT(result == RESULT_SUCCESS); - - // Unmap the buffer from the source process - result = src_process->vm_manager.UnmapRange( - page_start, num_pages * Memory::PAGE_SIZE); - ASSERT(result == RESULT_SUCCESS); - - // Check if this is the last mapped buffer - VAddr next_reserve = page_start + num_pages * Memory::PAGE_SIZE; - auto& vma = - src_process->vm_manager.FindVMA(next_reserve + Memory::PAGE_SIZE) - ->second; - if (vma.type == VMAType::Free) { - // Unmap the Reserved page after the last buffer - result = src_process->vm_manager.UnmapRange(next_reserve, - Memory::PAGE_SIZE); - ASSERT(result == RESULT_SUCCESS); - } - - break; + ASSERT(permissions == dest_permissions && size == dest_size); + // Readonly buffers do not need to be copied over to the target + // process again because they were (presumably) not modified. This + // behavior is consistent with the real kernel. + if (permissions != IPC::MappedBufferPermissions::R) { + // Copy the modified buffer back into the target process + Memory::CopyBlock(*src_process, *dst_process, source_address, + dest_address, size); } + + VAddr prev_reserve = page_start - Memory::PAGE_SIZE; + VAddr next_reserve = page_start + num_pages * Memory::PAGE_SIZE; + + auto& prev_vma = src_process->vm_manager.FindVMA(prev_reserve)->second; + auto& next_vma = src_process->vm_manager.FindVMA(next_reserve)->second; + ASSERT(prev_vma.meminfo_state == MemoryState::Reserved && + next_vma.meminfo_state == MemoryState::Reserved); + + // Unmap the buffer and guard pages from the source process + ResultCode result = src_process->vm_manager.UnmapRange( + page_start - Memory::PAGE_SIZE, (num_pages + 2) * Memory::PAGE_SIZE); + ASSERT(result == RESULT_SUCCESS); + + target_index += 1; + break; } - j += 1; + target_index += 1; } i += 1; @@ -224,15 +215,7 @@ ResultCode TranslateCommandBuffer(SharedPtr src_thread, SharedPtrvm_manager.MapMemoryBlockToBase( Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, reserve_buffer, 0, static_cast(reserve_buffer->size()), Kernel::MemoryState::Reserved); From c97146226a57419791d5a234d8c63f87ef79b57a Mon Sep 17 00:00:00 2001 From: NarcolepticK Date: Mon, 29 Oct 2018 18:35:34 -0400 Subject: [PATCH 4/4] LLE Mapped Buffer: Addressed comments. 
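
This patch factors the reply-time scan into a standalone ScanForAndUnmapBuffer helper, which has to step over every other kind of translate descriptor to reach the matching mapped buffer. As a purely illustrative aside, not part of the change below, the skip widths it relies on can be summarised as words-per-descriptor; the enum and parameters here are stand-ins for Citra's IPC::DescriptorType and IPC::HandleNumberFromDesc.

    // How many payload words follow each descriptor kind, per the scan logic in this patch.
    #include <cstddef>
    #include <cstdint>

    using u32 = std::uint32_t;

    enum class DescriptorType { CopyHandle, MoveHandle, CallingPid, StaticBuffer, MappedBuffer };

    std::size_t PayloadWords(DescriptorType type, u32 num_handles) {
        switch (type) {
        case DescriptorType::CopyHandle:
        case DescriptorType::MoveHandle:
            return num_handles; // one word per translated handle
        case DescriptorType::CallingPid:
        case DescriptorType::StaticBuffer:
        case DescriptorType::MappedBuffer:
            return 1; // a single address (or PID placeholder) word follows
        }
        return 0; // unreachable for well-formed descriptors
    }

This is why the helper advances target_index inside a loop for handle descriptors but by a single word for the other kinds before continuing the scan.
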
--- src/core/hle/kernel/ipc.cpp | 107 ++++++++++++++++++++++-------------- 1 file changed, 67 insertions(+), 40 deletions(-) diff --git a/src/core/hle/kernel/ipc.cpp b/src/core/hle/kernel/ipc.cpp index 9875fc68a..4fe2c9c1d 100644 --- a/src/core/hle/kernel/ipc.cpp +++ b/src/core/hle/kernel/ipc.cpp @@ -14,6 +14,68 @@ namespace Kernel { +void ScanForAndUnmapBuffer(std::array& dst_cmd_buf, + const std::size_t dst_command_size, std::size_t& target_index, + SharedPtr src_process, SharedPtr dst_process, + const VAddr source_address, const VAddr page_start, const u32 num_pages, + const u32 size, const IPC::MappedBufferPermissions permissions) { + while (target_index < dst_command_size) { + u32 desc = dst_cmd_buf[target_index++]; + + if (IPC::GetDescriptorType(desc) == IPC::DescriptorType::CopyHandle || + IPC::GetDescriptorType(desc) == IPC::DescriptorType::MoveHandle) { + u32 num_handles = IPC::HandleNumberFromDesc(desc); + for (u32 j = 0; j < num_handles; ++j) { + target_index += 1; + } + continue; + } + + if (IPC::GetDescriptorType(desc) == IPC::DescriptorType::CallingPid || + IPC::GetDescriptorType(desc) == IPC::DescriptorType::StaticBuffer) { + target_index += 1; + continue; + } + + if (IPC::GetDescriptorType(desc) == IPC::DescriptorType::MappedBuffer) { + VAddr dest_address = dst_cmd_buf[target_index]; + IPC::MappedBufferDescInfo dest_descInfo{desc}; + u32 dest_size = static_cast(dest_descInfo.size); + IPC::MappedBufferPermissions dest_permissions = dest_descInfo.perms; + + if (dest_size == 0) { + target_index += 1; + continue; + } + + ASSERT(permissions == dest_permissions && size == dest_size); + // Readonly buffers do not need to be copied over to the target + // process again because they were (presumably) not modified. This + // behavior is consistent with the real kernel. 
+ if (permissions != IPC::MappedBufferPermissions::R) { + // Copy the modified buffer back into the target process + Memory::CopyBlock(*src_process, *dst_process, source_address, dest_address, size); + } + + VAddr prev_reserve = page_start - Memory::PAGE_SIZE; + VAddr next_reserve = page_start + num_pages * Memory::PAGE_SIZE; + + auto& prev_vma = src_process->vm_manager.FindVMA(prev_reserve)->second; + auto& next_vma = src_process->vm_manager.FindVMA(next_reserve)->second; + ASSERT(prev_vma.meminfo_state == MemoryState::Reserved && + next_vma.meminfo_state == MemoryState::Reserved); + + // Unmap the buffer and guard pages from the source process + ResultCode result = src_process->vm_manager.UnmapRange( + page_start - Memory::PAGE_SIZE, (num_pages + 2) * Memory::PAGE_SIZE); + ASSERT(result == RESULT_SUCCESS); + + target_index += 1; + break; + } + } +} + ResultCode TranslateCommandBuffer(SharedPtr src_thread, SharedPtr dst_thread, VAddr src_address, VAddr dst_address, bool reply) { @@ -148,46 +210,11 @@ ResultCode TranslateCommandBuffer(SharedPtr src_thread, SharedPtr= 1); if (reply) { - // Scan the target's command buffer for the matching mapped buffer - while (target_index < dst_command_size) { - u32 desc = dst_cmd_buf[target_index++]; - - if (IPC::GetDescriptorType(desc) == IPC::DescriptorType::MappedBuffer) { - IPC::MappedBufferDescInfo dest_descInfo{desc}; - VAddr dest_address = dst_cmd_buf[target_index]; - - u32 dest_size = static_cast(dest_descInfo.size); - IPC::MappedBufferPermissions dest_permissions = dest_descInfo.perms; - - ASSERT(permissions == dest_permissions && size == dest_size); - // Readonly buffers do not need to be copied over to the target - // process again because they were (presumably) not modified. This - // behavior is consistent with the real kernel. - if (permissions != IPC::MappedBufferPermissions::R) { - // Copy the modified buffer back into the target process - Memory::CopyBlock(*src_process, *dst_process, source_address, - dest_address, size); - } - - VAddr prev_reserve = page_start - Memory::PAGE_SIZE; - VAddr next_reserve = page_start + num_pages * Memory::PAGE_SIZE; - - auto& prev_vma = src_process->vm_manager.FindVMA(prev_reserve)->second; - auto& next_vma = src_process->vm_manager.FindVMA(next_reserve)->second; - ASSERT(prev_vma.meminfo_state == MemoryState::Reserved && - next_vma.meminfo_state == MemoryState::Reserved); - - // Unmap the buffer and guard pages from the source process - ResultCode result = src_process->vm_manager.UnmapRange( - page_start - Memory::PAGE_SIZE, (num_pages + 2) * Memory::PAGE_SIZE); - ASSERT(result == RESULT_SUCCESS); - - target_index += 1; - break; - } - - target_index += 1; - } + // Scan the target's command buffer for the matching mapped buffer. + // The real kernel panics if you try to reply with an unsolicited MappedBuffer. + ScanForAndUnmapBuffer(dst_cmd_buf, dst_command_size, target_index, src_process, + dst_process, source_address, page_start, num_pages, size, + permissions); i += 1; break;
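
A note on the cross-process Memory::CopyBlock overload added earlier in this series: it walks the source page table in page-sized chunks because consecutive virtual pages of the source process need not be contiguous on the host, and each chunk is forwarded into the destination process with WriteBlock. The sketch below is a simplified, self-contained illustration of that chunking pattern only; it assumes a flat array of host page pointers and leaves out the unmapped, MMIO and rasterizer-cached cases that the real implementation dispatches on.

    // Copy `size` bytes starting at src_addr in a paged source into a flat destination buffer,
    // one page-sized chunk at a time. Illustrative stand-in for Citra's page-table walk.
    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    using u8 = std::uint8_t;
    using u32 = std::uint32_t;
    using VAddr = u32;

    constexpr u32 PAGE_SIZE = 0x1000;
    constexpr u32 PAGE_MASK = PAGE_SIZE - 1;
    constexpr u32 PAGE_BITS = 12;

    struct SimplePageTable {
        std::vector<u8*> pointers; // host pointer backing each virtual page, or nullptr
    };

    void CopyBlockPaged(const SimplePageTable& src, VAddr src_addr, u8* dest, std::size_t size) {
        std::size_t remaining = size;
        std::size_t page_index = src_addr >> PAGE_BITS;
        std::size_t page_offset = src_addr & PAGE_MASK;

        while (remaining > 0) {
            // Only the first chunk can start mid-page; later chunks begin at a page boundary.
            const std::size_t copy_amount =
                std::min<std::size_t>(PAGE_SIZE - page_offset, remaining);
            const u8* src_ptr = src.pointers[page_index] + page_offset;
            std::copy(src_ptr, src_ptr + copy_amount, dest);

            dest += copy_amount;
            remaining -= copy_amount;
            page_index++;
            page_offset = 0;
        }
    }
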