diff --git a/src/core/hle/kernel/ipc.cpp b/src/core/hle/kernel/ipc.cpp
index 517069ef8..0907b105c 100644
--- a/src/core/hle/kernel/ipc.cpp
+++ b/src/core/hle/kernel/ipc.cpp
@@ -2,6 +2,7 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include "common/alignment.h"
 #include "core/hle/ipc.h"
 #include "core/hle/kernel/handle_table.h"
 #include "core/hle/kernel/ipc.h"
@@ -14,7 +15,7 @@ namespace Kernel {
 
 ResultCode TranslateCommandBuffer(SharedPtr<Thread> src_thread, SharedPtr<Thread> dst_thread,
-                                  VAddr src_address, VAddr dst_address) {
+                                  VAddr src_address, VAddr dst_address, bool reply) {
     auto& src_process = src_thread->owner_process;
     auto& dst_process = dst_thread->owner_process;
 
@@ -115,6 +116,88 @@ ResultCode TranslateCommandBuffer(SharedPtr<Thread
             break;
         }
+        case IPC::DescriptorType::MappedBuffer: {
+            IPC::MappedBufferDescInfo descInfo{descriptor};
+            VAddr source_address = cmd_buf[i];
+
+            u32 size = descInfo.size;
+            IPC::MappedBufferPermissions permissions = descInfo.perms;
+
+            VAddr page_start = Common::AlignDown(source_address, Memory::PAGE_SIZE);
+            u32 page_offset = source_address - page_start;
+            u32 num_pages =
+                Common::AlignUp(page_offset + size, Memory::PAGE_SIZE) >> Memory::PAGE_BITS;
+
+            ASSERT(num_pages >= 1);
+
+            if (reply) {
+                // TODO(Subv): Scan the target's command buffer to make sure that there was a
+                // MappedBuffer descriptor in the original request. The real kernel panics if you
+                // try to reply with an unsolicited MappedBuffer.
+
+                // Unmap the buffers. Readonly buffers do not need to be copied over to the target
+                // process again because they were (presumably) not modified. This behavior is
+                // consistent with the real kernel.
+                if (permissions == IPC::MappedBufferPermissions::R) {
+                    ResultCode result = src_process->vm_manager.UnmapRange(
+                        page_start, num_pages * Memory::PAGE_SIZE);
+                    ASSERT(result == RESULT_SUCCESS);
+                }
+
+                ASSERT_MSG(permissions == IPC::MappedBufferPermissions::R,
+                           "Unmapping Write MappedBuffers is unimplemented");
+                i += 1;
+                break;
+            }
+
+            VAddr target_address = 0;
+
+            auto IsPageAligned = [](VAddr address) -> bool {
+                return (address & Memory::PAGE_MASK) == 0;
+            };
+
+            // TODO(Subv): Support more than 1 page and aligned page mappings
+            ASSERT_MSG(
+                num_pages == 1 &&
+                    (!IsPageAligned(source_address) || !IsPageAligned(source_address + size)),
+                "MappedBuffers of more than one page or aligned transfers are not implemented");
+
+            // TODO(Subv): Perform permission checks.
+
+            // TODO(Subv): Leave a page of Reserved memory before the first page and after the last
+            // page.
+
+            if (!IsPageAligned(source_address) ||
+                (num_pages == 1 && !IsPageAligned(source_address + size))) {
+                // If the address of the source buffer is not page-aligned or if the buffer doesn't
+                // fill an entire page, then we have to allocate a page of memory in the target
+                // process and copy over the data from the input buffer. This allocated buffer will
+                // be copied back to the source process and deallocated when the server replies to
+                // the request via ReplyAndReceive.
+
+                auto buffer = std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE);
+
+                // Number of bytes until the next page.
+                size_t difference_to_page =
+                    Common::AlignUp(source_address, Memory::PAGE_SIZE) - source_address;
+                // If the data fits in one page we can just copy the required size instead of the
+                // entire page.
+                size_t read_size = num_pages == 1 ? size : difference_to_page;
+
+                Memory::ReadBlock(*src_process, source_address, buffer->data() + page_offset,
+                                  read_size);
+
+                // Map the page into the target process' address space.
+                target_address = dst_process->vm_manager
+                                     .MapMemoryBlockToBase(
+                                         Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE,
+                                         buffer, 0, buffer->size(), Kernel::MemoryState::Shared)
+                                     .Unwrap();
+            }
+
+            cmd_buf[i++] = target_address + page_offset;
+            break;
+        }
         default:
             UNIMPLEMENTED_MSG("Unsupported handle translation: 0x%08X", descriptor);
         }
diff --git a/src/core/hle/kernel/ipc.h b/src/core/hle/kernel/ipc.h
index ac81d1ad4..d77fd1d1f 100644
--- a/src/core/hle/kernel/ipc.h
+++ b/src/core/hle/kernel/ipc.h
@@ -10,5 +10,5 @@ namespace Kernel {
 
 /// Performs IPC command buffer translation from one process to another.
 ResultCode TranslateCommandBuffer(SharedPtr<Thread> src_thread, SharedPtr<Thread> dst_thread,
-                                  VAddr src_address, VAddr dst_address);
+                                  VAddr src_address, VAddr dst_address, bool reply);
 } // namespace Kernel
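Reviewer note on the translation math: a MappedBuffer pointer is split into a page-aligned base plus a sub-page offset, and the page count comes from rounding `page_offset + size` up to the next page boundary. The following standalone sketch is my own illustration, not patch code; the `AlignDown`/`AlignUp` helpers and the `PAGE_*` constants are local stand-ins for `common/alignment.h` and `Memory::PAGE_SIZE`/`Memory::PAGE_BITS`:

```cpp
#include <cassert>
#include <cstdint>

// Local stand-ins mirroring Memory::PAGE_BITS / Memory::PAGE_SIZE.
constexpr std::uint32_t PAGE_BITS = 12;
constexpr std::uint32_t PAGE_SIZE = 1 << PAGE_BITS; // 0x1000

// Same semantics as the helpers in common/alignment.h (power-of-two alignment).
constexpr std::uint32_t AlignDown(std::uint32_t value, std::uint32_t alignment) {
    return value & ~(alignment - 1);
}
constexpr std::uint32_t AlignUp(std::uint32_t value, std::uint32_t alignment) {
    return AlignDown(value + alignment - 1, alignment);
}

int main() {
    // An unaligned 0x300-byte buffer starting in the middle of a page.
    std::uint32_t source_address = 0x08100A80;
    std::uint32_t size = 0x300;

    std::uint32_t page_start = AlignDown(source_address, PAGE_SIZE);
    std::uint32_t page_offset = source_address - page_start;
    std::uint32_t num_pages = AlignUp(page_offset + size, PAGE_SIZE) >> PAGE_BITS;

    assert(page_start == 0x08100000);
    assert(page_offset == 0xA80);
    assert(num_pages == 1); // 0xA80 + 0x300 = 0xD80 < 0x1000: still one page
}
```

The server ultimately sees `target_address + page_offset`, so the client's sub-page offset is preserved inside the freshly mapped page.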
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 7f219ffde..34a71f28e 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -472,8 +472,8 @@ static ResultCode ReceiveIPCRequest(SharedPtr<ServerSession> server_session,
         VAddr target_address = thread->GetCommandBufferAddress();
         VAddr source_address = server_session->currently_handling->GetCommandBufferAddress();
 
-        ResultCode translation_result = TranslateCommandBuffer(server_session->currently_handling,
-                                                               thread, source_address, target_address);
+        ResultCode translation_result = TranslateCommandBuffer(
+            server_session->currently_handling, thread, source_address, target_address, false);
 
         // If a translation error occurred, immediately resume the client thread.
         if (translation_result.IsError()) {
@@ -535,8 +535,8 @@ static ResultCode ReplyAndReceive(s32* index, VAddr handles_address, s32 handle_
     VAddr source_address = GetCurrentThread()->GetCommandBufferAddress();
     VAddr target_address = request_thread->GetCommandBufferAddress();
 
-    ResultCode translation_result = TranslateCommandBuffer(GetCurrentThread(), request_thread,
-                                                           source_address, target_address);
+    ResultCode translation_result = TranslateCommandBuffer(
+        Kernel::GetCurrentThread(), request_thread, source_address, target_address, true);
 
     // Note: The real kernel seems to always panic if the Server->Client buffer translation
     // fails for whatever reason.
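For context, the two call sites correspond to the two directions of an IPC round trip: ReceiveIPCRequest translates client-to-server with `reply = false` (buffers are mapped into the server), while ReplyAndReceive translates server-to-client with `reply = true` (buffers are unmapped, and writable ones would be copied back once that path is implemented). A toy model of that flow; illustrative only, none of these names are Citra APIs:

```cpp
#include <cstdio>

// Toy model of when the kernel performs MappedBuffer translation during an
// IPC round trip; `reply` mirrors the new parameter added by this patch.
void TranslateToy(bool reply) {
    if (!reply) {
        std::puts("request: map client MappedBuffers into the server's address space");
    } else {
        std::puts("reply: unmap server-side MappedBuffers (copy back if writable)");
    }
}

int main() {
    // ReceiveIPCRequest: client -> server, translation runs with reply = false.
    TranslateToy(false);
    // ReplyAndReceive: server -> client, translation runs with reply = true.
    TranslateToy(true);
}
```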
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 7a007c065..3cf84df9b 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -93,6 +93,40 @@ ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
     return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
 }
 
+ResultVal<VAddr> VMManager::MapMemoryBlockToBase(VAddr base, u32 region_size,
+                                                 std::shared_ptr<std::vector<u8>> block,
+                                                 size_t offset, u32 size, MemoryState state) {
+
+    // Find the first Free VMA that ends past `base` and can hold `size` bytes.
+    VMAHandle vma_handle = std::find_if(vma_map.begin(), vma_map.end(), [&](const auto& vma) {
+        if (vma.second.type != VMAType::Free)
+            return false;
+
+        VAddr vma_end = vma.second.base + vma.second.size;
+        return vma_end > base && vma_end >= base + size;
+    });
+
+    // Do not try to allocate the block if there are no available addresses within the desired
+    // region. The iterator must be checked before it is dereferenced below.
+    if (vma_handle == vma_map.end()) {
+        return ResultCode(ErrorDescription::OutOfMemory, ErrorModule::Kernel,
+                          ErrorSummary::OutOfResource, ErrorLevel::Permanent);
+    }
+
+    VAddr target = std::max(base, vma_handle->second.base);
+    if (target + size > base + region_size) {
+        return ResultCode(ErrorDescription::OutOfMemory, ErrorModule::Kernel,
+                          ErrorSummary::OutOfResource, ErrorLevel::Permanent);
+    }
+
+    auto result = MapMemoryBlock(target, block, offset, size, state);
+
+    if (result.Failed())
+        return result.Code();
+
+    return MakeResult<VAddr>(target);
+}
+
 ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* memory, u32 size,
                                                             MemoryState state) {
     ASSERT(memory != nullptr);
@@ -346,4 +380,4 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
         break;
     }
 }
-}
+} // namespace Kernel
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 1302527bb..21159855c 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -144,6 +144,20 @@ public:
     ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block,
                                         size_t offset, u32 size, MemoryState state);
 
+    /**
+     * Maps part of a ref-counted block of memory at the first free address after the given base.
+     *
+     * @param base The base address to start the mapping at.
+     * @param region_size Size of the region in which to search for a free address.
+     * @param block The block to be mapped.
+     * @param offset Offset into `block` to map from.
+     * @param size Size of the mapping.
+     * @param state MemoryState tag to attach to the VMA.
+     * @returns The address at which the memory was mapped.
+     */
+    ResultVal<VAddr> MapMemoryBlockToBase(VAddr base, u32 region_size,
+                                          std::shared_ptr<std::vector<u8>> block, size_t offset,
+                                          u32 size, MemoryState state);
     /**
      * Maps an unmanaged host memory pointer at a given address.
      *
@@ -224,4 +238,4 @@ private:
     /// Updates the pages corresponding to this VMA so they match the VMA's attributes.
     void UpdatePageTableForVMA(const VirtualMemoryArea& vma);
 };
-}
+} // namespace Kernel
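The new allocator is a first-fit scan over the VMA map. Below is a self-contained sketch of the same search logic with toy types; `Vma` and `FindTarget` are my own stand-ins, not the real `VMManager`:

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <map>

using VAddr = std::uint32_t;

// Toy stand-in for a VMA map entry: key = base address of the region.
struct Vma {
    VAddr base;
    std::uint32_t size;
    bool free;
};

// First-fit search mirroring MapMemoryBlockToBase: find a free region whose
// end lies past `base` and which can hold `size` bytes starting at
// max(base, region base), staying within [base, base + region_size).
bool FindTarget(const std::map<VAddr, Vma>& vma_map, VAddr base, std::uint32_t region_size,
                std::uint32_t size, VAddr& out_target) {
    auto it = std::find_if(vma_map.begin(), vma_map.end(), [&](const auto& vma) {
        if (!vma.second.free)
            return false;
        VAddr vma_end = vma.second.base + vma.second.size;
        return vma_end > base && vma_end >= base + size;
    });
    // Check the iterator before dereferencing it: find_if returns end() when
    // no free region qualifies.
    if (it == vma_map.end())
        return false;

    VAddr target = std::max(base, it->second.base);
    if (target + size > base + region_size)
        return false;

    out_target = target;
    return true;
}

int main() {
    // One mapped region followed by a free region covering the search area.
    std::map<VAddr, Vma> vma_map{
        {0x00000000, {0x00000000, 0x04000000, false}},
        {0x04000000, {0x04000000, 0x04000000, true}},
    };

    VAddr target = 0;
    // Ask for one page somewhere in [0x04000000, 0x04100000).
    if (FindTarget(vma_map, 0x04000000, 0x00100000, 0x1000, target))
        std::cout << "mapped at 0x" << std::hex << target << '\n'; // 0x4000000
}
```

Checking the iterator against `vma_map.end()` before computing `target`, as the hunk above now does, avoids dereferencing an invalid iterator when no Free VMA qualifies.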