diff --git a/src/core/hle/applets/erreula.cpp b/src/core/hle/applets/erreula.cpp index 7b8cf1d02..ef9da4989 100644 --- a/src/core/hle/applets/erreula.cpp +++ b/src/core/hle/applets/erreula.cpp @@ -28,11 +28,9 @@ ResultCode ErrEula::ReceiveParameter(const Service::APT::MessageParameter& param // TODO: allocated memory never released using Kernel::MemoryPermission; - // Allocate a heap block of the required size for this applet. - heap_memory = std::make_shared>(capture_info.size); // Create a SharedMemory that directly points to this heap block. framebuffer_memory = Core::System::GetInstance().Kernel().CreateSharedMemoryForApplet( - heap_memory, 0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite, + 0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite, "ErrEula Memory"); // Send the response message with the newly created SharedMemory diff --git a/src/core/hle/applets/mii_selector.cpp b/src/core/hle/applets/mii_selector.cpp index dd57f0efd..f51b720a0 100644 --- a/src/core/hle/applets/mii_selector.cpp +++ b/src/core/hle/applets/mii_selector.cpp @@ -35,11 +35,9 @@ ResultCode MiiSelector::ReceiveParameter(const Service::APT::MessageParameter& p memcpy(&capture_info, parameter.buffer.data(), sizeof(capture_info)); using Kernel::MemoryPermission; - // Allocate a heap block of the required size for this applet. - heap_memory = std::make_shared>(capture_info.size); // Create a SharedMemory that directly points to this heap block. 
framebuffer_memory = Core::System::GetInstance().Kernel().CreateSharedMemoryForApplet( - heap_memory, 0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite, + 0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite, "MiiSelector Memory"); // Send the response message with the newly created SharedMemory diff --git a/src/core/hle/applets/mint.cpp b/src/core/hle/applets/mint.cpp index ee70ba615..1986961bf 100644 --- a/src/core/hle/applets/mint.cpp +++ b/src/core/hle/applets/mint.cpp @@ -28,11 +28,9 @@ ResultCode Mint::ReceiveParameter(const Service::APT::MessageParameter& paramete // TODO: allocated memory never released using Kernel::MemoryPermission; - // Allocate a heap block of the required size for this applet. - heap_memory = std::make_shared>(capture_info.size); // Create a SharedMemory that directly points to this heap block. framebuffer_memory = Core::System::GetInstance().Kernel().CreateSharedMemoryForApplet( - heap_memory, 0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite, + 0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite, "Mint Memory"); // Send the response message with the newly created SharedMemory diff --git a/src/core/hle/applets/swkbd.cpp b/src/core/hle/applets/swkbd.cpp index cd8a9f57a..750a20849 100644 --- a/src/core/hle/applets/swkbd.cpp +++ b/src/core/hle/applets/swkbd.cpp @@ -39,11 +39,9 @@ ResultCode SoftwareKeyboard::ReceiveParameter(Service::APT::MessageParameter con memcpy(&capture_info, parameter.buffer.data(), sizeof(capture_info)); using Kernel::MemoryPermission; - // Allocate a heap block of the required size for this applet. - heap_memory = std::make_shared>(capture_info.size); // Create a SharedMemory that directly points to this heap block. 
framebuffer_memory = Core::System::GetInstance().Kernel().CreateSharedMemoryForApplet( - heap_memory, 0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite, + 0, capture_info.size, MemoryPermission::ReadWrite, MemoryPermission::ReadWrite, "SoftwareKeyboard Memory"); // Send the response message with the newly created SharedMemory diff --git a/src/core/hle/kernel/errors.h b/src/core/hle/kernel/errors.h index 509ffee58..2e9d63c30 100644 --- a/src/core/hle/kernel/errors.h +++ b/src/core/hle/kernel/errors.h @@ -67,6 +67,10 @@ constexpr ResultCode ERR_MISALIGNED_SIZE(ErrorDescription::MisalignedSize, Error constexpr ResultCode ERR_OUT_OF_MEMORY(ErrorDescription::OutOfMemory, ErrorModule::Kernel, ErrorSummary::OutOfResource, ErrorLevel::Permanent); // 0xD86007F3 +/// Returned when out of heap or linear heap memory when allocating +constexpr ResultCode ERR_OUT_OF_HEAP_MEMORY(ErrorDescription::OutOfMemory, ErrorModule::OS, + ErrorSummary::OutOfResource, + ErrorLevel::Status); // 0xC860180A constexpr ResultCode ERR_NOT_IMPLEMENTED(ErrorDescription::NotImplemented, ErrorModule::OS, ErrorSummary::InvalidArgument, ErrorLevel::Usage); // 0xE0E01BF4 diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 2beaaa067..a6db1eb39 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -179,7 +179,6 @@ public: /** * Creates a shared memory object from a block of memory managed by an HLE applet. - * @param heap_block Heap block of the HLE applet. * @param offset The offset into the heap block that the SharedMemory will map. * @param size Size of the memory block. Must be page-aligned. * @param permissions Permission restrictions applied to the process which created the block. @@ -187,8 +186,7 @@ public: * block. * @param name Optional object name, used for debugging purposes. 
*/ - SharedPtr CreateSharedMemoryForApplet(std::shared_ptr> heap_block, - u32 offset, u32 size, + SharedPtr CreateSharedMemoryForApplet(u32 offset, u32 size, MemoryPermission permissions, MemoryPermission other_permissions, std::string name = "Unknown Applet"); diff --git a/src/core/hle/kernel/memory.cpp b/src/core/hle/kernel/memory.cpp index 3a2e9957a..b53c273fe 100644 --- a/src/core/hle/kernel/memory.cpp +++ b/src/core/hle/kernel/memory.cpp @@ -50,13 +50,7 @@ void KernelSystem::MemoryInit(u32 mem_type) { // the sizes specified in the memory_region_sizes table. VAddr base = 0; for (int i = 0; i < 3; ++i) { - memory_regions[i].base = base; - memory_regions[i].size = memory_region_sizes[mem_type][i]; - memory_regions[i].used = 0; - memory_regions[i].linear_heap_memory = std::make_shared>(); - // Reserve enough space for this region of FCRAM. - // We do not want this block of memory to be relocated when allocating from it. - memory_regions[i].linear_heap_memory->reserve(memory_regions[i].size); + memory_regions[i].Reset(base, memory_region_sizes[mem_type][i]); base += memory_regions[i].size; } @@ -164,4 +158,75 @@ void KernelSystem::MapSharedPages(VMManager& address_space) { address_space.Reprotect(shared_page_vma, VMAPermission::Read); } +void MemoryRegionInfo::Reset(u32 base, u32 size) { + this->base = base; + this->size = size; + used = 0; + free_blocks.clear(); + + // mark the entire region as free + free_blocks.insert(Interval::right_open(base, base + size)); +} + +MemoryRegionInfo::IntervalSet MemoryRegionInfo::HeapAllocate(u32 size) { + IntervalSet result; + u32 rest = size; + + // Try allocating from the higher address + for (auto iter = free_blocks.rbegin(); iter != free_blocks.rend(); ++iter) { + ASSERT(iter->bounds() == boost::icl::interval_bounds::right_open()); + if (iter->upper() - iter->lower() >= rest) { + // Requested size is fulfilled with this block + result += Interval(iter->upper() - rest, iter->upper()); + rest = 0; + break; + } + result += 
*iter; + rest -= iter->upper() - iter->lower(); + } + + if (rest != 0) { + // There is not enough free space + return {}; + } + + free_blocks -= result; + used += size; + return result; +} + +bool MemoryRegionInfo::LinearAllocate(u32 offset, u32 size) { + Interval interval(offset, offset + size); + if (!boost::icl::contains(free_blocks, interval)) { + // The requested range is already allocated + return false; + } + free_blocks -= interval; + used += size; + return true; +} + +std::optional MemoryRegionInfo::LinearAllocate(u32 size) { + // Find the first sufficient continuous block from the lower address + for (const auto& interval : free_blocks) { + ASSERT(interval.bounds() == boost::icl::interval_bounds::right_open()); + if (interval.upper() - interval.lower() >= size) { + Interval allocated(interval.lower(), interval.lower() + size); + free_blocks -= allocated; + used += size; + return allocated.lower(); + } + } + + // No sufficient block found + return {}; +} + +void MemoryRegionInfo::Free(u32 offset, u32 size) { + Interval interval(offset, offset + size); + ASSERT(!boost::icl::intersects(free_blocks, interval)); // must be allocated blocks + free_blocks += interval; + used -= size; +} + } // namespace Kernel diff --git a/src/core/hle/kernel/memory.h b/src/core/hle/kernel/memory.h index 14a38bdb9..67512df79 100644 --- a/src/core/hle/kernel/memory.h +++ b/src/core/hle/kernel/memory.h @@ -4,8 +4,8 @@ #pragma once -#include -#include +#include +#include #include "common/common_types.h" namespace Kernel { @@ -18,7 +18,48 @@ struct MemoryRegionInfo { u32 size; u32 used; - std::shared_ptr> linear_heap_memory; + // The domain of the interval_set is offsets from start of FCRAM + using IntervalSet = boost::icl::interval_set; + using Interval = IntervalSet::interval_type; + + IntervalSet free_blocks; + + /** + * Reset the allocator state + * @param base The base offset to the beginning of FCRAM. 
+ * @param size The region size this allocator manages + */ + void Reset(u32 base, u32 size); + + /** + * Allocates memory from the heap. + * @param size The size of memory to allocate. + * @returns The set of blocks that make up the allocation request. Empty set if there is not + * enough space. + */ + IntervalSet HeapAllocate(u32 size); + + /** + * Allocates memory from the linear heap with specific address and size. + * @param offset the address offset to the beginning of FCRAM. + * @param size size of the memory to allocate. + * @returns true if the allocation is successful. false if the requested region is not free. + */ + bool LinearAllocate(u32 offset, u32 size); + + /** + * Allocates memory from the linear heap with only size specified. + * @param size size of the memory to allocate. + * @returns the address offset to the beginning of FCRAM; null if there is not enough space + */ + std::optional LinearAllocate(u32 size); + + /** + * Frees one segment of memory. The memory must have been allocated as heap or linear heap. + * @param offset the region address offset to the beginning of FCRAM. + * @param size the size of the region to free. 
+ */ + void Free(u32 offset, u32 size); }; void HandleSpecialMapping(VMManager& address_space, const AddressMapping& mapping); diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index c41d4ebd9..5e699937b 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -119,13 +119,9 @@ void Process::Run(s32 main_thread_priority, u32 stack_size) { auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions, MemoryState memory_state) { - auto vma = vm_manager - .MapMemoryBlock(segment.addr, codeset->memory, segment.offset, segment.size, - memory_state) - .Unwrap(); - vm_manager.Reprotect(vma, permissions); - misc_memory_used += segment.size; - memory_region->used += segment.size; + HeapAllocate(segment.addr, segment.size, permissions, memory_state, true); + Memory::WriteBlock(*this, segment.addr, codeset->memory->data() + segment.offset, + segment.size); }; // Map CodeSet segments @@ -134,13 +130,8 @@ void Process::Run(s32 main_thread_priority, u32 stack_size) { MapSegment(codeset->DataSegment(), VMAPermission::ReadWrite, MemoryState::Private); // Allocate and map stack - vm_manager - .MapMemoryBlock(Memory::HEAP_VADDR_END - stack_size, - std::make_shared>(stack_size, 0), 0, stack_size, - MemoryState::Locked) - .Unwrap(); - misc_memory_used += stack_size; - memory_region->used += stack_size; + HeapAllocate(Memory::HEAP_VADDR_END - stack_size, stack_size, VMAPermission::ReadWrite, + MemoryState::Locked, true); // Map special address mappings kernel.MapSharedPages(vm_manager); @@ -168,44 +159,55 @@ VAddr Process::GetLinearHeapLimit() const { return GetLinearHeapBase() + memory_region->size; } -ResultVal Process::HeapAllocate(VAddr target, u32 size, VMAPermission perms) { +ResultVal Process::HeapAllocate(VAddr target, u32 size, VMAPermission perms, + MemoryState memory_state, bool skip_range_check) { + LOG_DEBUG(Kernel, "Allocate heap target={:08X}, size={:08X}", target, size); if (target < 
Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END || target + size < target) { - return ERR_INVALID_ADDRESS; + if (!skip_range_check) { + LOG_ERROR(Kernel, "Invalid heap address"); + return ERR_INVALID_ADDRESS; + } } - if (heap_memory == nullptr) { - // Initialize heap - heap_memory = std::make_shared>(); - heap_start = heap_end = target; + auto vma = vm_manager.FindVMA(target); + if (vma->second.type != VMAType::Free || vma->second.base + vma->second.size < target + size) { + LOG_ERROR(Kernel, "Trying to allocate already allocated memory"); + return ERR_INVALID_ADDRESS_STATE; } - // If necessary, expand backing vector to cover new heap extents. - if (target < heap_start) { - heap_memory->insert(begin(*heap_memory), heap_start - target, 0); - heap_start = target; - vm_manager.RefreshMemoryBlockMappings(heap_memory.get()); + auto allocated_fcram = memory_region->HeapAllocate(size); + if (allocated_fcram.empty()) { + LOG_ERROR(Kernel, "Not enough space"); + return ERR_OUT_OF_HEAP_MEMORY; } - if (target + size > heap_end) { - heap_memory->insert(end(*heap_memory), (target + size) - heap_end, 0); - heap_end = target + size; - vm_manager.RefreshMemoryBlockMappings(heap_memory.get()); + + // Maps heap block by block + VAddr interval_target = target; + for (const auto& interval : allocated_fcram) { + u32 interval_size = interval.upper() - interval.lower(); + LOG_DEBUG(Kernel, "Allocated FCRAM region lower={:08X}, upper={:08X}", interval.lower(), + interval.upper()); + std::fill(Memory::fcram.begin() + interval.lower(), + Memory::fcram.begin() + interval.upper(), 0); + auto vma = vm_manager.MapBackingMemory( + interval_target, Memory::fcram.data() + interval.lower(), interval_size, memory_state); + ASSERT(vma.Succeeded()); + vm_manager.Reprotect(vma.Unwrap(), perms); + interval_target += interval_size; } - ASSERT(heap_end - heap_start == heap_memory->size()); - CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, heap_memory, target - heap_start, - size, 
MemoryState::Private)); - vm_manager.Reprotect(vma, perms); + memory_used += size; + resource_limit->current_commit += size; - heap_used += size; - memory_region->used += size; - - return MakeResult(heap_end - size); + return MakeResult(target); } ResultCode Process::HeapFree(VAddr target, u32 size) { + LOG_DEBUG(Kernel, "Free heap target={:08X}, size={:08X}", target, size); if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END || target + size < target) { + LOG_ERROR(Kernel, "Invalid heap address"); return ERR_INVALID_ADDRESS; } @@ -213,59 +215,72 @@ ResultCode Process::HeapFree(VAddr target, u32 size) { return RESULT_SUCCESS; } - ResultCode result = vm_manager.UnmapRange(target, size); - if (result.IsError()) - return result; + // Free heaps block by block + CASCADE_RESULT(auto backing_blocks, vm_manager.GetBackingBlocksForRange(target, size)); + for (const auto [backing_memory, block_size] : backing_blocks) { + memory_region->Free(Memory::GetFCRAMOffset(backing_memory), block_size); + } - heap_used -= size; - memory_region->used -= size; + ResultCode result = vm_manager.UnmapRange(target, size); + ASSERT(result.IsSuccess()); + + memory_used -= size; + resource_limit->current_commit -= size; return RESULT_SUCCESS; } ResultVal Process::LinearAllocate(VAddr target, u32 size, VMAPermission perms) { - auto& linheap_memory = memory_region->linear_heap_memory; - - VAddr heap_end = GetLinearHeapBase() + (u32)linheap_memory->size(); - // Games and homebrew only ever seem to pass 0 here (which lets the kernel decide the address), - // but explicit addresses are also accepted and respected. 
+ LOG_DEBUG(Kernel, "Allocate linear heap target={:08X}, size={:08X}", target, size); + u32 physical_offset; if (target == 0) { - target = heap_end; + auto offset = memory_region->LinearAllocate(size); + if (!offset) { + LOG_ERROR(Kernel, "Not enough space"); + return ERR_OUT_OF_HEAP_MEMORY; + } + physical_offset = *offset; + target = physical_offset + GetLinearHeapAreaAddress(); + } else { + if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() || + target + size < target) { + LOG_ERROR(Kernel, "Invalid linear heap address"); + return ERR_INVALID_ADDRESS; + } + + // Kernel would crash/return error when target doesn't meet some requirement. + // It seems that target is required to follow immediately after the allocated linear heap, + // or cover the entire hole if there is any. + // Right now we just ignore these checks because they are still unclear. Furthermore, + // games and homebrew only ever seem to pass target = 0 here (which lets the kernel decide + // the address), so this is not important. + + physical_offset = target - GetLinearHeapAreaAddress(); // relative to FCRAM + if (!memory_region->LinearAllocate(physical_offset, size)) { + LOG_ERROR(Kernel, "Trying to allocate already allocated memory"); + return ERR_INVALID_ADDRESS_STATE; + } } - if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() || target > heap_end || - target + size < target) { + u8* backing_memory = Memory::fcram.data() + physical_offset; - return ERR_INVALID_ADDRESS; - } + std::fill(backing_memory, backing_memory + size, 0); + auto vma = vm_manager.MapBackingMemory(target, backing_memory, size, MemoryState::Continuous); + ASSERT(vma.Succeeded()); + vm_manager.Reprotect(vma.Unwrap(), perms); - // Expansion of the linear heap is only allowed if you do an allocation immediately at its - // end. It's possible to free gaps in the middle of the heap and then reallocate them later, - // but expansions are only allowed at the end. 
- if (target == heap_end) { - linheap_memory->insert(linheap_memory->end(), size, 0); - vm_manager.RefreshMemoryBlockMappings(linheap_memory.get()); - } - - // TODO(yuriks): As is, this lets processes map memory allocated by other processes from the - // same region. It is unknown if or how the 3DS kernel checks against this. - std::size_t offset = target - GetLinearHeapBase(); - CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, linheap_memory, offset, size, - MemoryState::Continuous)); - vm_manager.Reprotect(vma, perms); - - linear_heap_used += size; - memory_region->used += size; + memory_used += size; + resource_limit->current_commit += size; + LOG_DEBUG(Kernel, "Allocated at target={:08X}", target); return MakeResult(target); } ResultCode Process::LinearFree(VAddr target, u32 size) { - auto& linheap_memory = memory_region->linear_heap_memory; - + LOG_DEBUG(Kernel, "Free linear heap target={:08X}, size={:08X}", target, size); if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() || target + size < target) { - + LOG_ERROR(Kernel, "Invalid linear heap address"); return ERR_INVALID_ADDRESS; } @@ -273,32 +288,75 @@ ResultCode Process::LinearFree(VAddr target, u32 size) { return RESULT_SUCCESS; } - VAddr heap_end = GetLinearHeapBase() + (u32)linheap_memory->size(); - if (target + size > heap_end) { + ResultCode result = vm_manager.UnmapRange(target, size); + if (result.IsError()) { + LOG_ERROR(Kernel, "Trying to free already freed memory"); + return result; + } + + memory_used -= size; + resource_limit->current_commit -= size; + + u32 physical_offset = target - GetLinearHeapAreaAddress(); // relative to FCRAM + memory_region->Free(physical_offset, size); + + return RESULT_SUCCESS; +} + +ResultCode Process::Map(VAddr target, VAddr source, u32 size, VMAPermission perms) { + LOG_DEBUG(Kernel, "Map memory target={:08X}, source={:08X}, size={:08X}, perms={:08X}", target, + source, size, static_cast(perms)); + if (source < Memory::HEAP_VADDR || 
source + size > Memory::HEAP_VADDR_END || + source + size < source) { + LOG_ERROR(Kernel, "Invalid source address"); + return ERR_INVALID_ADDRESS; + } + + // TODO(wwylele): check target address range. Is it also restricted to heap region? + + auto vma = vm_manager.FindVMA(target); + if (vma->second.type != VMAType::Free || vma->second.base + vma->second.size < target + size) { + LOG_ERROR(Kernel, "Trying to map to already allocated memory"); return ERR_INVALID_ADDRESS_STATE; } - ResultCode result = vm_manager.UnmapRange(target, size); - if (result.IsError()) - return result; + // Mark source region as Aliased + CASCADE_CODE(vm_manager.ChangeMemoryState(source, size, MemoryState::Private, + VMAPermission::ReadWrite, MemoryState::Aliased, + VMAPermission::ReadWrite)); - linear_heap_used -= size; - memory_region->used -= size; - - if (target + size == heap_end) { - // End of linear heap has been freed, so check what's the last allocated block in it and - // reduce the size. - auto vma = vm_manager.FindVMA(target); - ASSERT(vma != vm_manager.vma_map.end()); - ASSERT(vma->second.type == VMAType::Free); - VAddr new_end = vma->second.base; - if (new_end >= GetLinearHeapBase()) { - linheap_memory->resize(new_end - GetLinearHeapBase()); - } + CASCADE_RESULT(auto backing_blocks, vm_manager.GetBackingBlocksForRange(source, size)); + VAddr interval_target = target; + for (const auto [backing_memory, block_size] : backing_blocks) { + auto target_vma = vm_manager.MapBackingMemory(interval_target, backing_memory, block_size, + MemoryState::Alias); + interval_target += block_size; } return RESULT_SUCCESS; } +ResultCode Process::Unmap(VAddr target, VAddr source, u32 size, VMAPermission perms) { + LOG_DEBUG(Kernel, "Unmap memory target={:08X}, source={:08X}, size={:08X}, perms={:08X}", + target, source, size, static_cast(perms)); + if (source < Memory::HEAP_VADDR || source + size > Memory::HEAP_VADDR_END || + source + size < source) { + LOG_ERROR(Kernel, "Invalid source address"); + 
return ERR_INVALID_ADDRESS; + } + + // TODO(wwylele): check target address range. Is it also restricted to heap region? + + // TODO(wwylele): check that the source and the target are actually a pair created by Map + // Should return error 0xD8E007F5 in this case + + CASCADE_CODE(vm_manager.UnmapRange(target, size)); + + // Change back source region state. Note that the permission is reprotected according to param + CASCADE_CODE(vm_manager.ChangeMemoryState(source, size, MemoryState::Aliased, + VMAPermission::None, MemoryState::Private, perms)); + + return RESULT_SUCCESS; +} Kernel::Process::Process(KernelSystem& kernel) : Object(kernel), handle_table(kernel), kernel(kernel) {} diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h index d1f529943..6924afe36 100644 --- a/src/core/hle/kernel/process.h +++ b/src/core/hle/kernel/process.h @@ -165,15 +165,7 @@ public: VMManager vm_manager; - // Memory used to back the allocations in the regular heap. A single vector is used to cover - // the entire virtual address space extents that bound the allocations, including any holes. - // This makes deallocation and reallocation of holes fast and keeps process memory contiguous - // in the emulator address space, allowing Memory::GetPointer to be reasonably safe. - std::shared_ptr> heap_memory; - // The left/right bounds of the address space covered by heap_memory. 
- VAddr heap_start = 0, heap_end = 0; - - u32 heap_used = 0, linear_heap_used = 0, misc_memory_used = 0; + u32 memory_used = 0; MemoryRegionInfo* memory_region = nullptr; @@ -188,12 +180,17 @@ public: VAddr GetLinearHeapBase() const; VAddr GetLinearHeapLimit() const; - ResultVal HeapAllocate(VAddr target, u32 size, VMAPermission perms); + ResultVal HeapAllocate(VAddr target, u32 size, VMAPermission perms, + MemoryState memory_state = MemoryState::Private, + bool skip_range_check = false); ResultCode HeapFree(VAddr target, u32 size); ResultVal LinearAllocate(VAddr target, u32 size, VMAPermission perms); ResultCode LinearFree(VAddr target, u32 size); + ResultCode Map(VAddr target, VAddr source, u32 size, VMAPermission perms); + ResultCode Unmap(VAddr target, VAddr source, u32 size, VMAPermission perms); + private: explicit Process(Kernel::KernelSystem& kernel); ~Process() override; diff --git a/src/core/hle/kernel/shared_memory.cpp b/src/core/hle/kernel/shared_memory.cpp index 4f0b2e8ee..560d3f3ac 100644 --- a/src/core/hle/kernel/shared_memory.cpp +++ b/src/core/hle/kernel/shared_memory.cpp @@ -11,8 +11,13 @@ namespace Kernel { -SharedMemory::SharedMemory(KernelSystem& kernel) : Object(kernel) {} -SharedMemory::~SharedMemory() {} +SharedMemory::SharedMemory(KernelSystem& kernel) : Object(kernel), kernel(kernel) {} +SharedMemory::~SharedMemory() { + for (const auto& interval : holding_memory) { + kernel.GetMemoryRegion(MemoryRegion::SYSTEM) + ->Free(interval.lower(), interval.upper() - interval.lower()); + } +} SharedPtr KernelSystem::CreateSharedMemory(Process* owner_process, u32 size, MemoryPermission permissions, @@ -31,44 +36,26 @@ SharedPtr KernelSystem::CreateSharedMemory(Process* owner_process, // We need to allocate a block from the Linear Heap ourselves. // We'll manually allocate some memory from the linear heap in the specified region. 
MemoryRegionInfo* memory_region = GetMemoryRegion(region); - auto& linheap_memory = memory_region->linear_heap_memory; + auto offset = memory_region->LinearAllocate(size); - ASSERT_MSG(linheap_memory->size() + size <= memory_region->size, - "Not enough space in region to allocate shared memory!"); + ASSERT_MSG(offset, "Not enough space in region to allocate shared memory!"); - shared_memory->backing_block = linheap_memory; - shared_memory->backing_block_offset = linheap_memory->size(); - // Allocate some memory from the end of the linear heap for this region. - linheap_memory->insert(linheap_memory->end(), size, 0); - memory_region->used += size; - - shared_memory->linear_heap_phys_address = - Memory::FCRAM_PADDR + memory_region->base + - static_cast(shared_memory->backing_block_offset); + std::fill(Memory::fcram.data() + *offset, Memory::fcram.data() + *offset + size, 0); + shared_memory->backing_blocks = {{Memory::fcram.data() + *offset, size}}; + shared_memory->holding_memory += MemoryRegionInfo::Interval(*offset, *offset + size); + shared_memory->linear_heap_phys_offset = *offset; // Increase the amount of used linear heap memory for the owner process. if (shared_memory->owner_process != nullptr) { - shared_memory->owner_process->linear_heap_used += size; - } - - // Refresh the address mappings for the current process. - if (current_process != nullptr) { - current_process->vm_manager.RefreshMemoryBlockMappings(linheap_memory.get()); + shared_memory->owner_process->memory_used += size; } } else { auto& vm_manager = shared_memory->owner_process->vm_manager; // The memory is already available and mapped in the owner process. - auto vma = vm_manager.FindVMA(address); - ASSERT_MSG(vma != vm_manager.vma_map.end(), "Invalid memory address"); - ASSERT_MSG(vma->second.backing_block, "Backing block doesn't exist for address"); - // The returned VMA might be a bigger one encompassing the desired address. 
- auto vma_offset = address - vma->first; - ASSERT_MSG(vma_offset + size <= vma->second.size, - "Shared memory exceeds bounds of mapped block"); - - shared_memory->backing_block = vma->second.backing_block; - shared_memory->backing_block_offset = vma->second.offset + vma_offset; + auto backing_blocks = vm_manager.GetBackingBlocksForRange(address, size); + ASSERT_MSG(backing_blocks.Succeeded(), "Trying to share freed memory"); + shared_memory->backing_blocks = std::move(backing_blocks).Unwrap(); } shared_memory->base_address = address; @@ -76,17 +63,26 @@ SharedPtr KernelSystem::CreateSharedMemory(Process* owner_process, } SharedPtr KernelSystem::CreateSharedMemoryForApplet( - std::shared_ptr> heap_block, u32 offset, u32 size, MemoryPermission permissions, - MemoryPermission other_permissions, std::string name) { + u32 offset, u32 size, MemoryPermission permissions, MemoryPermission other_permissions, + std::string name) { SharedPtr shared_memory(new SharedMemory(*this)); + // Allocate memory in heap + MemoryRegionInfo* memory_region = GetMemoryRegion(MemoryRegion::SYSTEM); + auto backing_blocks = memory_region->HeapAllocate(size); + ASSERT_MSG(!backing_blocks.empty(), "Not enough space in region to allocate shared memory!"); + shared_memory->holding_memory = backing_blocks; shared_memory->owner_process = nullptr; shared_memory->name = std::move(name); shared_memory->size = size; shared_memory->permissions = permissions; shared_memory->other_permissions = other_permissions; - shared_memory->backing_block = heap_block; - shared_memory->backing_block_offset = offset; + for (const auto& interval : backing_blocks) { + shared_memory->backing_blocks.push_back( + {Memory::fcram.data() + interval.lower(), interval.upper() - interval.lower()}); + std::fill(Memory::fcram.data() + interval.lower(), Memory::fcram.data() + interval.upper(), + 0); + } shared_memory->base_address = Memory::HEAP_VADDR + offset; return shared_memory; @@ -146,24 +142,29 @@ ResultCode 
SharedMemory::Map(Process* target_process, VAddr address, MemoryPermi if (base_address == 0 && target_address == 0) { // Calculate the address at which to map the memory block. - auto maybe_vaddr = Memory::PhysicalToVirtualAddress(linear_heap_phys_address); - ASSERT(maybe_vaddr); - target_address = *maybe_vaddr; + target_address = linear_heap_phys_offset + target_process->GetLinearHeapAreaAddress(); + } + + auto vma = target_process->vm_manager.FindVMA(target_address); + if (vma->second.type != VMAType::Free || + vma->second.base + vma->second.size < target_address + size) { + LOG_ERROR(Kernel, + "cannot map id={}, address=0x{:08X} name={}, mapping to already allocated memory", + GetObjectId(), address, name); + return ERR_INVALID_ADDRESS_STATE; } // Map the memory block into the target process - auto result = target_process->vm_manager.MapMemoryBlock( - target_address, backing_block, backing_block_offset, size, MemoryState::Shared); - if (result.Failed()) { - LOG_ERROR( - Kernel, - "cannot map id={}, target_address=0x{:08X} name={}, error mapping to virtual memory", - GetObjectId(), target_address, name); - return result.Code(); + VAddr interval_target = target_address; + for (const auto& interval : backing_blocks) { + auto vma = target_process->vm_manager.MapBackingMemory( + interval_target, interval.first, interval.second, MemoryState::Shared); + ASSERT(vma.Succeeded()); + target_process->vm_manager.Reprotect(vma.Unwrap(), ConvertPermissions(permissions)); + interval_target += interval.second; } - return target_process->vm_manager.ReprotectRange(target_address, size, - ConvertPermissions(permissions)); + return RESULT_SUCCESS; } ResultCode SharedMemory::Unmap(Process* target_process, VAddr address) { @@ -179,7 +180,10 @@ VMAPermission SharedMemory::ConvertPermissions(MemoryPermission permission) { }; u8* SharedMemory::GetPointer(u32 offset) { - return backing_block->data() + backing_block_offset + offset; + if (backing_blocks.size() != 1) { + LOG_WARNING(Kernel, 
"Unsafe GetPointer on discontinuous SharedMemory"); + } + return backing_blocks[0].first + offset; } } // namespace Kernel diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h index 18a87b9fe..a48f607ba 100644 --- a/src/core/hle/kernel/shared_memory.h +++ b/src/core/hle/kernel/shared_memory.h @@ -61,13 +61,11 @@ public: Process* owner_process; /// Address of shared memory block in the owner process if specified. VAddr base_address; - /// Physical address of the shared memory block in the linear heap if no address was specified + /// Offset in FCRAM of the shared memory block in the linear heap if no address was specified /// during creation. - PAddr linear_heap_phys_address; + PAddr linear_heap_phys_offset; /// Backing memory for this shared memory block. - std::shared_ptr> backing_block; - /// Offset into the backing block for this shared memory. - std::size_t backing_block_offset; + std::vector> backing_blocks; /// Size of the memory block. Page-aligned. u32 size; /// Permission restrictions applied to the process which created the block. @@ -77,11 +75,14 @@ public: /// Name of shared memory object. 
std::string name; + MemoryRegionInfo::IntervalSet holding_memory; + private: explicit SharedMemory(KernelSystem& kernel); ~SharedMemory() override; friend class KernelSystem; + KernelSystem& kernel; }; } // namespace Kernel diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index f23b569d7..1594c2ade 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -114,16 +114,12 @@ static ResultCode ControlMemory(u32* out_addr, u32 operation, u32 addr0, u32 add } case MEMOP_MAP: { - // TODO: This is just a hack to avoid regressions until memory aliasing is implemented - CASCADE_RESULT(*out_addr, process.HeapAllocate(addr0, size, vma_permissions)); + CASCADE_CODE(process.Map(addr0, addr1, size, vma_permissions)); break; } case MEMOP_UNMAP: { - // TODO: This is just a hack to avoid regressions until memory aliasing is implemented - ResultCode result = process.HeapFree(addr0, size); - if (result.IsError()) - return result; + CASCADE_CODE(process.Unmap(addr0, addr1, size, vma_permissions)); break; } @@ -1289,7 +1285,7 @@ static ResultCode GetProcessInfo(s64* out, Handle process_handle, u32 type) { case 2: // TODO(yuriks): Type 0 returns a slightly higher number than type 2, but I'm not sure // what's the difference between them. - *out = process->heap_used + process->linear_heap_used + process->misc_memory_used; + *out = process->memory_used; if (*out % Memory::PAGE_SIZE != 0) { LOG_ERROR(Kernel_SVC, "called, memory size not page-aligned"); return ERR_MISALIGNED_SIZE; diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 236e1d7b0..c0a2ccee9 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -333,32 +333,27 @@ ResultVal> KernelSystem::CreateThread(std::string name, VAddr // There are no already-allocated pages with free slots, lets allocate a new one. // TLS pages are allocated from the BASE region in the linear heap. 
MemoryRegionInfo* memory_region = GetMemoryRegion(MemoryRegion::BASE); - auto& linheap_memory = memory_region->linear_heap_memory; - if (linheap_memory->size() + Memory::PAGE_SIZE > memory_region->size) { + // Allocate some memory from the end of the linear heap for this region. + auto offset = memory_region->LinearAllocate(Memory::PAGE_SIZE); + if (!offset) { LOG_ERROR(Kernel_SVC, "Not enough space in region to allocate a new TLS page for thread"); return ERR_OUT_OF_MEMORY; } - - std::size_t offset = linheap_memory->size(); - - // Allocate some memory from the end of the linear heap for this region. - linheap_memory->insert(linheap_memory->end(), Memory::PAGE_SIZE, 0); - memory_region->used += Memory::PAGE_SIZE; - owner_process.linear_heap_used += Memory::PAGE_SIZE; + owner_process.memory_used += Memory::PAGE_SIZE; tls_slots.emplace_back(0); // The page is completely available at the start available_page = tls_slots.size() - 1; available_slot = 0; // Use the first slot in the new page auto& vm_manager = owner_process.vm_manager; - vm_manager.RefreshMemoryBlockMappings(linheap_memory.get()); // Map the page to the current process' address space. // TODO(Subv): Find the correct MemoryState for this region. 
- vm_manager.MapMemoryBlock(Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE, - linheap_memory, offset, Memory::PAGE_SIZE, MemoryState::Private); + vm_manager.MapBackingMemory(Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE, + Memory::fcram.data() + *offset, Memory::PAGE_SIZE, + MemoryState::Private); } // Mark the slot as used @@ -366,6 +361,8 @@ ResultVal> KernelSystem::CreateThread(std::string name, VAddr thread->tls_address = Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE + available_slot * Memory::TLS_ENTRY_SIZE; + Memory::ZeroBlock(owner_process, thread->tls_address, Memory::TLS_ENTRY_SIZE); + // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used // to initialize the context ResetThreadContext(thread->context, stack_top, entry_point, arg); diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp index 36e24682b..bab8891bc 100644 --- a/src/core/hle/kernel/vm_manager.cpp +++ b/src/core/hle/kernel/vm_manager.cpp @@ -408,4 +408,25 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) { break; } } + +ResultVal>> VMManager::GetBackingBlocksForRange(VAddr address, + u32 size) { + std::vector> backing_blocks; + VAddr interval_target = address; + while (interval_target != address + size) { + auto vma = FindVMA(interval_target); + if (vma->second.type != VMAType::BackingMemory) { + LOG_ERROR(Kernel, "Trying to use already freed memory"); + return ERR_INVALID_ADDRESS_STATE; + } + + VAddr interval_end = std::min(address + size, vma->second.base + vma->second.size); + u32 interval_size = interval_end - interval_target; + u8* backing_memory = vma->second.backing_memory + (interval_target - vma->second.base); + backing_blocks.push_back({backing_memory, interval_size}); + + interval_target += interval_size; + } + return MakeResult(backing_blocks); +} } // namespace Kernel diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h index 
7ac5c3b01..5464ad50b 100644 --- a/src/core/hle/kernel/vm_manager.h +++ b/src/core/hle/kernel/vm_manager.h @@ -6,6 +6,7 @@ #include #include +#include #include #include "common/common_types.h" #include "core/hle/result.h" @@ -213,6 +214,9 @@ public: /// Dumps the address space layout to the log, for debugging void LogLayout(Log::Level log_level) const; + /// Gets a list of backing memory blocks for the specified range + ResultVal>> GetBackingBlocksForRange(VAddr address, u32 size); + /// Each VMManager has its own page table, which is set as the main one when the owning process /// is scheduled. Memory::PageTable page_table; diff --git a/src/core/hle/service/apt/apt.cpp b/src/core/hle/service/apt/apt.cpp index 13c278158..741a288e8 100644 --- a/src/core/hle/service/apt/apt.cpp +++ b/src/core/hle/service/apt/apt.cpp @@ -207,8 +207,8 @@ void Module::Interface::GetSharedFont(Kernel::HLERequestContext& ctx) { // The shared font has to be relocated to the new address before being passed to the // application. 
- auto maybe_vaddr = - Memory::PhysicalToVirtualAddress(apt->shared_font_mem->linear_heap_phys_address); + auto maybe_vaddr = Memory::PhysicalToVirtualAddress( + apt->shared_font_mem->linear_heap_phys_offset + Memory::FCRAM_PADDR); ASSERT(maybe_vaddr); VAddr target_address = *maybe_vaddr; if (!apt->shared_font_relocated) { diff --git a/src/core/memory.cpp b/src/core/memory.cpp index eed279c9a..697345ea7 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -23,6 +23,7 @@ namespace Memory { static std::array vram; static std::array n3ds_extra_ram; +std::array fcram; static PageTable* current_page_table = nullptr; @@ -305,15 +306,7 @@ u8* GetPhysicalPointer(PAddr address) { target_pointer = Core::DSP().GetDspMemory().data() + offset_into_region; break; case FCRAM_PADDR: - for (const auto& region : Core::System::GetInstance().Kernel().memory_regions) { - if (offset_into_region >= region.base && - offset_into_region < region.base + region.size) { - target_pointer = - region.linear_heap_memory->data() + offset_into_region - region.base; - break; - } - } - ASSERT_MSG(target_pointer != nullptr, "Invalid FCRAM address"); + target_pointer = fcram.data() + offset_into_region; break; case N3DS_EXTRA_RAM_PADDR: target_pointer = n3ds_extra_ram.data() + offset_into_region; @@ -846,4 +839,9 @@ std::optional PhysicalToVirtualAddress(const PAddr addr) { return {}; } +u32 GetFCRAMOffset(u8* pointer) { + ASSERT(pointer >= fcram.data() && pointer < fcram.data() + fcram.size()); + return pointer - fcram.data(); +} + } // namespace Memory diff --git a/src/core/memory.h b/src/core/memory.h index f457ec934..bad47375a 100644 --- a/src/core/memory.h +++ b/src/core/memory.h @@ -176,6 +176,8 @@ enum : VAddr { NEW_LINEAR_HEAP_VADDR_END = NEW_LINEAR_HEAP_VADDR + NEW_LINEAR_HEAP_SIZE, }; +extern std::array fcram; + /// Currently active page table void SetCurrentPageTable(PageTable* page_table); PageTable* GetCurrentPageTable(); @@ -271,4 +273,7 @@ enum class FlushMode { */ void 
RasterizerFlushVirtualRegion(VAddr start, u32 size, FlushMode mode); +/// Gets offset in FCRAM from a pointer inside FCRAM range +u32 GetFCRAMOffset(u8* pointer); + } // namespace Memory