2021-04-03 07:16:58 +02:00
|
|
|
// Copyright 2021 yuzu emulator team
|
|
|
|
// Licensed under GPLv2 or any later version
|
|
|
|
// Refer to the license.txt file included.
|
|
|
|
|
|
|
|
#include "common/alignment.h"
|
|
|
|
#include "common/assert.h"
|
|
|
|
#include "common/common_funcs.h"
|
|
|
|
#include "common/common_types.h"
|
|
|
|
#include "core/core.h"
|
2022-03-12 01:11:57 +01:00
|
|
|
#include "core/device_memory.h"
|
2021-04-03 07:16:58 +02:00
|
|
|
#include "core/hardware_properties.h"
|
|
|
|
#include "core/hle/kernel/init/init_slab_setup.h"
|
2021-12-05 21:04:08 +01:00
|
|
|
#include "core/hle/kernel/k_code_memory.h"
|
2021-04-04 09:56:09 +02:00
|
|
|
#include "core/hle/kernel/k_event.h"
|
2021-04-03 07:16:58 +02:00
|
|
|
#include "core/hle/kernel/k_memory_layout.h"
|
|
|
|
#include "core/hle/kernel/k_memory_manager.h"
|
2022-03-12 01:11:57 +01:00
|
|
|
#include "core/hle/kernel/k_page_buffer.h"
|
2021-04-24 02:00:15 +02:00
|
|
|
#include "core/hle/kernel/k_port.h"
|
2021-04-24 07:04:28 +02:00
|
|
|
#include "core/hle/kernel/k_process.h"
|
2021-04-21 06:28:11 +02:00
|
|
|
#include "core/hle/kernel/k_resource_limit.h"
|
2021-04-14 02:48:37 +02:00
|
|
|
#include "core/hle/kernel/k_session.h"
|
2021-04-04 08:22:07 +02:00
|
|
|
#include "core/hle/kernel/k_shared_memory.h"
|
2022-03-12 01:11:57 +01:00
|
|
|
#include "core/hle/kernel/k_shared_memory_info.h"
|
2021-04-03 07:16:58 +02:00
|
|
|
#include "core/hle/kernel/k_system_control.h"
|
|
|
|
#include "core/hle/kernel/k_thread.h"
|
2022-03-12 01:11:57 +01:00
|
|
|
#include "core/hle/kernel/k_thread_local_page.h"
|
2021-04-21 06:28:11 +02:00
|
|
|
#include "core/hle/kernel/k_transfer_memory.h"
|
2021-04-03 07:16:58 +02:00
|
|
|
|
|
|
|
namespace Kernel::Init {
|
|
|
|
|
2021-05-05 06:35:42 +02:00
|
|
|
// Convenience accessor for the configured object count of a given slab class,
// read from the kernel's slab resource counts (requires a `kernel` in scope).
#define SLAB_COUNT(CLASS) kernel.SlabResourceCounts().num_##CLASS

// X-macro enumerating every slab type that receives a dedicated slab heap,
// paired with the number of objects reserved for it. HANDLER is invoked once
// per (type, count) pair. The listing order here also fixes the ordering of
// the KSlabType enum generated below.
// NOTE(review): KSharedMemoryInfo is sized at 8x the KSharedMemory count, and
// KThreadLocalPage at num_KProcess + (num_KProcess + num_KThread) / 8 —
// presumably mirroring the reference kernel's ratios; verify against HOS.
#define FOREACH_SLAB_TYPE(HANDLER, ...)                                                            \
    HANDLER(KProcess, (SLAB_COUNT(KProcess)), ##__VA_ARGS__)                                       \
    HANDLER(KThread, (SLAB_COUNT(KThread)), ##__VA_ARGS__)                                         \
    HANDLER(KEvent, (SLAB_COUNT(KEvent)), ##__VA_ARGS__)                                           \
    HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__)                                             \
    HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__)                             \
    HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ##__VA_ARGS__)                     \
    HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__)                         \
    HANDLER(KCodeMemory, (SLAB_COUNT(KCodeMemory)), ##__VA_ARGS__)                                 \
    HANDLER(KSession, (SLAB_COUNT(KSession)), ##__VA_ARGS__)                                       \
    HANDLER(KThreadLocalPage,                                                                      \
            (SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8),             \
            ##__VA_ARGS__)                                                                         \
    HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__)
|
2021-04-03 07:16:58 +02:00
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
// Generator: expands to one enumerator (KSlabType_<NAME>) per slab type.
#define DEFINE_SLAB_TYPE_ENUM_MEMBER(NAME, COUNT, ...) KSlabType_##NAME,

// Enumeration of every slab type listed in FOREACH_SLAB_TYPE, in listing
// order. KSlabType_Count is the total number of slab types and is used to
// size the per-type arrays during slab heap initialization.
enum KSlabType : u32 {
    FOREACH_SLAB_TYPE(DEFINE_SLAB_TYPE_ENUM_MEMBER) KSlabType_Count,
};

#undef DEFINE_SLAB_TYPE_ENUM_MEMBER
|
|
|
|
|
|
|
|
// Constexpr counts.
// Default object counts for each slab heap. NOTE(review): these values appear
// to mirror the reference kernel's slab configuration — confirm against the
// target firmware version when updating.
constexpr size_t SlabCountKProcess = 80;
constexpr size_t SlabCountKThread = 800;
constexpr size_t SlabCountKEvent = 900;
constexpr size_t SlabCountKInterruptEvent = 100;
constexpr size_t SlabCountKPort = 384;
constexpr size_t SlabCountKSharedMemory = 80;
constexpr size_t SlabCountKTransferMemory = 200;
constexpr size_t SlabCountKCodeMemory = 10;
constexpr size_t SlabCountKDeviceAddressSpace = 300;
constexpr size_t SlabCountKSession = 1133;
constexpr size_t SlabCountKLightSession = 100;
constexpr size_t SlabCountKObjectName = 7;
constexpr size_t SlabCountKResourceLimit = 5;
// One debug object per CPU core.
constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES;
constexpr size_t SlabCountKIoPool = 1;
constexpr size_t SlabCountKIoRegion = 6;

// Extra threads granted when KSystemControl requests an increased thread
// resource limit (see InitializeSlabResourceCounts).
constexpr size_t SlabCountExtraKThread = 160;
|
|
|
|
|
2022-03-12 01:11:57 +01:00
|
|
|
/// Helper function to translate from the slab virtual address to the reserved location in physical
|
|
|
|
/// memory.
|
|
|
|
static PAddr TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout, VAddr slab_addr) {
|
|
|
|
slab_addr -= memory_layout.GetSlabRegionAddress();
|
|
|
|
return slab_addr + Core::DramMemoryMap::SlabHeapBase;
|
|
|
|
}
|
|
|
|
|
2021-04-03 07:16:58 +02:00
|
|
|
template <typename T>
|
|
|
|
VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAddr address,
|
|
|
|
size_t num_objects) {
|
2021-05-21 03:15:59 +02:00
|
|
|
|
2021-04-03 07:16:58 +02:00
|
|
|
const size_t size = Common::AlignUp(sizeof(T) * num_objects, alignof(void*));
|
|
|
|
VAddr start = Common::AlignUp(address, alignof(T));
|
|
|
|
|
2022-03-12 01:11:57 +01:00
|
|
|
// This should use the virtual memory address passed in, but currently, we do not setup the
|
|
|
|
// kernel virtual memory layout. Instead, we simply map these at a region of physical memory
|
|
|
|
// that we reserve for the slab heaps.
|
|
|
|
// TODO(bunnei): Fix this once we support the kernel virtual memory layout.
|
2021-05-21 03:15:59 +02:00
|
|
|
|
2021-04-03 07:16:58 +02:00
|
|
|
if (size > 0) {
|
2022-03-12 01:11:57 +01:00
|
|
|
void* backing_kernel_memory{
|
|
|
|
system.DeviceMemory().GetPointer(TranslateSlabAddrToPhysical(memory_layout, start))};
|
|
|
|
|
2021-04-03 07:16:58 +02:00
|
|
|
const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1);
|
|
|
|
ASSERT(region != nullptr);
|
|
|
|
ASSERT(region->IsDerivedFrom(KMemoryRegionType_KernelSlab));
|
2021-05-21 03:15:59 +02:00
|
|
|
T::InitializeSlabHeap(system.Kernel(), backing_kernel_memory, size);
|
2021-04-03 07:16:58 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return start + size;
|
|
|
|
}
|
|
|
|
|
2022-03-12 22:02:45 +01:00
|
|
|
/// Returns the total size budget for the randomized gaps placed between slab
/// heaps (must not exceed the layout's maximum gap size).
size_t CalculateSlabHeapGapSize() {
    constexpr size_t GapSize = 2_MiB - 296_KiB;
    static_assert(GapSize <= KernelSlabHeapGapsSizeMax);
    return GapSize;
}
|
|
|
|
|
2021-04-03 07:16:58 +02:00
|
|
|
} // namespace
|
|
|
|
|
2021-05-05 06:35:42 +02:00
|
|
|
// Builds the default slab resource counts from the file-local constexpr
// configuration above. Fields not listed here are zero-initialized.
KSlabResourceCounts KSlabResourceCounts::CreateDefault() {
    KSlabResourceCounts counts{};
    counts.num_KProcess = SlabCountKProcess;
    counts.num_KThread = SlabCountKThread;
    counts.num_KEvent = SlabCountKEvent;
    counts.num_KInterruptEvent = SlabCountKInterruptEvent;
    counts.num_KPort = SlabCountKPort;
    counts.num_KSharedMemory = SlabCountKSharedMemory;
    counts.num_KTransferMemory = SlabCountKTransferMemory;
    counts.num_KCodeMemory = SlabCountKCodeMemory;
    counts.num_KDeviceAddressSpace = SlabCountKDeviceAddressSpace;
    counts.num_KSession = SlabCountKSession;
    counts.num_KLightSession = SlabCountKLightSession;
    counts.num_KObjectName = SlabCountKObjectName;
    counts.num_KResourceLimit = SlabCountKResourceLimit;
    counts.num_KDebug = SlabCountKDebug;
    counts.num_KIoPool = SlabCountKIoPool;
    counts.num_KIoRegion = SlabCountKIoRegion;
    return counts;
}
|
|
|
|
|
2021-05-05 06:35:42 +02:00
|
|
|
// Installs the default slab resource counts into the kernel, optionally
// granting extra threads when system control requests a raised thread limit.
void InitializeSlabResourceCounts(KernelCore& kernel) {
    auto& counts = kernel.SlabResourceCounts();
    counts = KSlabResourceCounts::CreateDefault();

    if (KSystemControl::Init::ShouldIncreaseThreadResourceLimit()) {
        counts.num_KThread += SlabCountExtraKThread;
    }
}
|
|
|
|
|
2021-05-05 06:35:42 +02:00
|
|
|
// Computes the worst-case total memory footprint of all slab heaps: for each
// slab type, up to alignof(NAME) bytes of alignment padding plus the
// pointer-aligned object storage, plus the reserved inter-slab gap budget.
size_t CalculateTotalSlabHeapSize(const KernelCore& kernel) {
    size_t size = 0;

// Accumulates one slab's worst-case footprint into `size`.
#define ADD_SLAB_SIZE(NAME, COUNT, ...)                                                            \
    {                                                                                              \
        size += alignof(NAME);                                                                     \
        size += Common::AlignUp(sizeof(NAME) * (COUNT), alignof(void*));                           \
    };

    // Add the size required for each slab.
    FOREACH_SLAB_TYPE(ADD_SLAB_SIZE)

#undef ADD_SLAB_SIZE

    // Add the reserved size.
    size += CalculateSlabHeapGapSize();

    return size;
}
|
|
|
|
|
2022-03-12 01:11:57 +01:00
|
|
|
// Sets up the KPageBuffer slab heap, whose pages back thread-local regions
// and process bookkeeping. Reserves the memory against the system resource
// limit, allocates contiguous pages from the system pool, and initializes the
// slab heap over them.
void InitializeKPageBufferSlabHeap(Core::System& system) {
    auto& kernel = system.Kernel();

    // One page per process and per thread, plus a 1/8 safety margin —
    // mirrors the KThreadLocalPage count in FOREACH_SLAB_TYPE.
    const auto& counts = kernel.SlabResourceCounts();
    const size_t num_pages =
        counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8;
    const size_t slab_size = num_pages * PageSize;

    // Reserve memory from the system resource limit.
    // The reservation is a required side effect, so it is performed outside of
    // ASSERT; an assert macro that compiles out in release builds would
    // otherwise silently skip the reservation.
    const bool reserved =
        kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemory, slab_size);
    ASSERT(reserved);

    // Allocate memory for the slab.
    constexpr auto AllocateOption = KMemoryManager::EncodeOption(
        KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront);
    const PAddr slab_address =
        kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
    ASSERT(slab_address != 0);

    // Initialize the slabheap.
    KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address),
                                    slab_size);
}
|
|
|
|
|
2021-04-03 07:16:58 +02:00
|
|
|
// Lays out and initializes every slab heap in the slab region, in a randomly
// shuffled order with randomly sized gaps between heaps (heap layout
// randomization, matching the reference kernel's behavior).
void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
    auto& kernel = system.Kernel();

    // Get the start of the slab region, since that's where we'll be working.
    VAddr address = memory_layout.GetSlabRegionAddress();

    // Initialize slab type array to be in sorted order.
    std::array<KSlabType, KSlabType_Count> slab_types;
    for (size_t i = 0; i < slab_types.size(); i++) {
        slab_types[i] = static_cast<KSlabType>(i);
    }

    // N shuffles the slab type array with the following simple algorithm.
    // (Not a Fisher-Yates shuffle; kept as-is to match N's behavior.)
    for (size_t i = 0; i < slab_types.size(); i++) {
        const size_t rnd = KSystemControl::GenerateRandomRange(0, slab_types.size() - 1);
        std::swap(slab_types[i], slab_types[rnd]);
    }

    // Create an array to represent the gaps between the slabs.
    const size_t total_gap_size = CalculateSlabHeapGapSize();
    std::array<size_t, slab_types.size()> slab_gaps;
    for (auto& slab_gap : slab_gaps) {
        // Note: This is an off-by-one error from Nintendo's intention, because GenerateRandomRange
        // is inclusive. However, Nintendo also has the off-by-one error, and it's "harmless", so we
        // will include it ourselves.
        slab_gap = KSystemControl::GenerateRandomRange(0, total_gap_size);
    }

    // Sort the array, so that we can treat differences between values as offsets to the starts of
    // slabs. (Insertion sort over the small fixed-size array.)
    for (size_t i = 1; i < slab_gaps.size(); i++) {
        for (size_t j = i; j > 0 && slab_gaps[j - 1] > slab_gaps[j]; j--) {
            std::swap(slab_gaps[j], slab_gaps[j - 1]);
        }
    }

    // Track the gaps, so that we can free them to the unused slab tree.
    // NOTE(review): gap_start/gap_size are tracked and reset below, but no
    // code here actually frees the gaps — presumably a placeholder for the
    // reference kernel's unused-slab-tree handling; confirm intent.
    VAddr gap_start = address;
    size_t gap_size = 0;

    for (size_t i = 0; i < slab_gaps.size(); i++) {
        // Add the random gap to the address. Differences between sorted gap
        // values serve as the per-slab gap offsets.
        const auto cur_gap = (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1];
        address += cur_gap;
        gap_size += cur_gap;

// Switch-case arm: initializes the slab heap for NAME and advances `address`
// past it (skipped when the configured count is zero).
#define INITIALIZE_SLAB_HEAP(NAME, COUNT, ...)                                                     \
    case KSlabType_##NAME:                                                                         \
        if (COUNT > 0) {                                                                           \
            address = InitializeSlabHeap<NAME>(system, memory_layout, address, COUNT);             \
        }                                                                                          \
        break;

        // Initialize the slabheap.
        // NOTE(review): INITIALIZE_SLAB_HEAP is never #undef'd after use,
        // unlike the file's other helper macros — consider adding one.
        switch (slab_types[i]) {
            // For each of the slab types, we want to initialize that heap.
            FOREACH_SLAB_TYPE(INITIALIZE_SLAB_HEAP)
            // If we somehow get an invalid type, abort.
        default:
            UNREACHABLE_MSG("Unknown slab type: {}", slab_types[i]);
        }

        // If we've hit the end of a gap, free it.
        // (A heap was initialized this iteration iff `address` moved past
        // gap_start + gap_size, so restart gap tracking from here.)
        if (gap_start + gap_size != address) {
            gap_start = address;
            gap_size = 0;
        }
    }
}
|
|
|
|
|
|
|
|
} // namespace Kernel::Init
|