code: dodge PAGE_SIZE #define
Some header files, specifically for OSX and Musl libc, define PAGE_SIZE to be a number. This is great, except in yuzu we're using PAGE_SIZE as a variable.

Specific example: `static constexpr u64 PAGE_SIZE = u64(1) << PAGE_BITS;`

PAGE_SIZE, PAGE_BITS, and PAGE_MASK are all similar variables, so the underscores were simply deleted and a YUZU_ prefix added.

Might be worth noting that there are multiple uses in different classes/namespaces; this list may not be exhaustive:

- Core::Memory: 12 bits (4096)
- QueryCacheBase: 12 bits
- ShaderCache: 14 bits (16384)
- TextureCache: 20 bits (1048576, or 1 MB)

Fixes #8779
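For illustration, a minimal sketch of the clash and the workaround: the `#define` below stands in for whatever the Musl or Mach system header actually does, while the `YUZU_`-prefixed declarations are the ones introduced by this commit in `Core::Memory`.

```cpp
#include <cstddef>
#include <cstdint>

// Stand-in for what a Musl or Mach system header may do:
#define PAGE_SIZE 4096

using u64 = std::uint64_t;

// Once the macro is visible, the old declaration no longer compiles, because
// the preprocessor rewrites it to "constexpr u64 4096 = ...":
// constexpr u64 PAGE_SIZE = u64(1) << PAGE_BITS;

// The renamed variables sidestep the macro without any #undef dance:
constexpr std::size_t YUZU_PAGEBITS = 12;
constexpr u64 YUZU_PAGESIZE = u64(1) << YUZU_PAGEBITS;
constexpr u64 YUZU_PAGEMASK = YUZU_PAGESIZE - 1;

static_assert(YUZU_PAGESIZE == 4096 && YUZU_PAGEMASK == 0xFFF);
```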
This commit is contained in:
parent 6f931d49c7
commit 14e9de6678

17 changed files with 116 additions and 119 deletions
@@ -22,12 +22,3 @@ typedef void* HANDLE;
 #include <microprofile.h>
 
 #define MP_RGB(r, g, b) ((r) << 16 | (g) << 8 | (b) << 0)
-
-// On OS X, some Mach header included by MicroProfile defines these as macros, conflicting with
-// identifiers we use.
-#ifdef PAGE_SIZE
-#undef PAGE_SIZE
-#endif
-#ifdef PAGE_MASK
-#undef PAGE_MASK
-#endif
@@ -190,8 +190,8 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
     config.callbacks = cb.get();
     config.coprocessors[15] = cp15;
     config.define_unpredictable_behaviour = true;
-    static constexpr std::size_t PAGE_BITS = 12;
-    static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - PAGE_BITS);
+    static constexpr std::size_t YUZU_PAGEBITS = 12;
+    static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - YUZU_PAGEBITS);
     if (page_table) {
         config.page_table = reinterpret_cast<std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>*>(
             page_table->pointers.data());
@@ -14,7 +14,7 @@ namespace Loader {
 
 namespace {
 constexpr u32 PageAlignSize(u32 size) {
-    return static_cast<u32>((size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK);
+    return static_cast<u32>((size + Core::Memory::YUZU_PAGEMASK) & ~Core::Memory::YUZU_PAGEMASK);
 }
 } // Anonymous namespace
 
@@ -125,7 +125,7 @@ FileType AppLoader_NRO::IdentifyType(const FileSys::VirtualFile& nro_file) {
 }
 
 static constexpr u32 PageAlignSize(u32 size) {
-    return static_cast<u32>((size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK);
+    return static_cast<u32>((size + Core::Memory::YUZU_PAGEMASK) & ~Core::Memory::YUZU_PAGEMASK);
 }
 
 static bool LoadNroImpl(Kernel::KProcess& process, const std::vector<u8>& data) {
@@ -45,7 +45,7 @@ std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data,
 }
 
 constexpr u32 PageAlignSize(u32 size) {
-    return static_cast<u32>((size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK);
+    return static_cast<u32>((size + Core::Memory::YUZU_PAGEMASK) & ~Core::Memory::YUZU_PAGEMASK);
 }
 } // Anonymous namespace
 
@@ -36,10 +36,11 @@ struct Memory::Impl {
     }
 
     void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
-        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
-        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
+        ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
+        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base);
         ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}", target);
-        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
+        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, target,
+                 Common::PageType::Memory);
 
         if (Settings::IsFastmemEnabled()) {
             system.DeviceMemory().buffer.Map(base, target - DramMemoryMap::Base, size);
@@ -47,9 +48,10 @@ struct Memory::Impl {
     }
 
     void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
-        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
-        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
-        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, 0, Common::PageType::Unmapped);
+        ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
+        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base);
+        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
+                 Common::PageType::Unmapped);
 
         if (Settings::IsFastmemEnabled()) {
             system.DeviceMemory().buffer.Unmap(base, size);
@@ -57,7 +59,7 @@ struct Memory::Impl {
     }
 
     [[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(VAddr vaddr) const {
-        const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]};
+        const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
 
         if (!paddr) {
             return {};
@@ -67,7 +69,7 @@ struct Memory::Impl {
     }
 
     [[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const {
-        const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]};
+        const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
 
         if (paddr == 0) {
             return {};
@@ -176,13 +178,14 @@ struct Memory::Impl {
                    auto on_unmapped, auto on_memory, auto on_rasterizer, auto increment) {
         const auto& page_table = process.PageTable().PageTableImpl();
         std::size_t remaining_size = size;
-        std::size_t page_index = addr >> PAGE_BITS;
-        std::size_t page_offset = addr & PAGE_MASK;
+        std::size_t page_index = addr >> YUZU_PAGEBITS;
+        std::size_t page_offset = addr & YUZU_PAGEMASK;
 
         while (remaining_size) {
             const std::size_t copy_amount =
-                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
-            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
+                std::min(static_cast<std::size_t>(YUZU_PAGESIZE) - page_offset, remaining_size);
+            const auto current_vaddr =
+                static_cast<VAddr>((page_index << YUZU_PAGEBITS) + page_offset);
 
             const auto [pointer, type] = page_table.pointers[page_index].PointerType();
             switch (type) {
@@ -192,7 +195,7 @@ struct Memory::Impl {
             }
             case Common::PageType::Memory: {
                 DEBUG_ASSERT(pointer);
-                u8* mem_ptr = pointer + page_offset + (page_index << PAGE_BITS);
+                u8* mem_ptr = pointer + page_offset + (page_index << YUZU_PAGEBITS);
                 on_memory(copy_amount, mem_ptr);
                 break;
             }
@@ -339,10 +342,10 @@ struct Memory::Impl {
         // Iterate over a contiguous CPU address space, marking/unmarking the region.
         // The region is at a granularity of CPU pages.
 
-        const u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
-        for (u64 i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
+        const u64 num_pages = ((vaddr + size - 1) >> YUZU_PAGEBITS) - (vaddr >> YUZU_PAGEBITS) + 1;
+        for (u64 i = 0; i < num_pages; ++i, vaddr += YUZU_PAGESIZE) {
             const Common::PageType page_type{
-                current_page_table->pointers[vaddr >> PAGE_BITS].Type()};
+                current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Type()};
             if (debug) {
                 // Switch page type to debug if now debug
                 switch (page_type) {
@@ -354,7 +357,7 @@ struct Memory::Impl {
                     // Page is already marked.
                     break;
                 case Common::PageType::Memory:
-                    current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                         nullptr, Common::PageType::DebugMemory);
                     break;
                 default:
@@ -371,9 +374,9 @@ struct Memory::Impl {
                     // Don't mess with already non-debug or rasterizer memory.
                     break;
                 case Common::PageType::DebugMemory: {
-                    u8* const pointer{GetPointerFromDebugMemory(vaddr & ~PAGE_MASK)};
-                    current_page_table->pointers[vaddr >> PAGE_BITS].Store(
-                        pointer - (vaddr & ~PAGE_MASK), Common::PageType::Memory);
+                    u8* const pointer{GetPointerFromDebugMemory(vaddr & ~YUZU_PAGEMASK)};
+                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
+                        pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory);
                     break;
                 }
                 default:
@@ -398,10 +401,10 @@ struct Memory::Impl {
         // granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size
         // is different). This assumes the specified GPU address region is contiguous as well.
 
-        const u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
-        for (u64 i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
+        const u64 num_pages = ((vaddr + size - 1) >> YUZU_PAGEBITS) - (vaddr >> YUZU_PAGEBITS) + 1;
+        for (u64 i = 0; i < num_pages; ++i, vaddr += YUZU_PAGESIZE) {
             const Common::PageType page_type{
-                current_page_table->pointers[vaddr >> PAGE_BITS].Type()};
+                current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Type()};
             if (cached) {
                 // Switch page type to cached if now cached
                 switch (page_type) {
@@ -411,7 +414,7 @@ struct Memory::Impl {
                     break;
                 case Common::PageType::DebugMemory:
                 case Common::PageType::Memory:
-                    current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                         nullptr, Common::PageType::RasterizerCachedMemory);
                     break;
                 case Common::PageType::RasterizerCachedMemory:
@@ -434,16 +437,16 @@ struct Memory::Impl {
                     // that this area is already unmarked as cached.
                     break;
                 case Common::PageType::RasterizerCachedMemory: {
-                    u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~PAGE_MASK)};
+                    u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~YUZU_PAGEMASK)};
                     if (pointer == nullptr) {
                         // It's possible that this function has been called while updating the
                         // pagetable after unmapping a VMA. In that case the underlying VMA will no
                         // longer exist, and we should just leave the pagetable entry blank.
-                        current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+                        current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                             nullptr, Common::PageType::Unmapped);
                     } else {
-                        current_page_table->pointers[vaddr >> PAGE_BITS].Store(
-                            pointer - (vaddr & ~PAGE_MASK), Common::PageType::Memory);
+                        current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
+                            pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory);
                     }
                     break;
                 }
@@ -465,8 +468,8 @@ struct Memory::Impl {
      */
     void MapPages(Common::PageTable& page_table, VAddr base, u64 size, PAddr target,
                   Common::PageType type) {
-        LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * PAGE_SIZE,
-                  (base + size) * PAGE_SIZE);
+        LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * YUZU_PAGESIZE,
+                  (base + size) * YUZU_PAGESIZE);
 
         // During boot, current_page_table might not be set yet, in which case we need not flush
        if (system.IsPoweredOn()) {
@@ -474,7 +477,7 @@ struct Memory::Impl {
             for (u64 i = 0; i < size; i++) {
                 const auto page = base + i;
                 if (page_table.pointers[page].Type() == Common::PageType::RasterizerCachedMemory) {
-                    gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE);
+                    gpu.FlushAndInvalidateRegion(page << YUZU_PAGEBITS, YUZU_PAGESIZE);
                 }
             }
         }
@@ -485,7 +488,7 @@ struct Memory::Impl {
 
         if (!target) {
             ASSERT_MSG(type != Common::PageType::Memory,
-                       "Mapping memory page without a pointer @ {:016x}", base * PAGE_SIZE);
+                       "Mapping memory page without a pointer @ {:016x}", base * YUZU_PAGESIZE);
 
             while (base != end) {
                 page_table.pointers[base].Store(nullptr, type);
@@ -496,14 +499,14 @@ struct Memory::Impl {
         } else {
             while (base != end) {
                 page_table.pointers[base].Store(
-                    system.DeviceMemory().GetPointer(target) - (base << PAGE_BITS), type);
-                page_table.backing_addr[base] = target - (base << PAGE_BITS);
+                    system.DeviceMemory().GetPointer(target) - (base << YUZU_PAGEBITS), type);
+                page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS);
 
                 ASSERT_MSG(page_table.pointers[base].Pointer(),
                            "memory mapping base yield a nullptr within the table");
 
                 base += 1;
-                target += PAGE_SIZE;
+                target += YUZU_PAGESIZE;
             }
         }
     }
@@ -518,7 +521,7 @@ struct Memory::Impl {
         }
 
         // Avoid adding any extra logic to this fast-path block
-        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
+        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw();
         if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
             return &pointer[vaddr];
         }
@@ -657,7 +660,7 @@ void Memory::UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
 bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
     const Kernel::KProcess& process = *system.CurrentProcess();
     const auto& page_table = process.PageTable().PageTableImpl();
-    const size_t page = vaddr >> PAGE_BITS;
+    const size_t page = vaddr >> YUZU_PAGEBITS;
     if (page >= page_table.pointers.size()) {
         return false;
     }
@@ -668,9 +671,9 @@ bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
 
 bool Memory::IsValidVirtualAddressRange(VAddr base, u64 size) const {
     VAddr end = base + size;
-    VAddr page = Common::AlignDown(base, PAGE_SIZE);
+    VAddr page = Common::AlignDown(base, YUZU_PAGESIZE);
 
-    for (; page < end; page += PAGE_SIZE) {
+    for (; page < end; page += YUZU_PAGESIZE) {
         if (!IsValidVirtualAddress(page)) {
             return false;
         }
@@ -27,9 +27,9 @@ namespace Core::Memory {
  * Page size used by the ARM architecture. This is the smallest granularity with which memory can
  * be mapped.
  */
-constexpr std::size_t PAGE_BITS = 12;
-constexpr u64 PAGE_SIZE = 1ULL << PAGE_BITS;
-constexpr u64 PAGE_MASK = PAGE_SIZE - 1;
+constexpr std::size_t YUZU_PAGEBITS = 12;
+constexpr u64 YUZU_PAGESIZE = 1ULL << YUZU_PAGEBITS;
+constexpr u64 YUZU_PAGEMASK = YUZU_PAGESIZE - 1;
 
 /// Virtual user-space memory regions
 enum : VAddr {
@@ -22,8 +22,9 @@ constexpr VAddr c = 0x1328914000;
 class RasterizerInterface {
 public:
     void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
-        const u64 page_start{addr >> Core::Memory::PAGE_BITS};
-        const u64 page_end{(addr + size + Core::Memory::PAGE_SIZE - 1) >> Core::Memory::PAGE_BITS};
+        const u64 page_start{addr >> Core::Memory::YUZU_PAGEBITS};
+        const u64 page_end{(addr + size + Core::Memory::YUZU_PAGESIZE - 1) >>
+                           Core::Memory::YUZU_PAGEBITS};
         for (u64 page = page_start; page < page_end; ++page) {
             int& value = page_table[page];
             value += delta;
@@ -37,7 +38,7 @@ public:
     }
 
     [[nodiscard]] int Count(VAddr addr) const noexcept {
-        const auto it = page_table.find(addr >> Core::Memory::PAGE_BITS);
+        const auto it = page_table.find(addr >> Core::Memory::YUZU_PAGEBITS);
         return it == page_table.end() ? 0 : it->second;
     }
 
@@ -36,7 +36,7 @@ struct NullBufferParams {};
 template <class RasterizerInterface>
 class BufferBase {
     static constexpr u64 PAGES_PER_WORD = 64;
-    static constexpr u64 BYTES_PER_PAGE = Core::Memory::PAGE_SIZE;
+    static constexpr u64 BYTES_PER_PAGE = Core::Memory::YUZU_PAGESIZE;
     static constexpr u64 BYTES_PER_WORD = PAGES_PER_WORD * BYTES_PER_PAGE;
 
     /// Vector tracking modified pages tightly packed with small vector optimization
@@ -60,8 +60,8 @@ class BufferCache {
 
     // Page size for caching purposes.
     // This is unrelated to the CPU page size and it can be changed as it seems optimal.
-    static constexpr u32 PAGE_BITS = 16;
-    static constexpr u64 PAGE_SIZE = u64{1} << PAGE_BITS;
+    static constexpr u32 YUZU_PAGEBITS = 16;
+    static constexpr u64 YUZU_PAGESIZE = u64{1} << YUZU_PAGEBITS;
 
     static constexpr bool IS_OPENGL = P::IS_OPENGL;
     static constexpr bool HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS =
@@ -216,8 +216,8 @@ private:
 
     template <typename Func>
     void ForEachBufferInRange(VAddr cpu_addr, u64 size, Func&& func) {
-        const u64 page_end = Common::DivCeil(cpu_addr + size, PAGE_SIZE);
-        for (u64 page = cpu_addr >> PAGE_BITS; page < page_end;) {
+        const u64 page_end = Common::DivCeil(cpu_addr + size, YUZU_PAGESIZE);
+        for (u64 page = cpu_addr >> YUZU_PAGEBITS; page < page_end;) {
             const BufferId buffer_id = page_table[page];
             if (!buffer_id) {
                 ++page;
@@ -227,7 +227,7 @@ private:
             func(buffer_id, buffer);
 
             const VAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes();
-            page = Common::DivCeil(end_addr, PAGE_SIZE);
+            page = Common::DivCeil(end_addr, YUZU_PAGESIZE);
         }
     }
 
@@ -262,8 +262,8 @@ private:
     }
 
     static bool IsRangeGranular(VAddr cpu_addr, size_t size) {
-        return (cpu_addr & ~Core::Memory::PAGE_MASK) ==
-               ((cpu_addr + size) & ~Core::Memory::PAGE_MASK);
+        return (cpu_addr & ~Core::Memory::YUZU_PAGEMASK) ==
+               ((cpu_addr + size) & ~Core::Memory::YUZU_PAGEMASK);
     }
 
     void RunGarbageCollector();
@@ -439,7 +439,7 @@ private:
     u64 minimum_memory = 0;
    u64 critical_memory = 0;
 
-    std::array<BufferId, ((1ULL << 39) >> PAGE_BITS)> page_table;
+    std::array<BufferId, ((1ULL << 39) >> YUZU_PAGEBITS)> page_table;
 };
 
 template <class P>
@@ -926,8 +926,8 @@ void BufferCache<P>::PopAsyncFlushes() {}
 
 template <class P>
 bool BufferCache<P>::IsRegionGpuModified(VAddr addr, size_t size) {
-    const u64 page_end = Common::DivCeil(addr + size, PAGE_SIZE);
-    for (u64 page = addr >> PAGE_BITS; page < page_end;) {
+    const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE);
+    for (u64 page = addr >> YUZU_PAGEBITS; page < page_end;) {
         const BufferId image_id = page_table[page];
         if (!image_id) {
             ++page;
@@ -938,7 +938,7 @@ bool BufferCache<P>::IsRegionGpuModified(VAddr addr, size_t size) {
             return true;
         }
         const VAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes();
-        page = Common::DivCeil(end_addr, PAGE_SIZE);
+        page = Common::DivCeil(end_addr, YUZU_PAGESIZE);
     }
     return false;
 }
@@ -946,8 +946,8 @@ bool BufferCache<P>::IsRegionGpuModified(VAddr addr, size_t size) {
 template <class P>
 bool BufferCache<P>::IsRegionRegistered(VAddr addr, size_t size) {
     const VAddr end_addr = addr + size;
-    const u64 page_end = Common::DivCeil(end_addr, PAGE_SIZE);
-    for (u64 page = addr >> PAGE_BITS; page < page_end;) {
+    const u64 page_end = Common::DivCeil(end_addr, YUZU_PAGESIZE);
+    for (u64 page = addr >> YUZU_PAGEBITS; page < page_end;) {
         const BufferId buffer_id = page_table[page];
         if (!buffer_id) {
             ++page;
@@ -959,15 +959,15 @@ bool BufferCache<P>::IsRegionRegistered(VAddr addr, size_t size) {
         if (buf_start_addr < end_addr && addr < buf_end_addr) {
             return true;
         }
-        page = Common::DivCeil(end_addr, PAGE_SIZE);
+        page = Common::DivCeil(end_addr, YUZU_PAGESIZE);
     }
     return false;
 }
 
 template <class P>
 bool BufferCache<P>::IsRegionCpuModified(VAddr addr, size_t size) {
-    const u64 page_end = Common::DivCeil(addr + size, PAGE_SIZE);
-    for (u64 page = addr >> PAGE_BITS; page < page_end;) {
+    const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE);
+    for (u64 page = addr >> YUZU_PAGEBITS; page < page_end;) {
         const BufferId image_id = page_table[page];
         if (!image_id) {
             ++page;
@@ -978,7 +978,7 @@ bool BufferCache<P>::IsRegionCpuModified(VAddr addr, size_t size) {
             return true;
         }
         const VAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes();
-        page = Common::DivCeil(end_addr, PAGE_SIZE);
+        page = Common::DivCeil(end_addr, YUZU_PAGESIZE);
     }
     return false;
 }
@@ -1472,7 +1472,7 @@ BufferId BufferCache<P>::FindBuffer(VAddr cpu_addr, u32 size) {
     if (cpu_addr == 0) {
         return NULL_BUFFER_ID;
     }
-    const u64 page = cpu_addr >> PAGE_BITS;
+    const u64 page = cpu_addr >> YUZU_PAGEBITS;
     const BufferId buffer_id = page_table[page];
     if (!buffer_id) {
         return CreateBuffer(cpu_addr, size);
@@ -1493,8 +1493,9 @@ typename BufferCache<P>::OverlapResult BufferCache<P>::ResolveOverlaps(VAddr cpu
     VAddr end = cpu_addr + wanted_size;
     int stream_score = 0;
     bool has_stream_leap = false;
-    for (; cpu_addr >> PAGE_BITS < Common::DivCeil(end, PAGE_SIZE); cpu_addr += PAGE_SIZE) {
-        const BufferId overlap_id = page_table[cpu_addr >> PAGE_BITS];
+    for (; cpu_addr >> YUZU_PAGEBITS < Common::DivCeil(end, YUZU_PAGESIZE);
+         cpu_addr += YUZU_PAGESIZE) {
+        const BufferId overlap_id = page_table[cpu_addr >> YUZU_PAGEBITS];
         if (!overlap_id) {
             continue;
         }
@@ -1520,11 +1521,11 @@ typename BufferCache<P>::OverlapResult BufferCache<P>::ResolveOverlaps(VAddr cpu
             // as a stream buffer. Increase the size to skip constantly recreating buffers.
             has_stream_leap = true;
             if (expands_right) {
-                begin -= PAGE_SIZE * 256;
+                begin -= YUZU_PAGESIZE * 256;
                 cpu_addr = begin;
             }
             if (expands_left) {
-                end += PAGE_SIZE * 256;
+                end += YUZU_PAGESIZE * 256;
             }
         }
     }
@@ -1598,8 +1599,8 @@ void BufferCache<P>::ChangeRegister(BufferId buffer_id) {
     }
     const VAddr cpu_addr_begin = buffer.CpuAddr();
     const VAddr cpu_addr_end = cpu_addr_begin + size;
-    const u64 page_begin = cpu_addr_begin / PAGE_SIZE;
-    const u64 page_end = Common::DivCeil(cpu_addr_end, PAGE_SIZE);
+    const u64 page_begin = cpu_addr_begin / YUZU_PAGESIZE;
+    const u64 page_end = Common::DivCeil(cpu_addr_end, YUZU_PAGESIZE);
     for (u64 page = page_begin; page != page_end; ++page) {
         if constexpr (insert) {
             page_table[page] = buffer_id;
@@ -369,8 +369,8 @@ bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const {
     if (!cpu_addr) {
         return false;
     }
-    const std::size_t page{(*cpu_addr & Core::Memory::PAGE_MASK) + size};
-    return page <= Core::Memory::PAGE_SIZE;
+    const std::size_t page{(*cpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
+    return page <= Core::Memory::YUZU_PAGESIZE;
 }
 
 bool MemoryManager::IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const {
@@ -214,8 +214,8 @@ private:
             return cache_begin < addr_end && addr_begin < cache_end;
         };
 
-        const u64 page_end = addr_end >> PAGE_BITS;
-        for (u64 page = addr_begin >> PAGE_BITS; page <= page_end; ++page) {
+        const u64 page_end = addr_end >> YUZU_PAGEBITS;
+        for (u64 page = addr_begin >> YUZU_PAGEBITS; page <= page_end; ++page) {
             const auto& it = cached_queries.find(page);
             if (it == std::end(cached_queries)) {
                 continue;
@@ -235,14 +235,14 @@ private:
     /// Registers the passed parameters as cached and returns a pointer to the stored cached query.
     CachedQuery* Register(VideoCore::QueryType type, VAddr cpu_addr, u8* host_ptr, bool timestamp) {
         rasterizer.UpdatePagesCachedCount(cpu_addr, CachedQuery::SizeInBytes(timestamp), 1);
-        const u64 page = static_cast<u64>(cpu_addr) >> PAGE_BITS;
+        const u64 page = static_cast<u64>(cpu_addr) >> YUZU_PAGEBITS;
         return &cached_queries[page].emplace_back(static_cast<QueryCache&>(*this), type, cpu_addr,
                                                   host_ptr);
     }
 
     /// Tries to a get a cached query. Returns nullptr on failure.
     CachedQuery* TryGet(VAddr addr) {
-        const u64 page = static_cast<u64>(addr) >> PAGE_BITS;
+        const u64 page = static_cast<u64>(addr) >> YUZU_PAGEBITS;
         const auto it = cached_queries.find(page);
         if (it == std::end(cached_queries)) {
             return nullptr;
@@ -260,8 +260,8 @@ private:
         uncommitted_flushes->push_back(addr);
     }
 
-    static constexpr std::uintptr_t PAGE_SIZE = 4096;
-    static constexpr unsigned PAGE_BITS = 12;
+    static constexpr std::uintptr_t YUZU_PAGESIZE = 4096;
+    static constexpr unsigned YUZU_PAGEBITS = 12;
 
     VideoCore::RasterizerInterface& rasterizer;
     Tegra::Engines::Maxwell3D& maxwell3d;
@@ -24,8 +24,8 @@ void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int del
     u64 cache_bytes = 0;
 
     std::atomic_thread_fence(std::memory_order_acquire);
-    const u64 page_end = Common::DivCeil(addr + size, PAGE_SIZE);
-    for (u64 page = addr >> PAGE_BITS; page != page_end; ++page) {
+    const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE);
+    for (u64 page = addr >> YUZU_PAGEBITS; page != page_end; ++page) {
         std::atomic_uint16_t& count = cached_pages.at(page >> 2).Count(page);
 
         if (delta > 0) {
@@ -44,26 +44,27 @@ void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int del
             if (uncache_bytes == 0) {
                 uncache_begin = page;
             }
-            uncache_bytes += PAGE_SIZE;
+            uncache_bytes += YUZU_PAGESIZE;
         } else if (uncache_bytes > 0) {
-            cpu_memory.RasterizerMarkRegionCached(uncache_begin << PAGE_BITS, uncache_bytes, false);
+            cpu_memory.RasterizerMarkRegionCached(uncache_begin << YUZU_PAGEBITS, uncache_bytes,
+                                                  false);
             uncache_bytes = 0;
         }
         if (count.load(std::memory_order::relaxed) == 1 && delta > 0) {
             if (cache_bytes == 0) {
                 cache_begin = page;
             }
-            cache_bytes += PAGE_SIZE;
+            cache_bytes += YUZU_PAGESIZE;
         } else if (cache_bytes > 0) {
-            cpu_memory.RasterizerMarkRegionCached(cache_begin << PAGE_BITS, cache_bytes, true);
+            cpu_memory.RasterizerMarkRegionCached(cache_begin << YUZU_PAGEBITS, cache_bytes, true);
             cache_bytes = 0;
         }
     }
     if (uncache_bytes > 0) {
-        cpu_memory.RasterizerMarkRegionCached(uncache_begin << PAGE_BITS, uncache_bytes, false);
+        cpu_memory.RasterizerMarkRegionCached(uncache_begin << YUZU_PAGEBITS, uncache_bytes, false);
    }
     if (cache_bytes > 0) {
-        cpu_memory.RasterizerMarkRegionCached(cache_begin << PAGE_BITS, cache_bytes, true);
+        cpu_memory.RasterizerMarkRegionCached(cache_begin << YUZU_PAGEBITS, cache_bytes, true);
     }
 }
 
@@ -123,8 +123,8 @@ void ShaderCache::Register(std::unique_ptr<ShaderInfo> data, VAddr addr, size_t
     const VAddr addr_end = addr + size;
     Entry* const entry = NewEntry(addr, addr_end, data.get());
 
-    const u64 page_end = (addr_end + PAGE_SIZE - 1) >> PAGE_BITS;
-    for (u64 page = addr >> PAGE_BITS; page < page_end; ++page) {
+    const u64 page_end = (addr_end + YUZU_PAGESIZE - 1) >> YUZU_PAGEBITS;
+    for (u64 page = addr >> YUZU_PAGEBITS; page < page_end; ++page) {
         invalidation_cache[page].push_back(entry);
     }
 
@@ -135,8 +135,8 @@ void ShaderCache::Register(std::unique_ptr<ShaderInfo> data, VAddr addr, size_t
 
 void ShaderCache::InvalidatePagesInRegion(VAddr addr, size_t size) {
     const VAddr addr_end = addr + size;
-    const u64 page_end = (addr_end + PAGE_SIZE - 1) >> PAGE_BITS;
-    for (u64 page = addr >> PAGE_BITS; page < page_end; ++page) {
+    const u64 page_end = (addr_end + YUZU_PAGESIZE - 1) >> YUZU_PAGEBITS;
+    for (u64 page = addr >> YUZU_PAGEBITS; page < page_end; ++page) {
         auto it = invalidation_cache.find(page);
         if (it == invalidation_cache.end()) {
             continue;
@@ -189,8 +189,8 @@ void ShaderCache::InvalidatePageEntries(std::vector<Entry*>& entries, VAddr addr
 }
 
 void ShaderCache::RemoveEntryFromInvalidationCache(const Entry* entry) {
-    const u64 page_end = (entry->addr_end + PAGE_SIZE - 1) >> PAGE_BITS;
-    for (u64 page = entry->addr_start >> PAGE_BITS; page < page_end; ++page) {
+    const u64 page_end = (entry->addr_end + YUZU_PAGESIZE - 1) >> YUZU_PAGEBITS;
+    for (u64 page = entry->addr_start >> YUZU_PAGEBITS; page < page_end; ++page) {
         const auto entries_it = invalidation_cache.find(page);
         ASSERT(entries_it != invalidation_cache.end());
         std::vector<Entry*>& entries = entries_it->second;
@@ -29,8 +29,8 @@ struct ShaderInfo {
 };
 
 class ShaderCache {
-    static constexpr u64 PAGE_BITS = 14;
-    static constexpr u64 PAGE_SIZE = u64(1) << PAGE_BITS;
+    static constexpr u64 YUZU_PAGEBITS = 14;
+    static constexpr u64 YUZU_PAGESIZE = u64(1) << YUZU_PAGEBITS;
 
     static constexpr size_t NUM_PROGRAMS = 6;
 
@@ -589,7 +589,7 @@ void TextureCache<P>::BlitImage(const Tegra::Engines::Fermi2D::Surface& dst,
 template <class P>
 typename P::ImageView* TextureCache<P>::TryFindFramebufferImageView(VAddr cpu_addr) {
     // TODO: Properly implement this
-    const auto it = page_table.find(cpu_addr >> PAGE_BITS);
+    const auto it = page_table.find(cpu_addr >> YUZU_PAGEBITS);
     if (it == page_table.end()) {
         return nullptr;
     }
@@ -1485,14 +1485,14 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
         std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>>& selected_page_table) {
         const auto page_it = selected_page_table.find(page);
         if (page_it == selected_page_table.end()) {
-            ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
+            ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS);
             return;
         }
         std::vector<ImageId>& image_ids = page_it->second;
         const auto vector_it = std::ranges::find(image_ids, image_id);
         if (vector_it == image_ids.end()) {
             ASSERT_MSG(false, "Unregistering unregistered image in page=0x{:x}",
-                       page << PAGE_BITS);
+                       page << YUZU_PAGEBITS);
             return;
         }
         image_ids.erase(vector_it);
@@ -1504,14 +1504,14 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
     ForEachCPUPage(image.cpu_addr, image.guest_size_bytes, [this, map_id](u64 page) {
         const auto page_it = page_table.find(page);
         if (page_it == page_table.end()) {
-            ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
+            ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS);
             return;
         }
         std::vector<ImageMapId>& image_map_ids = page_it->second;
         const auto vector_it = std::ranges::find(image_map_ids, map_id);
         if (vector_it == image_map_ids.end()) {
             ASSERT_MSG(false, "Unregistering unregistered image in page=0x{:x}",
-                       page << PAGE_BITS);
+                       page << YUZU_PAGEBITS);
             return;
         }
         image_map_ids.erase(vector_it);
@@ -1532,7 +1532,7 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
     ForEachCPUPage(cpu_addr, size, [this, image_id](u64 page) {
         const auto page_it = page_table.find(page);
         if (page_it == page_table.end()) {
-            ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
+            ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS);
             return;
         }
         std::vector<ImageMapId>& image_map_ids = page_it->second;
@@ -47,7 +47,7 @@ struct ImageViewInOut {
 template <class P>
 class TextureCache {
     /// Address shift for caching images into a hash table
-    static constexpr u64 PAGE_BITS = 20;
+    static constexpr u64 YUZU_PAGEBITS = 20;
 
     /// Enables debugging features to the texture cache
     static constexpr bool ENABLE_VALIDATION = P::ENABLE_VALIDATION;
@@ -178,8 +178,8 @@ private:
     template <typename Func>
     static void ForEachCPUPage(VAddr addr, size_t size, Func&& func) {
         static constexpr bool RETURNS_BOOL = std::is_same_v<std::invoke_result<Func, u64>, bool>;
-        const u64 page_end = (addr + size - 1) >> PAGE_BITS;
-        for (u64 page = addr >> PAGE_BITS; page <= page_end; ++page) {
+        const u64 page_end = (addr + size - 1) >> YUZU_PAGEBITS;
+        for (u64 page = addr >> YUZU_PAGEBITS; page <= page_end; ++page) {
             if constexpr (RETURNS_BOOL) {
                 if (func(page)) {
                     break;
@@ -193,8 +193,8 @@ private:
     template <typename Func>
     static void ForEachGPUPage(GPUVAddr addr, size_t size, Func&& func) {
         static constexpr bool RETURNS_BOOL = std::is_same_v<std::invoke_result<Func, u64>, bool>;
-        const u64 page_end = (addr + size - 1) >> PAGE_BITS;
-        for (u64 page = addr >> PAGE_BITS; page <= page_end; ++page) {
+        const u64 page_end = (addr + size - 1) >> YUZU_PAGEBITS;
+        for (u64 page = addr >> YUZU_PAGEBITS; page <= page_end; ++page) {
             if constexpr (RETURNS_BOOL) {
                 if (func(page)) {
                     break;