texture_cache: Handle uncontinuous surfaces.
parent e60ed2bb3e
commit bdf9faab33

4 changed files with 82 additions and 21 deletions
@@ -202,11 +202,12 @@ const u8* MemoryManager::GetPointer(GPUVAddr addr) const {
 }
 
 bool MemoryManager::IsBlockContinuous(const GPUVAddr start, const std::size_t size) const {
-    const GPUVAddr end = start + size;
+    const std::size_t inner_size = size - 1;
+    const GPUVAddr end = start + inner_size;
     const auto host_ptr_start = reinterpret_cast<std::uintptr_t>(GetPointer(start));
     const auto host_ptr_end = reinterpret_cast<std::uintptr_t>(GetPointer(end));
     const auto range = static_cast<std::size_t>(host_ptr_end - host_ptr_start);
-    return range == size;
+    return range == inner_size;
 }
 
 void MemoryManager::ReadBlock(GPUVAddr src_addr, void* dest_buffer, const std::size_t size) const {
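The hunk above fixes an off-by-one in the continuity probe: the last byte of a block of length size starting at start lives at start + size - 1, so probing start + size inspects the first byte *after* the block and can misclassify a range whose own pages are perfectly contiguous (for example, a block that ends exactly at a mapping boundary). A minimal standalone sketch of the corrected logic, using a toy page table — none of these names are the suyu API:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <unordered_map>

using GPUVAddr = std::uint64_t;
constexpr std::size_t page_size = 0x1000;

struct ToyMemoryManager {
    // Guest page index -> host address of that page's first byte
    // (absent = unmapped; GetPointer then yields 0).
    std::unordered_map<GPUVAddr, std::uintptr_t> page_table;

    std::uintptr_t GetPointer(GPUVAddr addr) const {
        const auto it = page_table.find(addr / page_size);
        return it == page_table.end() ? 0 : it->second + addr % page_size;
    }

    // Same logic as the patched IsBlockContinuous: compare the host
    // distance between the FIRST and the LAST byte of the block.
    bool IsBlockContinuous(GPUVAddr start, std::size_t size) const {
        const std::size_t inner_size = size - 1;
        const auto host_start = GetPointer(start);
        const auto host_end = GetPointer(start + inner_size);
        return static_cast<std::size_t>(host_end - host_start) == inner_size;
    }
};

int main() {
    static unsigned char host_a[2 * page_size];
    static unsigned char host_b[page_size];

    ToyMemoryManager mm;
    // Guest pages 0 and 1 backed by one contiguous host allocation.
    mm.page_table[0] = reinterpret_cast<std::uintptr_t>(host_a);
    mm.page_table[1] = reinterpret_cast<std::uintptr_t>(host_a) + page_size;
    assert(mm.IsBlockContinuous(0, 2 * page_size));

    // Remap guest page 1 elsewhere: the same block is now uncontinuous,
    // so LoadBuffer/FlushBuffer must go through the temporary buffer.
    mm.page_table[1] = reinterpret_cast<std::uintptr_t>(host_b);
    assert(!mm.IsBlockContinuous(0, 2 * page_size));
}

With the old probe, the contiguous case above would have consulted unmapped guest page 2 instead of the block's own last page.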
@@ -68,12 +68,27 @@ void SurfaceBaseImpl::SwizzleFunc(MortonSwizzleMode mode, u8* memory, const Surf
 }
 
 void SurfaceBaseImpl::LoadBuffer(Tegra::MemoryManager& memory_manager,
-                                 std::vector<u8>& staging_buffer) {
+                                 StagingCache& staging_cache) {
     MICROPROFILE_SCOPE(GPU_Load_Texture);
-    const auto host_ptr{memory_manager.GetPointer(gpu_addr)};
-    if (!host_ptr) {
-        return;
+    auto& staging_buffer = staging_cache.GetBuffer(0);
+    u8* host_ptr;
+    is_continuous = memory_manager.IsBlockContinuous(gpu_addr, guest_memory_size);
+
+    // Handle continuouty
+    if (is_continuous) {
+        // Use physical memory directly
+        host_ptr = memory_manager.GetPointer(gpu_addr);
+        if (!host_ptr) {
+            return;
+        }
+    } else {
+        // Use an extra temporal buffer
+        auto& tmp_buffer = staging_cache.GetBuffer(1);
+        tmp_buffer.resize(guest_memory_size);
+        host_ptr = tmp_buffer.data();
+        memory_manager.ReadBlockUnsafe(gpu_addr, host_ptr, guest_memory_size);
     }
+
     if (params.is_tiled) {
         ASSERT_MSG(params.block_width == 0, "Block width is defined as {} on texture target {}",
                    params.block_width, static_cast<u32>(params.target));
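LoadBuffer now selects its source pointer by continuity: when the surface's guest range is host-contiguous it deswizzles straight out of the host mapping, and otherwise it first gathers the range page by page through ReadBlockUnsafe into the cache's second buffer. A reduced sketch of that dispatch, with the memory-manager operations abstracted into callables — a hypothetical helper, not code from this commit:

#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

using u8 = std::uint8_t;

// Returns a pointer to `size` readable bytes for the surface: the host
// mapping itself on the fast path, or a gathered copy in `scratch`.
u8* AcquireSourceBytes(bool is_continuous, std::size_t size,
                       const std::function<u8*()>& get_host_pointer,
                       const std::function<void(u8*, std::size_t)>& read_block,
                       std::vector<u8>& scratch) {
    if (is_continuous) {
        return get_host_pointer();  // zero-copy: the swizzle step reads guest memory in place
    }
    scratch.resize(size);              // staging_cache.GetBuffer(1) in the real code
    read_block(scratch.data(), size);  // page-by-page gather
    return scratch.data();
}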
@@ -123,12 +138,25 @@ void SurfaceBaseImpl::LoadBuffer(Tegra::MemoryManager& memory_manager,
 }
 
 void SurfaceBaseImpl::FlushBuffer(Tegra::MemoryManager& memory_manager,
-                                  std::vector<u8>& staging_buffer) {
+                                  StagingCache& staging_cache) {
     MICROPROFILE_SCOPE(GPU_Flush_Texture);
-    const auto host_ptr{memory_manager.GetPointer(gpu_addr)};
-    if (!host_ptr) {
-        return;
+    auto& staging_buffer = staging_cache.GetBuffer(0);
+    u8* host_ptr;
+
+    // Handle continuouty
+    if (is_continuous) {
+        // Use physical memory directly
+        host_ptr = memory_manager.GetPointer(gpu_addr);
+        if (!host_ptr) {
+            return;
+        }
+    } else {
+        // Use an extra temporal buffer
+        auto& tmp_buffer = staging_cache.GetBuffer(1);
+        tmp_buffer.resize(guest_memory_size);
+        host_ptr = tmp_buffer.data();
     }
+
     if (params.is_tiled) {
         ASSERT_MSG(params.block_width == 0, "Block width is defined as {}", params.block_width);
         for (u32 level = 0; level < params.num_levels; ++level) {
@@ -154,6 +182,9 @@ void SurfaceBaseImpl::FlushBuffer(Tegra::MemoryManager& memory_manager,
             }
         }
     }
+    if (!is_continuous) {
+        memory_manager.WriteBlockUnsafe(gpu_addr, host_ptr, guest_memory_size);
+    }
 }
 
 } // namespace VideoCommon
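The write-back above is the other half of the temporary-buffer path: on the continuous path the deswizzle already wrote through the host mapping, so only the uncontinuous case needs WriteBlockUnsafe to scatter the temporary back to the guest pages. The same shape as a hypothetical helper, mirroring AcquireSourceBytes above:

#include <cstddef>
#include <cstdint>
#include <functional>

using u8 = std::uint8_t;

// Commits flushed bytes after deswizzling into `dst`. When the range was
// continuous, `dst` aliased guest memory and nothing more is needed;
// otherwise `dst` was the temporary and must be scattered back.
void CommitFlushedBytes(bool is_continuous, const u8* dst, std::size_t size,
                        const std::function<void(const u8*, std::size_t)>& write_block) {
    if (!is_continuous) {
        write_block(dst, size);  // WriteBlockUnsafe(gpu_addr, ...) in the commit
    }
}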
@@ -32,11 +32,28 @@ enum class MatchStructureResult : u32 {
     None = 2,
 };
 
+class StagingCache {
+public:
+    StagingCache() {}
+    ~StagingCache() = default;
+
+    std::vector<u8>& GetBuffer(std::size_t index) {
+        return staging_buffer[index];
+    }
+
+    void SetSize(std::size_t size) {
+        staging_buffer.resize(size);
+    }
+
+private:
+    std::vector<std::vector<u8>> staging_buffer;
+};
+
 class SurfaceBaseImpl {
 public:
-    void LoadBuffer(Tegra::MemoryManager& memory_manager, std::vector<u8>& staging_buffer);
+    void LoadBuffer(Tegra::MemoryManager& memory_manager, StagingCache& staging_cache);
 
-    void FlushBuffer(Tegra::MemoryManager& memory_manager, std::vector<u8>& staging_buffer);
+    void FlushBuffer(Tegra::MemoryManager& memory_manager, StagingCache& staging_cache);
 
     GPUVAddr GetGpuAddr() const {
         return gpu_addr;
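The new StagingCache replaces the texture cache's single std::vector<u8> staging buffer with an indexed pool of buffers, so each slot keeps its capacity across uploads instead of being reallocated. In this commit two slots are used: index 0 for the regular staging data and index 1 for the uncontinuous-range temporary. A self-contained usage sketch; the slot meanings are taken from the hunks above and below, and the class is a trimmed copy of the one just declared:

#include <cstddef>
#include <cstdint>
#include <vector>

using u8 = std::uint8_t;

class StagingCache {
public:
    std::vector<u8>& GetBuffer(std::size_t index) {
        return staging_buffer[index];
    }
    void SetSize(std::size_t size) {
        staging_buffer.resize(size);
    }

private:
    std::vector<std::vector<u8>> staging_buffer;
};

int main() {
    StagingCache cache;
    cache.SetSize(2);                  // as the TextureCache constructor does below
    cache.GetBuffer(0).resize(0x4000); // slot 0: regular host-side staging data
    cache.GetBuffer(1).resize(0x2000); // slot 1: temporary for uncontinuous ranges
    // Each vector keeps its capacity between calls, so repeated resizes
    // stop allocating once a slot reaches its high-water mark.
}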
@@ -93,6 +110,14 @@ public:
         return mipmap_sizes[level];
     }
 
+    void MarkAsContinuous(const bool is_continuous) {
+        this->is_continuous = is_continuous;
+    }
+
+    bool IsContinuous() const {
+        return is_continuous;
+    }
+
     bool IsLinear() const {
         return !params.is_tiled;
     }
@@ -122,8 +147,8 @@ public:
     MatchStructureResult MatchesStructure(const SurfaceParams& rhs) const {
         // Buffer surface Check
         if (params.IsBuffer()) {
-            const std::size_t wd1 = params.width*params.GetBytesPerPixel();
-            const std::size_t wd2 = rhs.width*rhs.GetBytesPerPixel();
+            const std::size_t wd1 = params.width * params.GetBytesPerPixel();
+            const std::size_t wd2 = rhs.width * rhs.GetBytesPerPixel();
             if (wd1 == wd2) {
                 return MatchStructureResult::FullMatch;
             }
@@ -193,6 +218,7 @@ protected:
     CacheAddr cache_addr{};
     CacheAddr cache_addr_end{};
     VAddr cpu_addr{};
+    bool is_continuous{};
 
     std::vector<std::size_t> mipmap_sizes;
     std::vector<std::size_t> mipmap_offsets;
@@ -220,6 +220,7 @@ protected:
             SetEmptyColorBuffer(i);
         }
         SetEmptyDepthBuffer();
+        staging_cache.SetSize(2);
     }
 
     ~TextureCache() = default;
@@ -244,6 +245,8 @@ protected:
                   gpu_addr);
             return;
         }
+        bool continuouty = memory_manager->IsBlockContinuous(gpu_addr, size);
+        surface->MarkAsContinuous(continuouty);
         surface->SetCacheAddr(cache_ptr);
         surface->SetCpuAddr(*cpu_addr);
         RegisterInnerCache(surface);
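A note on the registration hunk above: continuity is probed once when a surface is registered and cached on it through MarkAsContinuous, so FlushBuffer can branch on the stored flag; LoadBuffer re-probes on every upload, presumably because the guest mapping can change between registration and use.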
@@ -611,9 +614,9 @@ private:
     }
 
     void LoadSurface(const TSurface& surface) {
-        staging_buffer.resize(surface->GetHostSizeInBytes());
-        surface->LoadBuffer(*memory_manager, staging_buffer);
-        surface->UploadTexture(staging_buffer);
+        staging_cache.GetBuffer(0).resize(surface->GetHostSizeInBytes());
+        surface->LoadBuffer(*memory_manager, staging_cache);
+        surface->UploadTexture(staging_cache.GetBuffer(0));
         surface->MarkAsModified(false, Tick());
     }
 
@@ -621,9 +624,9 @@ private:
         if (!surface->IsModified()) {
            return;
         }
-        staging_buffer.resize(surface->GetHostSizeInBytes());
-        surface->DownloadTexture(staging_buffer);
-        surface->FlushBuffer(*memory_manager, staging_buffer);
+        staging_cache.GetBuffer(0).resize(surface->GetHostSizeInBytes());
+        surface->DownloadTexture(staging_cache.GetBuffer(0));
+        surface->FlushBuffer(*memory_manager, staging_cache);
         surface->MarkAsModified(false, Tick());
     }
 
@@ -723,7 +726,7 @@ private:
         render_targets;
     FramebufferTargetInfo depth_buffer;
 
-    std::vector<u8> staging_buffer;
+    StagingCache staging_cache;
     std::recursive_mutex mutex;
 };