Merge pull request #2413 from FernandoS27/opt-gpu
Rasterizer Cache: refactor flushing & optimize memory usage of surfaces
Commit 940a71089d
7 changed files with 54 additions and 33 deletions
@@ -37,9 +37,6 @@ public:
     /// Gets the size of the shader in guest memory, required for cache management
     virtual std::size_t GetSizeInBytes() const = 0;

-    /// Writes any cached resources back to memory
-    virtual void Flush() = 0;
-
     /// Sets whether the cached object should be considered registered
     void SetIsRegistered(bool registered) {
         is_registered = registered;
@@ -158,6 +155,8 @@ protected:
         return ++modified_ticks;
     }

+    virtual void FlushObjectInner(const T& object) = 0;
+
     /// Flushes the specified object, updating appropriate cache state as needed
     void FlushObject(const T& object) {
         std::lock_guard lock{mutex};
@@ -165,7 +164,7 @@ protected:
         if (!object->IsDirty()) {
             return;
         }
-        object->Flush();
+        FlushObjectInner(object);
         object->MarkAsModified(false, *this);
     }
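Taken together, the hunks above replace the per-object virtual Flush() with a pure-virtual FlushObjectInner() that the shared FlushObject() path now calls. A minimal sketch of the resulting pattern, with the bookkeeping simplified (MarkAsModified takes only a flag here, and SimpleCache is an illustrative name, not the real class):

#include <mutex>

// Sketch only: the real RasterizerCache also tracks registration, ticks and host pointers.
template <typename T>
class SimpleCache {
public:
    virtual ~SimpleCache() = default;

protected:
    // Each derived cache decides what "write back to guest memory" means
    // (it may legitimately be a no-op for read-only data).
    virtual void FlushObjectInner(const T& object) = 0;

    // Shared flush path: skip clean objects, delegate the actual write-back,
    // then clear the dirty state.
    void FlushObject(const T& object) {
        std::lock_guard lock{mutex};
        if (!object->IsDirty()) {
            return;
        }
        FlushObjectInner(object);
        object->MarkAsModified(false);
    }

private:
    std::mutex mutex;
};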
@@ -42,9 +42,6 @@ public:
         return alignment;
     }

-    // We do not have to flush this cache as things in it are never modified by us.
-    void Flush() override {}
-
private:
    VAddr cpu_addr{};
    std::size_t size{};
@@ -75,6 +72,9 @@ public:
protected:
    void AlignBuffer(std::size_t alignment);

+    // We do not have to flush this cache as things in it are never modified by us.
+    void FlushObjectInner(const std::shared_ptr<CachedBufferEntry>& object) override {}
+
private:
    OGLStreamBuffer stream_buffer;
@@ -46,7 +46,7 @@ public:
    /// Reloads the global region from guest memory
    void Reload(u32 size_);

-    void Flush() override;
+    void Flush();

private:
    VAddr cpu_addr{};
@@ -65,6 +65,11 @@ public:
    GlobalRegion GetGlobalRegion(const GLShader::GlobalMemoryEntry& descriptor,
                                 Tegra::Engines::Maxwell3D::Regs::ShaderStage stage);

+protected:
+    void FlushObjectInner(const GlobalRegion& object) override {
+        object->Flush();
+    }
+
private:
    GlobalRegion TryGetReservedGlobalRegion(CacheAddr addr, u32 size) const;
    GlobalRegion GetUncachedGlobalRegion(GPUVAddr addr, u8* host_ptr, u32 size);
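The buffer, global and shader cache hunks show the two shapes the new override takes: an empty body when the cached data is never modified by the emulator, and a forward to the object's own flush when it is. Purely as a recap, here is a self-contained toy version of both styles (Entry, ReadOnlyCache and WritableCache are hypothetical names, not types from this PR):

#include <memory>

struct Entry {
    bool dirty = false;
    void Flush() { dirty = false; }  // stand-in for writing cached data back to guest memory
};
using EntryPtr = std::shared_ptr<Entry>;

class CacheBase {
protected:
    virtual ~CacheBase() = default;
    virtual void FlushObjectInner(const EntryPtr& object) = 0;
};

// Shader/buffer caches: contents are never modified by us, so there is
// nothing to write back and the override can be empty.
class ReadOnlyCache : public CacheBase {
protected:
    void FlushObjectInner(const EntryPtr& /*object*/) override {}
};

// Global-region and surface caches: the cached object knows how to write
// itself back, so the cache just forwards to it.
class WritableCache : public CacheBase {
protected:
    void FlushObjectInner(const EntryPtr& object) override {
        object->Flush();
    }
};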
@@ -628,8 +628,10 @@ CachedSurface::CachedSurface(const SurfaceParams& params)
 }

 MICROPROFILE_DEFINE(OpenGL_SurfaceLoad, "OpenGL", "Surface Load", MP_RGB(128, 192, 64));
-void CachedSurface::LoadGLBuffer() {
+void CachedSurface::LoadGLBuffer(RasterizerTemporaryMemory& res_cache_tmp_mem) {
     MICROPROFILE_SCOPE(OpenGL_SurfaceLoad);
-    gl_buffer.resize(params.max_mip_level);
+    auto& gl_buffer = res_cache_tmp_mem.gl_buffer;
+    if (gl_buffer.size() < params.max_mip_level)
+        gl_buffer.resize(params.max_mip_level);
     for (u32 i = 0; i < params.max_mip_level; i++)
         gl_buffer[i].resize(params.GetMipmapSizeGL(i));
@@ -671,13 +673,13 @@ void CachedSurface::LoadGLBuffer() {
 }

 MICROPROFILE_DEFINE(OpenGL_SurfaceFlush, "OpenGL", "Surface Flush", MP_RGB(128, 192, 64));
-void CachedSurface::FlushGLBuffer() {
+void CachedSurface::FlushGLBuffer(RasterizerTemporaryMemory& res_cache_tmp_mem) {
     MICROPROFILE_SCOPE(OpenGL_SurfaceFlush);

     ASSERT_MSG(!IsPixelFormatASTC(params.pixel_format), "Unimplemented");

+    auto& gl_buffer = res_cache_tmp_mem.gl_buffer;
     // OpenGL temporary buffer needs to be big enough to store raw texture size
     gl_buffer.resize(1);
     gl_buffer[0].resize(GetSizeInBytes());

     const FormatTuple& tuple = GetFormatTuple(params.pixel_format, params.component_type);
@@ -713,10 +715,12 @@ void CachedSurface::FlushGLBuffer() {
     }
 }

-void CachedSurface::UploadGLMipmapTexture(u32 mip_map, GLuint read_fb_handle,
-                                          GLuint draw_fb_handle) {
+void CachedSurface::UploadGLMipmapTexture(RasterizerTemporaryMemory& res_cache_tmp_mem, u32 mip_map,
+                                          GLuint read_fb_handle, GLuint draw_fb_handle) {
     const auto& rect{params.GetRect(mip_map)};

+    auto& gl_buffer = res_cache_tmp_mem.gl_buffer;
+
     // Load data from memory to the surface
     const auto x0 = static_cast<GLint>(rect.left);
     const auto y0 = static_cast<GLint>(rect.bottom);
@@ -845,11 +849,12 @@ void CachedSurface::EnsureTextureDiscrepantView() {
 }

 MICROPROFILE_DEFINE(OpenGL_TextureUL, "OpenGL", "Texture Upload", MP_RGB(128, 192, 64));
-void CachedSurface::UploadGLTexture(GLuint read_fb_handle, GLuint draw_fb_handle) {
+void CachedSurface::UploadGLTexture(RasterizerTemporaryMemory& res_cache_tmp_mem,
+                                    GLuint read_fb_handle, GLuint draw_fb_handle) {
     MICROPROFILE_SCOPE(OpenGL_TextureUL);

     for (u32 i = 0; i < params.max_mip_level; i++)
-        UploadGLMipmapTexture(i, read_fb_handle, draw_fb_handle);
+        UploadGLMipmapTexture(res_cache_tmp_mem, i, read_fb_handle, draw_fb_handle);
 }

 void CachedSurface::UpdateSwizzle(Tegra::Texture::SwizzleSource swizzle_x,
@@ -929,8 +934,8 @@ Surface RasterizerCacheOpenGL::GetColorBufferSurface(std::size_t index, bool pre
 }

 void RasterizerCacheOpenGL::LoadSurface(const Surface& surface) {
-    surface->LoadGLBuffer();
-    surface->UploadGLTexture(read_framebuffer.handle, draw_framebuffer.handle);
+    surface->LoadGLBuffer(temporal_memory);
+    surface->UploadGLTexture(temporal_memory, read_framebuffer.handle, draw_framebuffer.handle);
     surface->MarkAsModified(false, *this);
     surface->MarkForReload(false);
 }
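The .cpp hunks above carry the memory optimization from the PR title: CachedSurface no longer keeps a per-surface gl_buffer; every load and flush instead borrows one shared scratch buffer owned by the cache. A minimal self-contained sketch of that idea, with hypothetical names (ScratchMemory, Surface::Load) standing in for the real types:

#include <cstdint>
#include <cstdio>
#include <vector>

// Shared scratch storage, analogous to RasterizerTemporaryMemory: grown on demand
// and reused for every surface instead of living inside each surface object.
struct ScratchMemory {
    std::vector<std::vector<std::uint8_t>> gl_buffer;
};

struct Surface {
    std::size_t mip_levels;
    std::size_t bytes_in_level0;

    // Borrow the scratch buffer for the duration of the load; nothing is retained
    // here, so an idle surface costs no CPU-side staging memory.
    void Load(ScratchMemory& scratch) const {
        auto& gl_buffer = scratch.gl_buffer;
        if (gl_buffer.size() < mip_levels) {
            gl_buffer.resize(mip_levels);
        }
        for (std::size_t i = 0; i < mip_levels; ++i) {
            gl_buffer[i].resize(bytes_in_level0 >> i);  // stand-in for GetMipmapSizeGL(i)
            // ... decode/swizzle guest data into gl_buffer[i] here ...
        }
    }
};

int main() {
    ScratchMemory scratch;                  // one instance for the whole cache
    Surface a{4, 1 << 20}, b{2, 1 << 18};
    a.Load(scratch);                        // scratch grows to fit the largest request
    b.Load(scratch);                        // and is simply reused afterwards
    std::printf("scratch mip buffers: %zu\n", scratch.gl_buffer.size());
}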
@@ -355,6 +355,12 @@ namespace OpenGL {

 class RasterizerOpenGL;

+// This is used to store temporary big buffers,
+// instead of creating/destroying all the time
+struct RasterizerTemporaryMemory {
+    std::vector<std::vector<u8>> gl_buffer;
+};
+
 class CachedSurface final : public RasterizerCacheObject {
 public:
     explicit CachedSurface(const SurfaceParams& params);
@@ -371,10 +377,6 @@ public:
         return memory_size;
     }

-    void Flush() override {
-        FlushGLBuffer();
-    }
-
     const OGLTexture& Texture() const {
         return texture;
     }
@@ -397,11 +399,12 @@ public:
     }

     // Read/Write data in Switch memory to/from gl_buffer
-    void LoadGLBuffer();
-    void FlushGLBuffer();
+    void LoadGLBuffer(RasterizerTemporaryMemory& res_cache_tmp_mem);
+    void FlushGLBuffer(RasterizerTemporaryMemory& res_cache_tmp_mem);

     // Upload data in gl_buffer to this surface's texture
-    void UploadGLTexture(GLuint read_fb_handle, GLuint draw_fb_handle);
+    void UploadGLTexture(RasterizerTemporaryMemory& res_cache_tmp_mem, GLuint read_fb_handle,
+                         GLuint draw_fb_handle);

     void UpdateSwizzle(Tegra::Texture::SwizzleSource swizzle_x,
                        Tegra::Texture::SwizzleSource swizzle_y,
@@ -429,13 +432,13 @@ public:
     }

 private:
-    void UploadGLMipmapTexture(u32 mip_map, GLuint read_fb_handle, GLuint draw_fb_handle);
+    void UploadGLMipmapTexture(RasterizerTemporaryMemory& res_cache_tmp_mem, u32 mip_map,
+                               GLuint read_fb_handle, GLuint draw_fb_handle);

     void EnsureTextureDiscrepantView();

     OGLTexture texture;
     OGLTexture discrepant_view;
-    std::vector<std::vector<u8>> gl_buffer;
     SurfaceParams params{};
     GLenum gl_target{};
     GLenum gl_internal_format{};
@@ -473,6 +476,11 @@ public:
     void SignalPreDrawCall();
     void SignalPostDrawCall();

+protected:
+    void FlushObjectInner(const Surface& object) override {
+        object->FlushGLBuffer(temporal_memory);
+    }
+
 private:
     void LoadSurface(const Surface& surface);
     Surface GetSurface(const SurfaceParams& params, bool preserve_contents = true);
@@ -519,6 +527,8 @@ private:
     std::array<Surface, Maxwell::NumRenderTargets> current_color_buffers;
     Surface last_depth_buffer;

+    RasterizerTemporaryMemory temporal_memory;
+
     using SurfaceIntervalCache = boost::icl::interval_map<CacheAddr, Surface>;
     using SurfaceInterval = typename SurfaceIntervalCache::interval_type;
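The header hunks tie the two halves together: RasterizerCacheOpenGL owns a single RasterizerTemporaryMemory (temporal_memory) and threads it into LoadGLBuffer/FlushGLBuffer/UploadGLTexture, including from FlushObjectInner. A hedged sketch of that ownership pattern, using toy stand-in types and assuming (as a reading of the code, not something stated in the diff) that the cache is driven from a single thread so one scratch buffer is safe to share:

#include <cstdint>
#include <memory>
#include <vector>

struct TemporaryMemoryLike {
    std::vector<std::vector<std::uint8_t>> gl_buffer;
};

struct SurfaceLike {
    void LoadGLBuffer(TemporaryMemoryLike&) { /* fill scratch from guest memory */ }
    void FlushGLBuffer(TemporaryMemoryLike&) { /* write scratch back to guest memory */ }
    void UploadGLTexture(TemporaryMemoryLike&) { /* upload scratch to the GL texture */ }
};

class SurfaceCacheLike {
public:
    void LoadSurface(const std::shared_ptr<SurfaceLike>& surface) {
        surface->LoadGLBuffer(temporal_memory);
        surface->UploadGLTexture(temporal_memory);
    }

protected:
    void FlushObjectInner(const std::shared_ptr<SurfaceLike>& surface) {
        surface->FlushGLBuffer(temporal_memory);
    }

private:
    // One scratch allocation shared by every surface the cache manages.
    TemporaryMemoryLike temporal_memory;
};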
@@ -57,9 +57,6 @@ public:
         return shader_length;
     }

-    // We do not have to flush this cache as things in it are never modified by us.
-    void Flush() override {}
-
     /// Gets the shader entries for the shader
     const GLShader::ShaderEntries& GetShaderEntries() const {
         return entries;
@@ -123,6 +120,10 @@ public:
     /// Gets the current specified shader stage program
     Shader GetStageProgram(Maxwell::ShaderProgram program);

+protected:
+    // We do not have to flush this cache as things in it are never modified by us.
+    void FlushObjectInner(const Shader& object) override {}
+
 private:
     std::unordered_map<u64, UnspecializedShader> GenerateUnspecializedShaders(
         const std::atomic_bool& stop_loading, const VideoCore::DiskResourceLoadCallback& callback,
@@ -49,9 +49,6 @@ public:
         return alignment;
     }

-    // We do not have to flush this cache as things in it are never modified by us.
-    void Flush() override {}
-
 private:
     VAddr cpu_addr{};
     std::size_t size{};
@@ -87,6 +84,10 @@ public:
         return buffer_handle;
     }

+protected:
+    // We do not have to flush this cache as things in it are never modified by us.
+    void FlushObjectInner(const std::shared_ptr<CachedBufferEntry>& object) override {}
+
 private:
     void AlignBuffer(std::size_t alignment);