Merge pull request #7368 from FernandoS27/vulkan-conv

Fix ART Blit detection regression and add D24S8 <-> RGBA8 conv to Vulkan
bunnei 2021-11-20 16:51:13 -08:00 committed by GitHub
commit ea6fa044f3
18 changed files with 595 additions and 23 deletions


@ -7,6 +7,7 @@
#include <bit>
#include <climits>
#include <cstddef>
#include <type_traits>
#include "common/common_types.h"
@ -44,4 +45,10 @@ template <typename T>
return static_cast<u32>(log2_f + static_cast<u64>((value ^ (1ULL << log2_f)) != 0ULL));
}
template <typename T>
requires std::is_integral_v<T>
[[nodiscard]] T NextPow2(T value) {
return static_cast<T>(1ULL << ((8U * sizeof(T)) - std::countl_zero(value - 1U)));
}
} // namespace Common
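For reference, the new Common::NextPow2 helper rounds an integral value up to the next power of two by counting the leading zeros of value - 1. A minimal standalone sketch of that behaviour (a hedged illustration; the sample values are not from the PR):

// Illustrative demo of the rounding behaviour of the NextPow2 helper added above.
#include <bit>
#include <cassert>
#include <type_traits>

template <typename T>
    requires std::is_integral_v<T>
[[nodiscard]] T NextPow2(T value) {
    // value is assumed to be >= 1 and below the largest representable power of two.
    return static_cast<T>(1ULL << ((8U * sizeof(T)) - std::countl_zero(value - 1U)));
}

int main() {
    assert(NextPow2(1u) == 1u);       // already a power of two -> unchanged
    assert(NextPow2(3u) == 4u);
    assert(NextPow2(4096u) == 4096u);
    assert(NextPow2(4097u) == 8192u); // rounds up to the next power of two
}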


@ -10,8 +10,14 @@ set(SHADER_FILES
astc_decoder.comp
block_linear_unswizzle_2d.comp
block_linear_unswizzle_3d.comp
convert_abgr8_to_d24s8.frag
convert_b10g11r11_to_d24s8.frag
convert_d24s8_to_abgr8.frag
convert_d24s8_to_b10g11r11.frag
convert_d24s8_to_r16g16.frag
convert_depth_to_float.frag
convert_float_to_depth.frag
convert_r16g16_to_d24s8.frag
full_screen_triangle.vert
fxaa.frag
fxaa.vert


@ -0,0 +1,17 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#version 450
#extension GL_ARB_shader_stencil_export : require
layout(binding = 0) uniform sampler2D color_texture;
void main() {
ivec2 coord = ivec2(gl_FragCoord.xy);
uvec4 color = uvec4(texelFetch(color_texture, coord, 0).rgba * (exp2(8) - 1.0f));
uint depth_unorm = (color.r << 16) | (color.g << 8) | color.b;
gl_FragDepth = float(depth_unorm) / (exp2(24.0) - 1.0f);
gl_FragStencilRefARB = int(color.a);
}
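This shader packs the four 8-bit colour channels into a 24-bit depth value plus an 8-bit stencil reference. A CPU-side sketch of the same arithmetic, for clarity only (PackAbgr8ToD24S8 is a hypothetical helper, not part of this PR):

// Hypothetical CPU-side equivalent of the packing done by the shader above.
#include <cstdint>

struct D24S8 {
    float depth;          // value the shader writes to gl_FragDepth, normalized to [0, 1]
    std::uint8_t stencil; // value the shader exports through gl_FragStencilRefARB
};

// r, g, b, a are the 8-bit channels recovered by scaling the sampled texel by 255.
D24S8 PackAbgr8ToD24S8(std::uint8_t r, std::uint8_t g, std::uint8_t b, std::uint8_t a) {
    const std::uint32_t depth_unorm =
        (std::uint32_t{r} << 16) | (std::uint32_t{g} << 8) | std::uint32_t{b};
    return D24S8{
        .depth = static_cast<float>(depth_unorm) / 16777215.0f, // 2^24 - 1
        .stencil = a,
    };
}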


@ -0,0 +1,19 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#version 450
#extension GL_ARB_shader_stencil_export : require
layout(binding = 0) uniform sampler2D color_texture;
void main() {
ivec2 coord = ivec2(gl_FragCoord.xy);
vec4 color = texelFetch(color_texture, coord, 0).rgba;
uint depth_stencil_unorm = (uint(color.b * (exp2(10) - 1.0f)) << 22)
| (uint(color.g * (exp2(11) - 1.0f)) << 11)
| (uint(color.r * (exp2(11) - 1.0f)));
gl_FragDepth = float(depth_stencil_unorm >> 8) / (exp2(24.0) - 1.0f);
gl_FragStencilRefARB = int(depth_stencil_unorm & 0x00FF);
}


@ -0,0 +1,21 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#version 450
layout(binding = 0) uniform sampler2D depth_tex;
layout(binding = 1) uniform isampler2D stencil_tex;
layout(location = 0) out vec4 color;
void main() {
ivec2 coord = ivec2(gl_FragCoord.xy);
uint depth = uint(textureLod(depth_tex, coord, 0).r * (exp2(24.0) - 1.0f));
uint stencil = uint(textureLod(stencil_tex, coord, 0).r);
color.r = float(depth >> 16) / (exp2(8) - 1.0);
color.g = float((depth >> 8) & 0x00FF) / (exp2(8) - 1.0);
color.b = float(depth & 0x00FF) / (exp2(8) - 1.0);
color.a = float(stencil) / (exp2(8) - 1.0);
}


@ -0,0 +1,21 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#version 450
layout(binding = 0) uniform sampler2D depth_tex;
layout(binding = 1) uniform isampler2D stencil_tex;
layout(location = 0) out vec4 color;
void main() {
ivec2 coord = ivec2(gl_FragCoord.xy);
uint depth = uint(textureLod(depth_tex, coord, 0).r * (exp2(24.0) - 1.0f));
uint stencil = uint(textureLod(stencil_tex, coord, 0).r);
color.b = float(depth >> 22) / (exp2(10) - 1.0);
color.g = float((depth >> 11) & 0x00FF) / (exp2(11) - 1.0);
color.r = float(depth & 0x00FF) / (exp2(11) - 1.0);
color.a = 1.0f;
}


@ -0,0 +1,21 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#version 450
layout(binding = 0) uniform sampler2D depth_tex;
layout(binding = 1) uniform isampler2D stencil_tex;
layout(location = 0) out vec4 color;
void main() {
ivec2 coord = ivec2(gl_FragCoord.xy);
uint depth = uint(textureLod(depth_tex, coord, 0).r * (exp2(24.0) - 1.0f));
uint stencil = uint(textureLod(stencil_tex, coord, 0).r);
color.r = float(depth >> 16) / (exp2(16) - 1.0);
color.g = float((depth >> 16) & 0x00FF) / (exp2(16) - 1.0);
color.b = 0.0f;
color.a = 1.0f;
}


@ -0,0 +1,18 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#version 450
#extension GL_ARB_shader_stencil_export : require
layout(binding = 0) uniform sampler2D color_texture;
void main() {
ivec2 coord = ivec2(gl_FragCoord.xy);
vec4 color = texelFetch(color_texture, coord, 0).rgba;
uint depth_stencil_unorm = (uint(color.r * (exp2(16) - 1.0f)) << 16)
| (uint(color.g * (exp2(16) - 1.0f)) << 16);
gl_FragDepth = float(depth_stencil_unorm >> 8) / (exp2(24.0) - 1.0f);
gl_FragStencilRefARB = int(depth_stencil_unorm & 0x00FF);
}


@ -9,6 +9,7 @@
#include <glad/glad.h>
#include "common/bit_util.h"
#include "common/literals.h"
#include "common/settings.h"
#include "video_core/renderer_opengl/gl_device.h"
@ -398,9 +399,6 @@ OGLTexture MakeImage(const VideoCommon::ImageInfo& info, GLenum gl_internal_form
return GL_R32UI;
}
[[nodiscard]] u32 NextPow2(u32 value) {
return 1U << (32U - std::countl_zero(value - 1U));
}
} // Anonymous namespace
ImageBufferMap::~ImageBufferMap() {
@ -527,7 +525,7 @@ void TextureCacheRuntime::CopyImage(Image& dst_image, Image& src_image,
}
}
void TextureCacheRuntime::ConvertImage(Image& dst, Image& src,
void TextureCacheRuntime::ReinterpretImage(Image& dst, Image& src,
std::span<const VideoCommon::ImageCopy> copies) {
LOG_DEBUG(Render_OpenGL, "Converting {} to {}", src.info.format, dst.info.format);
format_conversion_pass.ConvertImage(dst, src, copies);
@ -1333,7 +1331,7 @@ void FormatConversionPass::ConvertImage(Image& dst_image, Image& src_image,
const u32 copy_size = region.width * region.height * region.depth * img_bpp;
if (pbo_size < copy_size) {
intermediate_pbo.Create();
pbo_size = NextPow2(copy_size);
pbo_size = Common::NextPow2(copy_size);
glNamedBufferData(intermediate_pbo.handle, pbo_size, nullptr, GL_STREAM_COPY);
}
// Copy from source to PBO


@ -84,9 +84,13 @@ public:
u64 GetDeviceLocalMemory() const;
bool ShouldReinterpret([[maybe_unused]] Image& dst, [[maybe_unused]] Image& src) {
return true;
}
void CopyImage(Image& dst, Image& src, std::span<const VideoCommon::ImageCopy> copies);
void ConvertImage(Image& dst, Image& src, std::span<const VideoCommon::ImageCopy> copies);
void ReinterpretImage(Image& dst, Image& src, std::span<const VideoCommon::ImageCopy> copies);
void ConvertImage(Framebuffer* dst, ImageView& dst_view, ImageView& src_view, bool rescaled) {
UNIMPLEMENTED();
@ -339,7 +343,6 @@ struct TextureCacheParams {
static constexpr bool FRAMEBUFFER_BLITS = true;
static constexpr bool HAS_EMULATED_COPIES = true;
static constexpr bool HAS_DEVICE_MEMORY_INFO = true;
static constexpr bool HAS_PIXEL_FORMAT_CONVERSIONS = true;
using Runtime = OpenGL::TextureCacheRuntime;
using Image = OpenGL::Image;


@ -4,8 +4,14 @@
#include <algorithm>
#include "video_core/host_shaders/convert_abgr8_to_d24s8_frag_spv.h"
#include "video_core/host_shaders/convert_b10g11r11_to_d24s8_frag_spv.h"
#include "video_core/host_shaders/convert_d24s8_to_abgr8_frag_spv.h"
#include "video_core/host_shaders/convert_d24s8_to_b10g11r11_frag_spv.h"
#include "video_core/host_shaders/convert_d24s8_to_r16g16_frag_spv.h"
#include "video_core/host_shaders/convert_depth_to_float_frag_spv.h" #include "video_core/host_shaders/convert_depth_to_float_frag_spv.h"
#include "video_core/host_shaders/convert_float_to_depth_frag_spv.h" #include "video_core/host_shaders/convert_float_to_depth_frag_spv.h"
#include "video_core/host_shaders/convert_r16g16_to_d24s8_frag_spv.h"
#include "video_core/host_shaders/full_screen_triangle_vert_spv.h" #include "video_core/host_shaders/full_screen_triangle_vert_spv.h"
#include "video_core/host_shaders/vulkan_blit_color_float_frag_spv.h" #include "video_core/host_shaders/vulkan_blit_color_float_frag_spv.h"
#include "video_core/host_shaders/vulkan_blit_depth_stencil_frag_spv.h" #include "video_core/host_shaders/vulkan_blit_depth_stencil_frag_spv.h"
@ -354,6 +360,12 @@ BlitImageHelper::BlitImageHelper(const Device& device_, VKScheduler& scheduler_,
blit_color_to_color_frag(BuildShader(device, VULKAN_BLIT_COLOR_FLOAT_FRAG_SPV)),
convert_depth_to_float_frag(BuildShader(device, CONVERT_DEPTH_TO_FLOAT_FRAG_SPV)),
convert_float_to_depth_frag(BuildShader(device, CONVERT_FLOAT_TO_DEPTH_FRAG_SPV)),
convert_abgr8_to_d24s8_frag(BuildShader(device, CONVERT_ABGR8_TO_D24S8_FRAG_SPV)),
convert_b10g11r11_to_d24s8_frag(BuildShader(device, CONVERT_B10G11R11_TO_D24S8_FRAG_SPV)),
convert_r16g16_to_d24s8_frag(BuildShader(device, CONVERT_R16G16_TO_D24S8_FRAG_SPV)),
convert_d24s8_to_abgr8_frag(BuildShader(device, CONVERT_D24S8_TO_ABGR8_FRAG_SPV)),
convert_d24s8_to_b10g11r11_frag(BuildShader(device, CONVERT_D24S8_TO_B10G11R11_FRAG_SPV)),
convert_d24s8_to_r16g16_frag(BuildShader(device, CONVERT_D24S8_TO_R16G16_FRAG_SPV)),
linear_sampler(device.GetLogical().CreateSampler(SAMPLER_CREATE_INFO<VK_FILTER_LINEAR>)),
nearest_sampler(device.GetLogical().CreateSampler(SAMPLER_CREATE_INFO<VK_FILTER_NEAREST>)) {
if (device.IsExtShaderStencilExportSupported()) {
@ -448,6 +460,59 @@ void BlitImageHelper::ConvertR16ToD16(const Framebuffer* dst_framebuffer,
Convert(*convert_r16_to_d16_pipeline, dst_framebuffer, src_image_view, up_scale, down_shift);
}
void BlitImageHelper::ConvertABGR8ToD24S8(const Framebuffer* dst_framebuffer,
const ImageView& src_image_view, u32 up_scale,
u32 down_shift) {
ConvertPipelineDepthTargetEx(convert_abgr8_to_d24s8_pipeline, dst_framebuffer->RenderPass(),
convert_abgr8_to_d24s8_frag, true);
Convert(*convert_abgr8_to_d24s8_pipeline, dst_framebuffer, src_image_view, up_scale,
down_shift);
}
void BlitImageHelper::ConvertB10G11R11ToD24S8(const Framebuffer* dst_framebuffer,
const ImageView& src_image_view, u32 up_scale,
u32 down_shift) {
ConvertPipelineDepthTargetEx(convert_b10g11r11_to_d24s8_pipeline, dst_framebuffer->RenderPass(),
convert_b10g11r11_to_d24s8_frag, true);
Convert(*convert_b10g11r11_to_d24s8_pipeline, dst_framebuffer, src_image_view, up_scale,
down_shift);
}
void BlitImageHelper::ConvertR16G16ToD24S8(const Framebuffer* dst_framebuffer,
const ImageView& src_image_view, u32 up_scale,
u32 down_shift) {
ConvertPipelineDepthTargetEx(convert_r16g16_to_d24s8_pipeline, dst_framebuffer->RenderPass(),
convert_r16g16_to_d24s8_frag, true);
Convert(*convert_r16g16_to_d24s8_pipeline, dst_framebuffer, src_image_view, up_scale,
down_shift);
}
void BlitImageHelper::ConvertD24S8ToABGR8(const Framebuffer* dst_framebuffer,
ImageView& src_image_view, u32 up_scale, u32 down_shift) {
ConvertPipelineColorTargetEx(convert_d24s8_to_abgr8_pipeline, dst_framebuffer->RenderPass(),
convert_d24s8_to_abgr8_frag, false);
ConvertDepthStencil(*convert_d24s8_to_abgr8_pipeline, dst_framebuffer, src_image_view, up_scale,
down_shift);
}
void BlitImageHelper::ConvertD24S8ToB10G11R11(const Framebuffer* dst_framebuffer,
ImageView& src_image_view, u32 up_scale,
u32 down_shift) {
ConvertPipelineColorTargetEx(convert_d24s8_to_b10g11r11_pipeline, dst_framebuffer->RenderPass(),
convert_d24s8_to_b10g11r11_frag, false);
ConvertDepthStencil(*convert_d24s8_to_b10g11r11_pipeline, dst_framebuffer, src_image_view,
up_scale, down_shift);
}
void BlitImageHelper::ConvertD24S8ToR16G16(const Framebuffer* dst_framebuffer,
ImageView& src_image_view, u32 up_scale,
u32 down_shift) {
ConvertPipelineColorTargetEx(convert_d24s8_to_r16g16_pipeline, dst_framebuffer->RenderPass(),
convert_d24s8_to_r16g16_frag, false);
ConvertDepthStencil(*convert_d24s8_to_r16g16_pipeline, dst_framebuffer, src_image_view,
up_scale, down_shift);
}
void BlitImageHelper::Convert(VkPipeline pipeline, const Framebuffer* dst_framebuffer,
const ImageView& src_image_view, u32 up_scale, u32 down_shift) {
const VkPipelineLayout layout = *one_texture_pipeline_layout;
@ -495,6 +560,54 @@ void BlitImageHelper::Convert(VkPipeline pipeline, const Framebuffer* dst_frameb
scheduler.InvalidateState();
}
void BlitImageHelper::ConvertDepthStencil(VkPipeline pipeline, const Framebuffer* dst_framebuffer,
ImageView& src_image_view, u32 up_scale, u32 down_shift) {
const VkPipelineLayout layout = *two_textures_pipeline_layout;
const VkImageView src_depth_view = src_image_view.DepthView();
const VkImageView src_stencil_view = src_image_view.StencilView();
const VkSampler sampler = *nearest_sampler;
const VkExtent2D extent{
.width = std::max((src_image_view.size.width * up_scale) >> down_shift, 1U),
.height = std::max((src_image_view.size.height * up_scale) >> down_shift, 1U),
};
scheduler.RequestRenderpass(dst_framebuffer);
scheduler.Record([pipeline, layout, sampler, src_depth_view, src_stencil_view, extent, up_scale,
down_shift, this](vk::CommandBuffer cmdbuf) {
const VkOffset2D offset{
.x = 0,
.y = 0,
};
const VkViewport viewport{
.x = 0.0f,
.y = 0.0f,
.width = static_cast<float>(extent.width),
.height = static_cast<float>(extent.height),
.minDepth = 0.0f,
.maxDepth = 0.0f,
};
const VkRect2D scissor{
.offset = offset,
.extent = extent,
};
const PushConstants push_constants{
.tex_scale = {viewport.width, viewport.height},
.tex_offset = {0.0f, 0.0f},
};
const VkDescriptorSet descriptor_set = two_textures_descriptor_allocator.Commit();
UpdateTwoTexturesDescriptorSet(device, descriptor_set, sampler, src_depth_view,
src_stencil_view);
// TODO: Barriers
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, descriptor_set,
nullptr);
cmdbuf.SetViewport(0, viewport);
cmdbuf.SetScissor(0, scissor);
cmdbuf.PushConstants(layout, VK_SHADER_STAGE_VERTEX_BIT, push_constants);
cmdbuf.Draw(3, 1, 0, 0);
});
scheduler.InvalidateState();
}
VkPipeline BlitImageHelper::FindOrEmplaceColorPipeline(const BlitImagePipelineKey& key) {
const auto it = std::ranges::find(blit_color_keys, key);
if (it != blit_color_keys.end()) {
@ -636,4 +749,62 @@ void BlitImageHelper::ConvertColorToDepthPipeline(vk::Pipeline& pipeline, VkRend
});
}
void BlitImageHelper::ConvertPipelineColorTargetEx(vk::Pipeline& pipeline, VkRenderPass renderpass,
vk::ShaderModule& module, bool single_texture) {
if (pipeline) {
return;
}
const std::array stages = MakeStages(*full_screen_vert, *module);
pipeline = device.GetLogical().CreateGraphicsPipeline({
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.stageCount = static_cast<u32>(stages.size()),
.pStages = stages.data(),
.pVertexInputState = &PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
.pInputAssemblyState = &PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
.pTessellationState = nullptr,
.pViewportState = &PIPELINE_VIEWPORT_STATE_CREATE_INFO,
.pRasterizationState = &PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
.pMultisampleState = &PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
.pDepthStencilState = nullptr,
.pColorBlendState = &PIPELINE_COLOR_BLEND_STATE_GENERIC_CREATE_INFO,
.pDynamicState = &PIPELINE_DYNAMIC_STATE_CREATE_INFO,
.layout = single_texture ? *one_texture_pipeline_layout : *two_textures_pipeline_layout,
.renderPass = renderpass,
.subpass = 0,
.basePipelineHandle = VK_NULL_HANDLE,
.basePipelineIndex = 0,
});
}
void BlitImageHelper::ConvertPipelineDepthTargetEx(vk::Pipeline& pipeline, VkRenderPass renderpass,
vk::ShaderModule& module, bool single_texture) {
if (pipeline) {
return;
}
const std::array stages = MakeStages(*full_screen_vert, *module);
pipeline = device.GetLogical().CreateGraphicsPipeline({
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.stageCount = static_cast<u32>(stages.size()),
.pStages = stages.data(),
.pVertexInputState = &PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
.pInputAssemblyState = &PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
.pTessellationState = nullptr,
.pViewportState = &PIPELINE_VIEWPORT_STATE_CREATE_INFO,
.pRasterizationState = &PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
.pMultisampleState = &PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
.pDepthStencilState = &PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
.pColorBlendState = &PIPELINE_COLOR_BLEND_STATE_EMPTY_CREATE_INFO,
.pDynamicState = &PIPELINE_DYNAMIC_STATE_CREATE_INFO,
.layout = single_texture ? *one_texture_pipeline_layout : *two_textures_pipeline_layout,
.renderPass = renderpass,
.subpass = 0,
.basePipelineHandle = VK_NULL_HANDLE,
.basePipelineIndex = 0,
});
}
} // namespace Vulkan


@ -56,10 +56,31 @@ public:
void ConvertR16ToD16(const Framebuffer* dst_framebuffer, const ImageView& src_image_view,
u32 up_scale, u32 down_shift);
void ConvertABGR8ToD24S8(const Framebuffer* dst_framebuffer, const ImageView& src_image_view,
u32 up_scale, u32 down_shift);
void ConvertB10G11R11ToD24S8(const Framebuffer* dst_framebuffer,
const ImageView& src_image_view, u32 up_scale, u32 down_shift);
void ConvertR16G16ToD24S8(const Framebuffer* dst_framebuffer, const ImageView& src_image_view,
u32 up_scale, u32 down_shift);
void ConvertD24S8ToABGR8(const Framebuffer* dst_framebuffer, ImageView& src_image_view,
u32 up_scale, u32 down_shift);
void ConvertD24S8ToB10G11R11(const Framebuffer* dst_framebuffer, ImageView& src_image_view,
u32 up_scale, u32 down_shift);
void ConvertD24S8ToR16G16(const Framebuffer* dst_framebuffer, ImageView& src_image_view,
u32 up_scale, u32 down_shift);
private:
void Convert(VkPipeline pipeline, const Framebuffer* dst_framebuffer,
const ImageView& src_image_view, u32 up_scale, u32 down_shift);
void ConvertDepthStencil(VkPipeline pipeline, const Framebuffer* dst_framebuffer,
ImageView& src_image_view, u32 up_scale, u32 down_shift);
[[nodiscard]] VkPipeline FindOrEmplaceColorPipeline(const BlitImagePipelineKey& key);
[[nodiscard]] VkPipeline FindOrEmplaceDepthStencilPipeline(const BlitImagePipelineKey& key);
@ -68,6 +89,12 @@ private:
void ConvertColorToDepthPipeline(vk::Pipeline& pipeline, VkRenderPass renderpass);
void ConvertPipelineColorTargetEx(vk::Pipeline& pipeline, VkRenderPass renderpass,
vk::ShaderModule& module, bool single_texture);
void ConvertPipelineDepthTargetEx(vk::Pipeline& pipeline, VkRenderPass renderpass,
vk::ShaderModule& module, bool single_texture);
const Device& device;
VKScheduler& scheduler;
StateTracker& state_tracker;
@ -83,6 +110,12 @@ private:
vk::ShaderModule blit_depth_stencil_frag;
vk::ShaderModule convert_depth_to_float_frag;
vk::ShaderModule convert_float_to_depth_frag;
vk::ShaderModule convert_abgr8_to_d24s8_frag;
vk::ShaderModule convert_b10g11r11_to_d24s8_frag;
vk::ShaderModule convert_r16g16_to_d24s8_frag;
vk::ShaderModule convert_d24s8_to_abgr8_frag;
vk::ShaderModule convert_d24s8_to_b10g11r11_frag;
vk::ShaderModule convert_d24s8_to_r16g16_frag;
vk::Sampler linear_sampler;
vk::Sampler nearest_sampler;
@ -94,6 +127,12 @@ private:
vk::Pipeline convert_r32_to_d32_pipeline;
vk::Pipeline convert_d16_to_r16_pipeline;
vk::Pipeline convert_r16_to_d16_pipeline;
vk::Pipeline convert_abgr8_to_d24s8_pipeline;
vk::Pipeline convert_b10g11r11_to_d24s8_pipeline;
vk::Pipeline convert_r16g16_to_d24s8_pipeline;
vk::Pipeline convert_d24s8_to_abgr8_pipeline;
vk::Pipeline convert_d24s8_to_b10g11r11_pipeline;
vk::Pipeline convert_d24s8_to_r16g16_pipeline;
};
} // namespace Vulkan


@ -8,6 +8,7 @@
#include <vector>
#include "common/bit_cast.h"
#include "common/bit_util.h"
#include "common/settings.h"
#include "video_core/engines/fermi_2d.h"
@ -313,6 +314,19 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
};
}
[[nodiscard]] VkBufferImageCopy MakeBufferImageCopy(const VideoCommon::ImageCopy& copy, bool is_src,
VkImageAspectFlags aspect_mask) noexcept {
return VkBufferImageCopy{
.bufferOffset = 0,
.bufferRowLength = 0,
.bufferImageHeight = 0,
.imageSubresource = MakeImageSubresourceLayers(
is_src ? copy.src_subresource : copy.dst_subresource, aspect_mask),
.imageOffset = MakeOffset3D(is_src ? copy.src_offset : copy.dst_offset),
.imageExtent = MakeExtent3D(copy.extent),
};
}
[[maybe_unused]] [[nodiscard]] std::vector<VkBufferCopy> TransformBufferCopies(
std::span<const VideoCommon::BufferCopy> copies, size_t buffer_offset) {
std::vector<VkBufferCopy> result(copies.size());
@ -759,6 +773,163 @@ StagingBufferRef TextureCacheRuntime::DownloadStagingBuffer(size_t size) {
return staging_buffer_pool.Request(size, MemoryUsage::Download);
}
bool TextureCacheRuntime::ShouldReinterpret(Image& dst, Image& src) {
if (VideoCore::Surface::GetFormatType(dst.info.format) ==
VideoCore::Surface::SurfaceType::DepthStencil) {
return !device.IsExtShaderStencilExportSupported();
}
return false;
}
VkBuffer TextureCacheRuntime::GetTemporaryBuffer(size_t needed_size) {
const auto level = (8 * sizeof(size_t)) - std::countl_zero(needed_size - 1ULL);
if (buffer_commits[level]) {
return *buffers[level];
}
const auto new_size = Common::NextPow2(needed_size);
VkBufferUsageFlags flags = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT |
VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
buffers[level] = device.GetLogical().CreateBuffer({
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = new_size,
.usage = flags,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
});
buffer_commits[level] = std::make_unique<MemoryCommit>(
memory_allocator.Commit(buffers[level], MemoryUsage::DeviceLocal));
return *buffers[level];
}
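GetTemporaryBuffer reuses one lazily created buffer per power-of-two size class, indexed by ceil(log2(needed_size)). A small sketch of that bucket selection (BucketLevel is an illustrative name, not from the PR; it assumes a 64-bit size_t, matching the 8 * sizeof(size_t) slot count declared in the header below):

// Illustrative sketch of the size-class selection used by GetTemporaryBuffer above.
#include <bit>
#include <cassert>
#include <cstddef>

std::size_t BucketLevel(std::size_t needed_size) {
    // Same arithmetic as the diff: index of the smallest power of two >= needed_size.
    // needed_size is assumed to be at least 1.
    return (8 * sizeof(std::size_t)) - std::countl_zero(needed_size - 1ULL);
}

int main() {
    assert(BucketLevel(1) == 0);     // 1 byte -> 2^0 bucket
    assert(BucketLevel(4096) == 12); // 4 KiB  -> 2^12 bucket
    assert(BucketLevel(4097) == 13); // rounds up to the 8 KiB bucket
}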
void TextureCacheRuntime::ReinterpretImage(Image& dst, Image& src,
std::span<const VideoCommon::ImageCopy> copies) {
std::vector<VkBufferImageCopy> vk_in_copies(copies.size());
std::vector<VkBufferImageCopy> vk_out_copies(copies.size());
const VkImageAspectFlags src_aspect_mask = src.AspectMask();
const VkImageAspectFlags dst_aspect_mask = dst.AspectMask();
std::ranges::transform(copies, vk_in_copies.begin(), [src_aspect_mask](const auto& copy) {
return MakeBufferImageCopy(copy, true, src_aspect_mask);
});
std::ranges::transform(copies, vk_out_copies.begin(), [dst_aspect_mask](const auto& copy) {
return MakeBufferImageCopy(copy, false, dst_aspect_mask);
});
const u32 img_bpp = BytesPerBlock(src.info.format);
size_t total_size = 0;
for (const auto& copy : copies) {
total_size += copy.extent.width * copy.extent.height * copy.extent.depth * img_bpp;
}
const VkBuffer copy_buffer = GetTemporaryBuffer(total_size);
const VkImage dst_image = dst.Handle();
const VkImage src_image = src.Handle();
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([dst_image, src_image, copy_buffer, src_aspect_mask, dst_aspect_mask,
vk_in_copies, vk_out_copies](vk::CommandBuffer cmdbuf) {
RangedBarrierRange dst_range;
RangedBarrierRange src_range;
for (const VkBufferImageCopy& copy : vk_in_copies) {
src_range.AddLayers(copy.imageSubresource);
}
for (const VkBufferImageCopy& copy : vk_out_copies) {
dst_range.AddLayers(copy.imageSubresource);
}
static constexpr VkMemoryBarrier READ_BARRIER{
.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
.pNext = nullptr,
.srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT,
};
static constexpr VkMemoryBarrier WRITE_BARRIER{
.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
.pNext = nullptr,
.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT,
};
const std::array pre_barriers{
VkImageMemoryBarrier{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.pNext = nullptr,
.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
VK_ACCESS_TRANSFER_WRITE_BIT,
.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
.oldLayout = VK_IMAGE_LAYOUT_GENERAL,
.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = src_image,
.subresourceRange = src_range.SubresourceRange(src_aspect_mask),
},
};
const std::array middle_in_barrier{
VkImageMemoryBarrier{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.pNext = nullptr,
.srcAccessMask = 0,
.dstAccessMask = 0,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_GENERAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = src_image,
.subresourceRange = src_range.SubresourceRange(src_aspect_mask),
},
};
const std::array middle_out_barrier{
VkImageMemoryBarrier{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.pNext = nullptr,
.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
VK_ACCESS_TRANSFER_WRITE_BIT,
.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
.oldLayout = VK_IMAGE_LAYOUT_GENERAL,
.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = dst_image,
.subresourceRange = dst_range.SubresourceRange(dst_aspect_mask),
},
};
const std::array post_barriers{
VkImageMemoryBarrier{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.pNext = nullptr,
.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT |
VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_GENERAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = dst_image,
.subresourceRange = dst_range.SubresourceRange(dst_aspect_mask),
},
};
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
0, {}, {}, pre_barriers);
cmdbuf.CopyImageToBuffer(src_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, copy_buffer,
vk_in_copies);
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
0, WRITE_BARRIER, nullptr, middle_in_barrier);
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
0, READ_BARRIER, {}, middle_out_barrier);
cmdbuf.CopyBufferToImage(copy_buffer, dst_image, VK_IMAGE_LAYOUT_GENERAL, vk_out_copies);
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
0, {}, {}, post_barriers);
});
}
void TextureCacheRuntime::BlitImage(Framebuffer* dst_framebuffer, ImageView& dst, ImageView& src,
const Region2D& dst_region, const Region2D& src_region,
Tegra::Engines::Fermi2D::Filter filter,
@ -886,6 +1057,22 @@ void TextureCacheRuntime::ConvertImage(Framebuffer* dst, ImageView& dst_view, Im
return blit_image_helper.ConvertD16ToR16(dst, src_view, up_scale, down_shift);
}
break;
case PixelFormat::A8B8G8R8_UNORM:
case PixelFormat::B8G8R8A8_UNORM:
if (src_view.format == PixelFormat::S8_UINT_D24_UNORM) {
return blit_image_helper.ConvertD24S8ToABGR8(dst, src_view, up_scale, down_shift);
}
break;
case PixelFormat::B10G11R11_FLOAT:
if (src_view.format == PixelFormat::S8_UINT_D24_UNORM) {
return blit_image_helper.ConvertD24S8ToB10G11R11(dst, src_view, up_scale, down_shift);
}
break;
case PixelFormat::R16G16_UNORM:
if (src_view.format == PixelFormat::S8_UINT_D24_UNORM) {
return blit_image_helper.ConvertD24S8ToR16G16(dst, src_view, up_scale, down_shift);
}
break;
case PixelFormat::R32_FLOAT:
if (src_view.format == PixelFormat::D32_FLOAT) {
return blit_image_helper.ConvertD32ToR32(dst, src_view, up_scale, down_shift);
@ -896,6 +1083,18 @@ void TextureCacheRuntime::ConvertImage(Framebuffer* dst, ImageView& dst_view, Im
return blit_image_helper.ConvertR16ToD16(dst, src_view, up_scale, down_shift);
}
break;
case PixelFormat::S8_UINT_D24_UNORM:
if (src_view.format == PixelFormat::A8B8G8R8_UNORM ||
src_view.format == PixelFormat::B8G8R8A8_UNORM) {
return blit_image_helper.ConvertABGR8ToD24S8(dst, src_view, up_scale, down_shift);
}
if (src_view.format == PixelFormat::B10G11R11_FLOAT) {
return blit_image_helper.ConvertB10G11R11ToD24S8(dst, src_view, up_scale, down_shift);
}
if (src_view.format == PixelFormat::R16G16_UNORM) {
return blit_image_helper.ConvertR16G16ToD24S8(dst, src_view, up_scale, down_shift);
}
break;
case PixelFormat::D32_FLOAT:
if (src_view.format == PixelFormat::R32_FLOAT) {
return blit_image_helper.ConvertR32ToD32(dst, src_view, up_scale, down_shift);


@ -61,6 +61,10 @@ public:
void CopyImage(Image& dst, Image& src, std::span<const VideoCommon::ImageCopy> copies);
bool ShouldReinterpret(Image& dst, Image& src);
void ReinterpretImage(Image& dst, Image& src, std::span<const VideoCommon::ImageCopy> copies);
void ConvertImage(Framebuffer* dst, ImageView& dst_view, ImageView& src_view, bool rescaled);
bool CanAccelerateImageUpload(Image&) const noexcept {
@ -82,6 +86,8 @@ public:
return true;
}
[[nodiscard]] VkBuffer GetTemporaryBuffer(size_t needed_size);
const Device& device;
VKScheduler& scheduler;
MemoryAllocator& memory_allocator;
@ -90,6 +96,10 @@ public:
ASTCDecoderPass& astc_decoder_pass;
RenderPassCache& render_pass_cache;
const Settings::ResolutionScalingInfo& resolution;
constexpr static size_t indexing_slots = 8 * sizeof(size_t);
std::array<vk::Buffer, indexing_slots> buffers{};
std::array<std::unique_ptr<MemoryCommit>, indexing_slots> buffer_commits{};
};
class Image : public VideoCommon::ImageBase {
@ -316,7 +326,6 @@ struct TextureCacheParams {
static constexpr bool FRAMEBUFFER_BLITS = false;
static constexpr bool HAS_EMULATED_COPIES = false;
static constexpr bool HAS_DEVICE_MEMORY_INFO = true;
static constexpr bool HAS_PIXEL_FORMAT_CONVERSIONS = false;
using Runtime = Vulkan::TextureCacheRuntime;
using Image = Vulkan::Image;


@ -475,6 +475,7 @@ void TextureCache<P>::BlitImage(const Tegra::Engines::Fermi2D::Surface& dst,
const BlitImages images = GetBlitImages(dst, src);
const ImageId dst_id = images.dst_id;
const ImageId src_id = images.src_id;
PrepareImage(src_id, false, false);
PrepareImage(dst_id, true, false);
@ -758,7 +759,8 @@ ImageId TextureCache<P>::FindImage(const ImageInfo& info, GPUVAddr gpu_addr,
return ImageId{};
}
}
const bool broken_views = runtime.HasBrokenTextureViewFormats();
const bool broken_views =
runtime.HasBrokenTextureViewFormats() || True(options & RelaxedOptions::ForceBrokenViews);
const bool native_bgr = runtime.HasNativeBgr();
ImageId image_id;
const auto lambda = [&](ImageId existing_image_id, ImageBase& existing_image) {
@ -1094,12 +1096,13 @@ typename TextureCache<P>::BlitImages TextureCache<P>::GetBlitImages(
if (GetFormatType(dst_info.format) != GetFormatType(src_info.format)) {
continue;
}
if (!dst_id) {
dst_id = InsertImage(dst_info, dst_addr, RelaxedOptions{});
}
if (!src_id) {
src_id = InsertImage(src_info, src_addr, RelaxedOptions{});
}
RelaxedOptions find_options{};
if (src_info.num_samples > 1) {
// it's a resolve, we must enforce the same format.
find_options = RelaxedOptions::ForceBrokenViews;
}
src_id = FindOrInsertImage(src_info, src_addr, find_options);
dst_id = FindOrInsertImage(dst_info, dst_addr, find_options);
} while (has_deleted_images);
return BlitImages{
.dst_id = dst_id,
@ -1759,8 +1762,8 @@ void TextureCache<P>::CopyImage(ImageId dst_id, ImageId src_id, std::vector<Imag
}
UNIMPLEMENTED_IF(dst.info.type != ImageType::e2D);
UNIMPLEMENTED_IF(src.info.type != ImageType::e2D);
if constexpr (HAS_PIXEL_FORMAT_CONVERSIONS) {
if (runtime.ShouldReinterpret(dst, src)) {
return runtime.ConvertImage(dst, src, copies);
return runtime.ReinterpretImage(dst, src, copies);
}
for (const ImageCopy& copy : copies) {
UNIMPLEMENTED_IF(copy.dst_subresource.num_layers != 1);


@ -59,8 +59,6 @@ class TextureCache {
static constexpr bool HAS_EMULATED_COPIES = P::HAS_EMULATED_COPIES;
/// True when the API can provide info about the memory of the device.
static constexpr bool HAS_DEVICE_MEMORY_INFO = P::HAS_DEVICE_MEMORY_INFO;
/// True when the API provides utilities for pixel format conversions.
static constexpr bool HAS_PIXEL_FORMAT_CONVERSIONS = P::HAS_PIXEL_FORMAT_CONVERSIONS;
static constexpr u64 DEFAULT_EXPECTED_MEMORY = 1_GiB;
static constexpr u64 DEFAULT_CRITICAL_MEMORY = 2_GiB;


@ -54,6 +54,7 @@ enum class RelaxedOptions : u32 {
Size = 1 << 0,
Format = 1 << 1,
Samples = 1 << 2,
ForceBrokenViews = 1 << 3,
};
DECLARE_ENUM_FLAG_OPERATORS(RelaxedOptions)


@ -1151,19 +1151,40 @@ bool IsSubresource(const ImageInfo& candidate, const ImageBase& image, GPUVAddr
void DeduceBlitImages(ImageInfo& dst_info, ImageInfo& src_info, const ImageBase* dst,
const ImageBase* src) {
if (src && GetFormatType(src->info.format) != SurfaceType::ColorTexture) {
bool is_resolve = false;
const auto original_src_format = src_info.format;
const auto original_dst_format = dst_info.format;
if (src) {
if (GetFormatType(src->info.format) != SurfaceType::ColorTexture) {
src_info.format = src->info.format;
}
is_resolve = src->info.num_samples > 1;
src_info.num_samples = src->info.num_samples;
src_info.size = src->info.size;
}
if (dst && GetFormatType(dst->info.format) != SurfaceType::ColorTexture) {
dst_info.format = dst->info.format;
}
if (src && GetFormatType(src->info.format) != SurfaceType::ColorTexture) {
if (dst) {
if (GetFormatType(dst->info.format) == SurfaceType::ColorTexture) {
src_info.format = original_src_format;
}
} else {
dst_info.format = src->info.format;
}
}
if (dst && GetFormatType(dst->info.format) != SurfaceType::ColorTexture) {
if (src) {
if (GetFormatType(src->info.format) == SurfaceType::ColorTexture) {
dst_info.format = original_dst_format;
}
} else {
src_info.format = dst->info.format;
}
}
ASSERT(!is_resolve || dst_info.format == src_info.format);
}
u32 MapSizeBytes(const ImageBase& image) {
if (True(image.flags & ImageFlagBits::AcceleratedUpload)) {