renderer_vulkan: convert FSR to graphics pipeline

Liam 2024-01-15 10:19:02 -05:00
parent b78900e956
commit 2ed9586130
17 changed files with 311 additions and 463 deletions

src/video_core/host_shaders/CMakeLists.txt

@@ -9,7 +9,7 @@ set(FIDELITYFX_FILES
 )
 
 set(GLSL_INCLUDES
-    fidelityfx_fsr.comp
+    fidelityfx_fsr.frag
     ${FIDELITYFX_FILES}
 )
 
@@ -56,10 +56,11 @@ set(SHADER_FILES
     vulkan_color_clear.frag
     vulkan_color_clear.vert
     vulkan_depthstencil_clear.frag
-    vulkan_fidelityfx_fsr_easu_fp16.comp
-    vulkan_fidelityfx_fsr_easu_fp32.comp
-    vulkan_fidelityfx_fsr_rcas_fp16.comp
-    vulkan_fidelityfx_fsr_rcas_fp32.comp
+    vulkan_fidelityfx_fsr.vert
+    vulkan_fidelityfx_fsr_easu_fp16.frag
+    vulkan_fidelityfx_fsr_easu_fp32.frag
+    vulkan_fidelityfx_fsr_rcas_fp16.frag
+    vulkan_fidelityfx_fsr_rcas_fp32.frag
     vulkan_present.frag
     vulkan_present.vert
     vulkan_present_scaleforce_fp16.frag
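Note that the renamed fidelityfx_fsr.frag stays in GLSL_INCLUDES rather than SHADER_FILES: it is only ever #include-d by the four thin wrapper shaders below, while the wrappers themselves (and the new fullscreen-triangle vertex shader) are the units compiled into the generated *_spv.h headers.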

src/video_core/host_shaders/fidelityfx_fsr.comp → fidelityfx_fsr.frag (renamed)

@@ -34,7 +34,6 @@ layout( push_constant ) uniform constants {
 };
 
 layout(set=0,binding=0) uniform sampler2D InputTexture;
-layout(set=0,binding=1,rgba16f) uniform image2D OutputTexture;
 
 #define A_GPU 1
 #define A_GLSL 1
@@ -72,44 +71,40 @@ layout(set=0,binding=1,rgba16f) uniform image2D OutputTexture;
 #include "ffx_fsr1.h"
 
-void CurrFilter(AU2 pos) {
-#if USE_BILINEAR
-    AF2 pp = (AF2(pos) * AF2_AU2(Const0.xy) + AF2_AU2(Const0.zw)) * AF2_AU2(Const1.xy) + AF2(0.5, -0.5) * AF2_AU2(Const1.zw);
-    imageStore(OutputTexture, ASU2(pos), textureLod(InputTexture, pp, 0.0));
+#if USE_RCAS
+layout(location = 0) in vec2 frag_texcoord;
 #endif
+
+layout (location = 0) out vec4 frag_color;
+
+void CurrFilter(AU2 pos) {
 #if USE_EASU
 #ifndef YUZU_USE_FP16
     AF3 c;
     FsrEasuF(c, pos, Const0, Const1, Const2, Const3);
-    imageStore(OutputTexture, ASU2(pos), AF4(c, 1));
+    frag_color = AF4(c, 1.0);
 #else
     AH3 c;
     FsrEasuH(c, pos, Const0, Const1, Const2, Const3);
-    imageStore(OutputTexture, ASU2(pos), AH4(c, 1));
+    frag_color = AH4(c, 1.0);
 #endif
 #endif
 #if USE_RCAS
 #ifndef YUZU_USE_FP16
     AF3 c;
     FsrRcasF(c.r, c.g, c.b, pos, Const0);
-    imageStore(OutputTexture, ASU2(pos), AF4(c, 1));
+    frag_color = AF4(c, 1.0);
 #else
     AH3 c;
     FsrRcasH(c.r, c.g, c.b, pos, Const0);
-    imageStore(OutputTexture, ASU2(pos), AH4(c, 1));
+    frag_color = AH4(c, 1.0);
 #endif
 #endif
 }
 
-layout(local_size_x=64) in;
 void main() {
-    // Do remapping of local xy in workgroup for a more PS-like swizzle pattern.
-    AU2 gxy = ARmp8x8(gl_LocalInvocationID.x) + AU2(gl_WorkGroupID.x << 4u, gl_WorkGroupID.y << 4u);
-    CurrFilter(gxy);
-    gxy.x += 8u;
-    CurrFilter(gxy);
-    gxy.y += 8u;
-    CurrFilter(gxy);
-    gxy.x -= 8u;
-    CurrFilter(gxy);
+#if USE_RCAS
+    CurrFilter(AU2(frag_texcoord * vec2(textureSize(InputTexture, 0))));
+#else
+    CurrFilter(AU2(gl_FragCoord.xy));
+#endif
 }
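A note on the coordinate plumbing above: FsrEasu*/FsrRcas* expect an integer destination pixel position. The EASU pass renders at the scaled output resolution, so gl_FragCoord.xy (pixel centers at half-integer coordinates) truncates directly to that index, while the RCAS pass reconstructs it from the interpolated texcoord scaled by textureSize(InputTexture, 0); since RCAS renders at the same extent as the EASU image it samples, both forms land on the same pixel.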

src/video_core/host_shaders/vulkan_fidelityfx_fsr.vert (new file)

@@ -0,0 +1,13 @@
+// SPDX-FileCopyrightText: Copyright 2024 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#version 450
+
+layout(location = 0) out vec2 texcoord;
+
+void main() {
+    float x = float((gl_VertexIndex & 1) << 2);
+    float y = float((gl_VertexIndex & 2) << 1);
+    gl_Position = vec4(x - 1.0, y - 1.0, 0.0, 1.0);
+    texcoord = vec2(x, y) / 2.0;
+}
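For reference, gl_VertexIndex 0, 1, 2 expand to clip-space positions (-1, -1), (3, -1), (-1, 3) with texcoords (0, 0), (2, 0), (0, 2): a single oversized triangle that covers the whole viewport, which is why every pass in this commit is recorded as cmdbuf.Draw(3, 1, 0, 0) with no vertex or index buffer bound.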

src/video_core/host_shaders/vulkan_fidelityfx_fsr_easu_fp16.comp → vulkan_fidelityfx_fsr_easu_fp16.frag (renamed)

@@ -7,4 +7,4 @@
 #define YUZU_USE_FP16
 #define USE_EASU 1
 
-#include "fidelityfx_fsr.comp"
+#include "fidelityfx_fsr.frag"

src/video_core/host_shaders/vulkan_fidelityfx_fsr_easu_fp32.comp → vulkan_fidelityfx_fsr_easu_fp32.frag (renamed)

@@ -6,4 +6,4 @@
 #define USE_EASU 1
 
-#include "fidelityfx_fsr.comp"
+#include "fidelityfx_fsr.frag"

src/video_core/host_shaders/vulkan_fidelityfx_fsr_rcas_fp16.comp → vulkan_fidelityfx_fsr_rcas_fp16.frag (renamed)

@@ -7,4 +7,4 @@
 #define YUZU_USE_FP16
 #define USE_RCAS 1
 
-#include "fidelityfx_fsr.comp"
+#include "fidelityfx_fsr.frag"

src/video_core/host_shaders/vulkan_fidelityfx_fsr_rcas_fp32.comp → vulkan_fidelityfx_fsr_rcas_fp32.frag (renamed)

@@ -6,4 +6,4 @@
 #define USE_RCAS 1
 
-#include "fidelityfx_fsr.comp"
+#include "fidelityfx_fsr.frag"

src/video_core/renderer_vulkan/present/anti_alias_pass.h

@@ -12,16 +12,14 @@ class Scheduler;
 class AntiAliasPass {
 public:
     virtual ~AntiAliasPass() = default;
-    virtual VkImageView Draw(Scheduler& scheduler, size_t image_index, VkImage source_image,
-                             VkImageView source_image_view) = 0;
+    virtual void Draw(Scheduler& scheduler, size_t image_index, VkImage* inout_image,
+                      VkImageView* inout_image_view) = 0;
 };
 
 class NoAA final : public AntiAliasPass {
 public:
-    virtual VkImageView Draw(Scheduler& scheduler, size_t image_index, VkImage source_image,
-                             VkImageView source_image_view) {
-        return source_image_view;
-    }
+    void Draw(Scheduler& scheduler, size_t image_index, VkImage* inout_image,
+              VkImageView* inout_image_view) override {}
 };
 
 } // namespace Vulkan
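Returning void and taking VkImage*/VkImageView* in/out parameters lets a caller thread one image/view pair through any number of passes; NoAA simply leaves the pair untouched. A minimal sketch of the call-site pattern (the wrapper function and variable names are illustrative, not part of the commit; signatures are taken from the headers in this diff):

// Hedged sketch: chaining passes through the new in/out interface.
// 'image'/'view' start as the source color target and end up pointing at the
// last pass's output (unchanged if the pass is NoAA).
void ApplyAntiAliasAndUpscale(Vulkan::AntiAliasPass& anti_alias, Vulkan::FSR& fsr,
                              Vulkan::Scheduler& scheduler, size_t image_index,
                              VkExtent2D render_extent, const Common::Rectangle<f32>& crop_rect,
                              VkImage& image, VkImageView& view) {
    anti_alias.Draw(scheduler, image_index, &image, &view);
    view = fsr.Draw(scheduler, image_index, image, view, render_extent, crop_rect);
}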

src/video_core/renderer_vulkan/present/fsr.cpp

@@ -6,11 +6,13 @@
 #include "common/settings.h"
 
 #include "video_core/fsr.h"
-#include "video_core/host_shaders/vulkan_fidelityfx_fsr_easu_fp16_comp_spv.h"
-#include "video_core/host_shaders/vulkan_fidelityfx_fsr_easu_fp32_comp_spv.h"
-#include "video_core/host_shaders/vulkan_fidelityfx_fsr_rcas_fp16_comp_spv.h"
-#include "video_core/host_shaders/vulkan_fidelityfx_fsr_rcas_fp32_comp_spv.h"
+#include "video_core/host_shaders/vulkan_fidelityfx_fsr_easu_fp16_frag_spv.h"
+#include "video_core/host_shaders/vulkan_fidelityfx_fsr_easu_fp32_frag_spv.h"
+#include "video_core/host_shaders/vulkan_fidelityfx_fsr_rcas_fp16_frag_spv.h"
+#include "video_core/host_shaders/vulkan_fidelityfx_fsr_rcas_fp32_frag_spv.h"
+#include "video_core/host_shaders/vulkan_fidelityfx_fsr_vert_spv.h"
 #include "video_core/renderer_vulkan/present/fsr.h"
+#include "video_core/renderer_vulkan/present/util.h"
 #include "video_core/renderer_vulkan/vk_scheduler.h"
 #include "video_core/renderer_vulkan/vk_shader_util.h"
 #include "video_core/vulkan_common/vulkan_device.h"
@@ -18,403 +20,207 @@
 namespace Vulkan {
 
 using namespace FSR;
 
-FSR::FSR(const Device& device_, MemoryAllocator& memory_allocator_, size_t image_count_,
-         VkExtent2D output_size_)
-    : device{device_}, memory_allocator{memory_allocator_}, image_count{image_count_},
-      output_size{output_size_} {
+using PushConstants = std::array<u32, 4 * 4>;
+
+FSR::FSR(const Device& device, MemoryAllocator& memory_allocator, size_t image_count,
+         VkExtent2D extent)
+    : m_device{device}, m_memory_allocator{memory_allocator},
+      m_image_count{image_count}, m_extent{extent} {
     CreateImages();
+    CreateRenderPasses();
     CreateSampler();
     CreateShaders();
     CreateDescriptorPool();
     CreateDescriptorSetLayout();
     CreateDescriptorSets();
-    CreatePipelineLayout();
-    CreatePipeline();
+    CreatePipelineLayouts();
+    CreatePipelines();
 }
 
-VkImageView FSR::Draw(Scheduler& scheduler, size_t image_index, VkImageView image_view,
-                      VkExtent2D input_image_extent, const Common::Rectangle<f32>& crop_rect) {
-    UpdateDescriptorSet(image_index, image_view);
-
-    scheduler.Record([this, image_index, input_image_extent, crop_rect](vk::CommandBuffer cmdbuf) {
-        const VkImageMemoryBarrier base_barrier{
-            .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
-            .pNext = nullptr,
-            .srcAccessMask = 0,
-            .dstAccessMask = 0,
-            .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
-            .newLayout = VK_IMAGE_LAYOUT_GENERAL,
-            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-            .image = {},
-            .subresourceRange =
-                {
-                    .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
-                    .baseMipLevel = 0,
-                    .levelCount = 1,
-                    .baseArrayLayer = 0,
-                    .layerCount = 1,
-                },
-        };
-
-        cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *easu_pipeline);
-
-        const f32 input_image_width = static_cast<f32>(input_image_extent.width);
-        const f32 input_image_height = static_cast<f32>(input_image_extent.height);
-        const f32 output_image_width = static_cast<f32>(output_size.width);
-        const f32 output_image_height = static_cast<f32>(output_size.height);
-        const f32 viewport_width = (crop_rect.right - crop_rect.left) * input_image_width;
-        const f32 viewport_x = crop_rect.left * input_image_width;
-        const f32 viewport_height = (crop_rect.bottom - crop_rect.top) * input_image_height;
-        const f32 viewport_y = crop_rect.top * input_image_height;
-
-        std::array<u32, 4 * 4> push_constants;
-        FsrEasuConOffset(push_constants.data() + 0, push_constants.data() + 4,
-                         push_constants.data() + 8, push_constants.data() + 12,
-                         viewport_width, viewport_height, input_image_width, input_image_height,
-                         output_image_width, output_image_height, viewport_x, viewport_y);
-        cmdbuf.PushConstants(*pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, push_constants);
-
-        {
-            VkImageMemoryBarrier fsr_write_barrier = base_barrier;
-            fsr_write_barrier.image = *images[image_index];
-            fsr_write_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-
-            cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
-                                   VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, fsr_write_barrier);
-        }
-
-        cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline_layout, 0,
-                                  descriptor_sets[image_index * 2], {});
-        cmdbuf.Dispatch(Common::DivCeil(output_size.width, 16u),
-                        Common::DivCeil(output_size.height, 16u), 1);
-
-        cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *rcas_pipeline);
-
-        const float sharpening =
-            static_cast<float>(Settings::values.fsr_sharpening_slider.GetValue()) / 100.0f;
-
-        FsrRcasCon(push_constants.data(), sharpening);
-        cmdbuf.PushConstants(*pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, push_constants);
-
-        {
-            std::array<VkImageMemoryBarrier, 2> barriers;
-            auto& fsr_read_barrier = barriers[0];
-            auto& blit_write_barrier = barriers[1];
-
-            fsr_read_barrier = base_barrier;
-            fsr_read_barrier.image = *images[image_index];
-            fsr_read_barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
-            fsr_read_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
-
-            blit_write_barrier = base_barrier;
-            blit_write_barrier.image = *images[image_count + image_index];
-            blit_write_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-            blit_write_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
-
-            cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
-                                   VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, {}, {}, barriers);
-        }
-
-        cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline_layout, 0,
-                                  descriptor_sets[image_index * 2 + 1], {});
-        cmdbuf.Dispatch(Common::DivCeil(output_size.width, 16u),
-                        Common::DivCeil(output_size.height, 16u), 1);
-
-        {
-            std::array<VkImageMemoryBarrier, 1> barriers;
-            auto& blit_read_barrier = barriers[0];
-
-            blit_read_barrier = base_barrier;
-            blit_read_barrier.image = *images[image_count + image_index];
-            blit_read_barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
-            blit_read_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
-
-            cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
-                                   VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, {}, {}, barriers);
-        }
-    });
-
-    return *image_views[image_count + image_index];
-}
-
-void FSR::CreateDescriptorPool() {
-    const std::array<VkDescriptorPoolSize, 2> pool_sizes{{
-        {
-            .type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
-            .descriptorCount = static_cast<u32>(image_count * 2),
-        },
-        {
-            .type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
-            .descriptorCount = static_cast<u32>(image_count * 2),
-        },
-    }};
-
-    const VkDescriptorPoolCreateInfo ci{
-        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
-        .pNext = nullptr,
-        .flags = 0,
-        .maxSets = static_cast<u32>(image_count * 2),
-        .poolSizeCount = static_cast<u32>(pool_sizes.size()),
-        .pPoolSizes = pool_sizes.data(),
-    };
-
-    descriptor_pool = device.GetLogical().CreateDescriptorPool(ci);
-}
-
-void FSR::CreateDescriptorSetLayout() {
-    const std::array<VkDescriptorSetLayoutBinding, 2> layout_bindings{{
-        {
-            .binding = 0,
-            .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
-            .descriptorCount = 1,
-            .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
-            .pImmutableSamplers = sampler.address(),
-        },
-        {
-            .binding = 1,
-            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
-            .descriptorCount = 1,
-            .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
-            .pImmutableSamplers = sampler.address(),
-        },
-    }};
-
-    const VkDescriptorSetLayoutCreateInfo ci{
-        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
-        .pNext = nullptr,
-        .flags = 0,
-        .bindingCount = static_cast<u32>(layout_bindings.size()),
-        .pBindings = layout_bindings.data(),
-    };
-
-    descriptor_set_layout = device.GetLogical().CreateDescriptorSetLayout(ci);
-}
-
-void FSR::CreateDescriptorSets() {
-    const u32 sets = static_cast<u32>(image_count * 2);
-    const std::vector layouts(sets, *descriptor_set_layout);
-
-    const VkDescriptorSetAllocateInfo ai{
-        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
-        .pNext = nullptr,
-        .descriptorPool = *descriptor_pool,
-        .descriptorSetCount = sets,
-        .pSetLayouts = layouts.data(),
-    };
-
-    descriptor_sets = descriptor_pool.Allocate(ai);
-}
-
 void FSR::CreateImages() {
-    images.resize(image_count * 2);
-    image_views.resize(image_count * 2);
-
-    for (size_t i = 0; i < image_count * 2; ++i) {
-        images[i] = memory_allocator.CreateImage(VkImageCreateInfo{
-            .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
-            .pNext = nullptr,
-            .flags = 0,
-            .imageType = VK_IMAGE_TYPE_2D,
-            .format = VK_FORMAT_R16G16B16A16_SFLOAT,
-            .extent =
-                {
-                    .width = output_size.width,
-                    .height = output_size.height,
-                    .depth = 1,
-                },
-            .mipLevels = 1,
-            .arrayLayers = 1,
-            .samples = VK_SAMPLE_COUNT_1_BIT,
-            .tiling = VK_IMAGE_TILING_OPTIMAL,
-            .usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
-                     VK_IMAGE_USAGE_SAMPLED_BIT,
-            .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
-            .queueFamilyIndexCount = 0,
-            .pQueueFamilyIndices = nullptr,
-            .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
-        });
-        image_views[i] = device.GetLogical().CreateImageView(VkImageViewCreateInfo{
-            .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
-            .pNext = nullptr,
-            .flags = 0,
-            .image = *images[i],
-            .viewType = VK_IMAGE_VIEW_TYPE_2D,
-            .format = VK_FORMAT_R16G16B16A16_SFLOAT,
-            .components =
-                {
-                    .r = VK_COMPONENT_SWIZZLE_IDENTITY,
-                    .g = VK_COMPONENT_SWIZZLE_IDENTITY,
-                    .b = VK_COMPONENT_SWIZZLE_IDENTITY,
-                    .a = VK_COMPONENT_SWIZZLE_IDENTITY,
-                },
-            .subresourceRange =
-                {
-                    .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
-                    .baseMipLevel = 0,
-                    .levelCount = 1,
-                    .baseArrayLayer = 0,
-                    .layerCount = 1,
-                },
-        });
+    m_dynamic_images.resize(m_image_count);
+    for (auto& images : m_dynamic_images) {
+        images.images[Easu] =
+            CreateWrappedImage(m_memory_allocator, m_extent, VK_FORMAT_R16G16B16A16_SFLOAT);
+        images.images[Rcas] =
+            CreateWrappedImage(m_memory_allocator, m_extent, VK_FORMAT_R16G16B16A16_SFLOAT);
+        images.image_views[Easu] =
+            CreateWrappedImageView(m_device, images.images[Easu], VK_FORMAT_R16G16B16A16_SFLOAT);
+        images.image_views[Rcas] =
+            CreateWrappedImageView(m_device, images.images[Rcas], VK_FORMAT_R16G16B16A16_SFLOAT);
     }
 }
 
-void FSR::CreatePipelineLayout() {
-    VkPushConstantRange push_const{
-        .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+void FSR::CreateRenderPasses() {
+    m_renderpass = CreateWrappedRenderPass(m_device, VK_FORMAT_R16G16B16A16_SFLOAT);
+
+    for (auto& images : m_dynamic_images) {
+        images.framebuffers[Easu] =
+            CreateWrappedFramebuffer(m_device, m_renderpass, images.image_views[Easu], m_extent);
+        images.framebuffers[Rcas] =
+            CreateWrappedFramebuffer(m_device, m_renderpass, images.image_views[Rcas], m_extent);
+    }
+}
+
+void FSR::CreateSampler() {
+    m_sampler = CreateBilinearSampler(m_device);
+}
+
+void FSR::CreateShaders() {
+    m_vert_shader = BuildShader(m_device, VULKAN_FIDELITYFX_FSR_VERT_SPV);
+    if (m_device.IsFloat16Supported()) {
+        m_easu_shader = BuildShader(m_device, VULKAN_FIDELITYFX_FSR_EASU_FP16_FRAG_SPV);
+        m_rcas_shader = BuildShader(m_device, VULKAN_FIDELITYFX_FSR_RCAS_FP16_FRAG_SPV);
+    } else {
+        m_easu_shader = BuildShader(m_device, VULKAN_FIDELITYFX_FSR_EASU_FP32_FRAG_SPV);
+        m_rcas_shader = BuildShader(m_device, VULKAN_FIDELITYFX_FSR_RCAS_FP32_FRAG_SPV);
+    }
+}
+
+void FSR::CreateDescriptorPool() {
+    // EASU: 1 descriptor
+    // RCAS: 1 descriptor
+    // 2 descriptors, 2 descriptor sets per invocation
+    m_descriptor_pool = CreateWrappedDescriptorPool(m_device, 2 * m_image_count, 2 * m_image_count);
+}
+
+void FSR::CreateDescriptorSetLayout() {
+    m_descriptor_set_layout =
+        CreateWrappedDescriptorSetLayout(m_device, {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER});
+}
+
+void FSR::CreateDescriptorSets() {
+    std::vector<VkDescriptorSetLayout> layouts(MaxFsrStage, *m_descriptor_set_layout);
+    for (auto& images : m_dynamic_images) {
+        images.descriptor_sets = CreateWrappedDescriptorSets(m_descriptor_pool, layouts);
+    }
+}
+
+void FSR::CreatePipelineLayouts() {
+    const VkPushConstantRange range{
+        .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
         .offset = 0,
-        .size = sizeof(std::array<u32, 4 * 4>),
+        .size = sizeof(PushConstants),
     };
     VkPipelineLayoutCreateInfo ci{
         .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
         .pNext = nullptr,
         .flags = 0,
         .setLayoutCount = 1,
-        .pSetLayouts = descriptor_set_layout.address(),
+        .pSetLayouts = m_descriptor_set_layout.address(),
         .pushConstantRangeCount = 1,
-        .pPushConstantRanges = &push_const,
+        .pPushConstantRanges = &range,
     };
-    pipeline_layout = device.GetLogical().CreatePipelineLayout(ci);
+    m_pipeline_layout = m_device.GetLogical().CreatePipelineLayout(ci);
 }
 
-void FSR::UpdateDescriptorSet(std::size_t image_index, VkImageView image_view) const {
-    const auto fsr_image_view = *image_views[image_index];
-    const auto blit_image_view = *image_views[image_count + image_index];
-
-    const VkDescriptorImageInfo image_info{
-        .sampler = VK_NULL_HANDLE,
-        .imageView = image_view,
-        .imageLayout = VK_IMAGE_LAYOUT_GENERAL,
-    };
-    const VkDescriptorImageInfo fsr_image_info{
-        .sampler = VK_NULL_HANDLE,
-        .imageView = fsr_image_view,
-        .imageLayout = VK_IMAGE_LAYOUT_GENERAL,
-    };
-    const VkDescriptorImageInfo blit_image_info{
-        .sampler = VK_NULL_HANDLE,
-        .imageView = blit_image_view,
-        .imageLayout = VK_IMAGE_LAYOUT_GENERAL,
-    };
-
-    VkWriteDescriptorSet sampler_write{
-        .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
-        .pNext = nullptr,
-        .dstSet = descriptor_sets[image_index * 2],
-        .dstBinding = 0,
-        .dstArrayElement = 0,
-        .descriptorCount = 1,
-        .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
-        .pImageInfo = &image_info,
-        .pBufferInfo = nullptr,
-        .pTexelBufferView = nullptr,
-    };
-
-    VkWriteDescriptorSet output_write{
-        .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
-        .pNext = nullptr,
-        .dstSet = descriptor_sets[image_index * 2],
-        .dstBinding = 1,
-        .dstArrayElement = 0,
-        .descriptorCount = 1,
-        .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
-        .pImageInfo = &fsr_image_info,
-        .pBufferInfo = nullptr,
-        .pTexelBufferView = nullptr,
-    };
-
-    device.GetLogical().UpdateDescriptorSets(std::array{sampler_write, output_write}, {});
-
-    sampler_write.dstSet = descriptor_sets[image_index * 2 + 1];
-    sampler_write.pImageInfo = &fsr_image_info;
-    output_write.dstSet = descriptor_sets[image_index * 2 + 1];
-    output_write.pImageInfo = &blit_image_info;
-
-    device.GetLogical().UpdateDescriptorSets(std::array{sampler_write, output_write}, {});
+void FSR::CreatePipelines() {
+    m_easu_pipeline = CreateWrappedPipeline(m_device, m_renderpass, m_pipeline_layout,
+                                            std::tie(m_vert_shader, m_easu_shader));
+    m_rcas_pipeline = CreateWrappedPipeline(m_device, m_renderpass, m_pipeline_layout,
+                                            std::tie(m_vert_shader, m_rcas_shader));
 }
 
-void FSR::CreateSampler() {
-    const VkSamplerCreateInfo ci{
-        .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
-        .pNext = nullptr,
-        .flags = 0,
-        .magFilter = VK_FILTER_LINEAR,
-        .minFilter = VK_FILTER_LINEAR,
-        .mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR,
-        .addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
-        .addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
-        .addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
-        .mipLodBias = 0.0f,
-        .anisotropyEnable = VK_FALSE,
-        .maxAnisotropy = 0.0f,
-        .compareEnable = VK_FALSE,
-        .compareOp = VK_COMPARE_OP_NEVER,
-        .minLod = 0.0f,
-        .maxLod = 0.0f,
-        .borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK,
-        .unnormalizedCoordinates = VK_FALSE,
-    };
-
-    sampler = device.GetLogical().CreateSampler(ci);
+void FSR::UpdateDescriptorSets(VkImageView image_view, size_t image_index) {
+    Images& images = m_dynamic_images[image_index];
+    std::vector<VkDescriptorImageInfo> image_infos;
+    std::vector<VkWriteDescriptorSet> updates;
+    image_infos.reserve(2);
+
+    updates.push_back(CreateWriteDescriptorSet(image_infos, *m_sampler, image_view,
+                                               images.descriptor_sets[Easu], 0));
+    updates.push_back(CreateWriteDescriptorSet(image_infos, *m_sampler, *images.image_views[Easu],
+                                               images.descriptor_sets[Rcas], 0));
+
+    m_device.GetLogical().UpdateDescriptorSets(updates, {});
 }
 
-void FSR::CreateShaders() {
-    if (device.IsFloat16Supported()) {
-        easu_shader = BuildShader(device, VULKAN_FIDELITYFX_FSR_EASU_FP16_COMP_SPV);
-        rcas_shader = BuildShader(device, VULKAN_FIDELITYFX_FSR_RCAS_FP16_COMP_SPV);
-    } else {
-        easu_shader = BuildShader(device, VULKAN_FIDELITYFX_FSR_EASU_FP32_COMP_SPV);
-        rcas_shader = BuildShader(device, VULKAN_FIDELITYFX_FSR_RCAS_FP32_COMP_SPV);
-    }
+void FSR::UploadImages(Scheduler& scheduler) {
+    if (m_images_ready) {
+        return;
+    }
+
+    scheduler.Record([&](vk::CommandBuffer cmdbuf) {
+        for (auto& image : m_dynamic_images) {
+            ClearColorImage(cmdbuf, *image.images[Easu]);
+            ClearColorImage(cmdbuf, *image.images[Rcas]);
+        }
+    });
+    scheduler.Finish();
+
+    m_images_ready = true;
 }
 
-void FSR::CreatePipeline() {
-    VkPipelineShaderStageCreateInfo shader_stage_easu{
-        .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
-        .pNext = nullptr,
-        .flags = 0,
-        .stage = VK_SHADER_STAGE_COMPUTE_BIT,
-        .module = *easu_shader,
-        .pName = "main",
-        .pSpecializationInfo = nullptr,
-    };
-
-    VkPipelineShaderStageCreateInfo shader_stage_rcas{
-        .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
-        .pNext = nullptr,
-        .flags = 0,
-        .stage = VK_SHADER_STAGE_COMPUTE_BIT,
-        .module = *rcas_shader,
-        .pName = "main",
-        .pSpecializationInfo = nullptr,
-    };
-
-    VkComputePipelineCreateInfo pipeline_ci_easu{
-        .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
-        .pNext = nullptr,
-        .flags = 0,
-        .stage = shader_stage_easu,
-        .layout = *pipeline_layout,
-        .basePipelineHandle = VK_NULL_HANDLE,
-        .basePipelineIndex = 0,
-    };
-
-    VkComputePipelineCreateInfo pipeline_ci_rcas{
-        .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
-        .pNext = nullptr,
-        .flags = 0,
-        .stage = shader_stage_rcas,
-        .layout = *pipeline_layout,
-        .basePipelineHandle = VK_NULL_HANDLE,
-        .basePipelineIndex = 0,
-    };
-
-    easu_pipeline = device.GetLogical().CreateComputePipeline(pipeline_ci_easu);
-    rcas_pipeline = device.GetLogical().CreateComputePipeline(pipeline_ci_rcas);
+VkImageView FSR::Draw(Scheduler& scheduler, size_t image_index, VkImage source_image,
+                      VkImageView source_image_view, VkExtent2D input_image_extent,
+                      const Common::Rectangle<f32>& crop_rect) {
+    Images& images = m_dynamic_images[image_index];
+
+    VkImage easu_image = *images.images[Easu];
+    VkImage rcas_image = *images.images[Rcas];
+    VkDescriptorSet easu_descriptor_set = images.descriptor_sets[Easu];
+    VkDescriptorSet rcas_descriptor_set = images.descriptor_sets[Rcas];
+    VkFramebuffer easu_framebuffer = *images.framebuffers[Easu];
+    VkFramebuffer rcas_framebuffer = *images.framebuffers[Rcas];
+    VkPipeline easu_pipeline = *m_easu_pipeline;
+    VkPipeline rcas_pipeline = *m_rcas_pipeline;
+    VkPipelineLayout pipeline_layout = *m_pipeline_layout;
+    VkRenderPass renderpass = *m_renderpass;
+    VkExtent2D extent = m_extent;
+
+    const f32 input_image_width = static_cast<f32>(input_image_extent.width);
+    const f32 input_image_height = static_cast<f32>(input_image_extent.height);
+    const f32 output_image_width = static_cast<f32>(extent.width);
+    const f32 output_image_height = static_cast<f32>(extent.height);
+    const f32 viewport_width = (crop_rect.right - crop_rect.left) * input_image_width;
+    const f32 viewport_x = crop_rect.left * input_image_width;
+    const f32 viewport_height = (crop_rect.bottom - crop_rect.top) * input_image_height;
+    const f32 viewport_y = crop_rect.top * input_image_height;
+
+    PushConstants easu_con{};
+    PushConstants rcas_con{};
+    FsrEasuConOffset(easu_con.data() + 0, easu_con.data() + 4, easu_con.data() + 8,
+                     easu_con.data() + 12, viewport_width, viewport_height, input_image_width,
+                     input_image_height, output_image_width, output_image_height, viewport_x,
+                     viewport_y);
+
+    const float sharpening =
+        static_cast<float>(Settings::values.fsr_sharpening_slider.GetValue()) / 100.0f;
+    FsrRcasCon(rcas_con.data(), sharpening);
+
+    UploadImages(scheduler);
+    UpdateDescriptorSets(source_image_view, image_index);
+
+    scheduler.RequestOutsideRenderPassOperationContext();
+    scheduler.Record([=](vk::CommandBuffer cmdbuf) {
+        TransitionImageLayout(cmdbuf, source_image, VK_IMAGE_LAYOUT_GENERAL);
+        TransitionImageLayout(cmdbuf, easu_image, VK_IMAGE_LAYOUT_GENERAL);
+        BeginRenderPass(cmdbuf, renderpass, easu_framebuffer, extent);
+        cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, easu_pipeline);
+        cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout, 0,
+                                  easu_descriptor_set, {});
+        cmdbuf.PushConstants(pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, easu_con);
+        cmdbuf.Draw(3, 1, 0, 0);
+        cmdbuf.EndRenderPass();
+
+        TransitionImageLayout(cmdbuf, easu_image, VK_IMAGE_LAYOUT_GENERAL);
+        TransitionImageLayout(cmdbuf, rcas_image, VK_IMAGE_LAYOUT_GENERAL);
+        BeginRenderPass(cmdbuf, renderpass, rcas_framebuffer, extent);
+        cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, rcas_pipeline);
+        cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout, 0,
+                                  rcas_descriptor_set, {});
+        cmdbuf.PushConstants(pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, rcas_con);
+        cmdbuf.Draw(3, 1, 0, 0);
+        cmdbuf.EndRenderPass();
+
+        TransitionImageLayout(cmdbuf, rcas_image, VK_IMAGE_LAYOUT_GENERAL);
+    });
+
+    return *images.image_views[Rcas];
 }
 
 } // namespace Vulkan
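For scale: the old compute path dispatched DivCeil(width, 16) x DivCeil(height, 16) workgroups of 64 invocations per stage, each invocation shading four pixels of a 16x16 tile through the ARmp8x8 swizzle — roughly 160 x 90 = 14,400 workgroups per pass at a 2560x1440 target — and synchronized the two stages with hand-rolled VkImageMemoryBarriers. The graphics path instead records one render pass and a single 3-vertex draw per stage, with the EASU → RCAS dependency expressed through the render passes and TransitionImageLayout calls.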

src/video_core/renderer_vulkan/present/fsr.h

@@ -15,38 +15,55 @@ class Scheduler;
 class FSR {
 public:
     explicit FSR(const Device& device, MemoryAllocator& memory_allocator, size_t image_count,
-                 VkExtent2D output_size);
-    VkImageView Draw(Scheduler& scheduler, size_t image_index, VkImageView image_view,
-                     VkExtent2D input_image_extent, const Common::Rectangle<f32>& crop_rect);
+                 VkExtent2D extent);
+    VkImageView Draw(Scheduler& scheduler, size_t image_index, VkImage source_image,
+                     VkImageView source_image_view, VkExtent2D input_image_extent,
+                     const Common::Rectangle<f32>& crop_rect);
 
 private:
+    void CreateImages();
+    void CreateRenderPasses();
+    void CreateSampler();
+    void CreateShaders();
     void CreateDescriptorPool();
     void CreateDescriptorSetLayout();
     void CreateDescriptorSets();
-    void CreateImages();
-    void CreateSampler();
-    void CreateShaders();
-    void CreatePipeline();
-    void CreatePipelineLayout();
+    void CreatePipelineLayouts();
+    void CreatePipelines();
 
-    void UpdateDescriptorSet(std::size_t image_index, VkImageView image_view) const;
+    void UploadImages(Scheduler& scheduler);
+    void UpdateDescriptorSets(VkImageView image_view, size_t image_index);
 
-    const Device& device;
-    MemoryAllocator& memory_allocator;
-    size_t image_count;
-    VkExtent2D output_size;
+    const Device& m_device;
+    MemoryAllocator& m_memory_allocator;
+    const size_t m_image_count;
+    const VkExtent2D m_extent;
 
-    vk::DescriptorPool descriptor_pool;
-    vk::DescriptorSetLayout descriptor_set_layout;
-    vk::DescriptorSets descriptor_sets;
-    vk::PipelineLayout pipeline_layout;
-    vk::ShaderModule easu_shader;
-    vk::ShaderModule rcas_shader;
-    vk::Pipeline easu_pipeline;
-    vk::Pipeline rcas_pipeline;
-    vk::Sampler sampler;
-    std::vector<vk::Image> images;
-    std::vector<vk::ImageView> image_views;
+    enum FsrStage {
+        Easu,
+        Rcas,
+        MaxFsrStage,
+    };
+
+    vk::DescriptorPool m_descriptor_pool;
+    vk::DescriptorSetLayout m_descriptor_set_layout;
+    vk::PipelineLayout m_pipeline_layout;
+    vk::ShaderModule m_vert_shader;
+    vk::ShaderModule m_easu_shader;
+    vk::ShaderModule m_rcas_shader;
+    vk::Pipeline m_easu_pipeline;
+    vk::Pipeline m_rcas_pipeline;
+    vk::RenderPass m_renderpass;
+    vk::Sampler m_sampler;
+
+    struct Images {
+        vk::DescriptorSets descriptor_sets;
+        std::array<vk::Image, MaxFsrStage> images;
+        std::array<vk::ImageView, MaxFsrStage> image_views;
+        std::array<vk::Framebuffer, MaxFsrStage> framebuffers;
+    };
+    std::vector<Images> m_dynamic_images;
+
+    bool m_images_ready{};
 };
 
 } // namespace Vulkan
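A hedged usage sketch of the reworked interface (the surrounding variables are placeholders; the real call site is the vk_blit_screen.cpp hunk at the end of this commit):

// One FSR instance serves all swapchain images; Draw() records into the
// scheduler once per frame and returns the RCAS output view.
FSR fsr(device, memory_allocator, swapchain_image_count,
        VkExtent2D{output_width, output_height});

VkImageView upscaled_view =
    fsr.Draw(scheduler, image_index, source_image, source_image_view,
             VkExtent2D{frame_width, frame_height}, crop_rect);
// 'upscaled_view' then feeds the window-adapt/present pass.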

src/video_core/renderer_vulkan/present/fxaa.cpp

@@ -63,7 +63,9 @@ void FXAA::CreateDescriptorPool() {
 }
 
 void FXAA::CreateDescriptorSetLayouts() {
-    m_descriptor_set_layout = CreateWrappedDescriptorSetLayout(m_device, 2);
+    m_descriptor_set_layout =
+        CreateWrappedDescriptorSetLayout(m_device, {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+                                                    VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER});
 }
 
 void FXAA::CreateDescriptorSets() {
@@ -112,9 +114,10 @@ void FXAA::UploadImages(Scheduler& scheduler) {
     m_images_ready = true;
 }
 
-VkImageView FXAA::Draw(Scheduler& scheduler, size_t image_index, VkImage source_image,
-                       VkImageView source_image_view) {
+void FXAA::Draw(Scheduler& scheduler, size_t image_index, VkImage* inout_image,
+                VkImageView* inout_image_view) {
     const Image& image{m_dynamic_images[image_index]};
+    const VkImage input_image{*inout_image};
     const VkImage output_image{*image.image};
     const VkDescriptorSet descriptor_set{image.descriptor_sets[0]};
     const VkFramebuffer framebuffer{*image.framebuffer};
@@ -124,11 +127,11 @@ VkImageView FXAA::Draw(Scheduler& scheduler, size_t image_index, VkImage source_
     const VkExtent2D extent{m_extent};
 
     UploadImages(scheduler);
-    UpdateDescriptorSets(source_image_view, image_index);
+    UpdateDescriptorSets(*inout_image_view, image_index);
 
     scheduler.RequestOutsideRenderPassOperationContext();
     scheduler.Record([=](vk::CommandBuffer cmdbuf) {
-        TransitionImageLayout(cmdbuf, source_image, VK_IMAGE_LAYOUT_GENERAL);
+        TransitionImageLayout(cmdbuf, input_image, VK_IMAGE_LAYOUT_GENERAL);
         TransitionImageLayout(cmdbuf, output_image, VK_IMAGE_LAYOUT_GENERAL);
         BeginRenderPass(cmdbuf, renderpass, framebuffer, extent);
         cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
@@ -138,7 +141,8 @@ VkImageView FXAA::Draw(Scheduler& scheduler, size_t image_index, VkImage source_
         TransitionImageLayout(cmdbuf, output_image, VK_IMAGE_LAYOUT_GENERAL);
     });
 
-    return *image.image_view;
+    *inout_image = *image.image;
+    *inout_image_view = *image.image_view;
 }
 
 } // namespace Vulkan

src/video_core/renderer_vulkan/present/fxaa.h

@@ -19,8 +19,8 @@ public:
                   VkExtent2D extent);
     ~FXAA() override;
 
-    VkImageView Draw(Scheduler& scheduler, size_t image_index, VkImage source_image,
-                     VkImageView source_image_view) override;
+    void Draw(Scheduler& scheduler, size_t image_index, VkImage* inout_image,
+              VkImageView* inout_image_view) override;
 
 private:
     void CreateImages();

src/video_core/renderer_vulkan/present/smaa.cpp

@@ -122,10 +122,15 @@ void SMAA::CreateDescriptorPool() {
 }
 
 void SMAA::CreateDescriptorSetLayouts() {
-    m_descriptor_set_layouts[EdgeDetection] = CreateWrappedDescriptorSetLayout(m_device, 1);
+    m_descriptor_set_layouts[EdgeDetection] =
+        CreateWrappedDescriptorSetLayout(m_device, {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER});
     m_descriptor_set_layouts[BlendingWeightCalculation] =
-        CreateWrappedDescriptorSetLayout(m_device, 3);
-    m_descriptor_set_layouts[NeighborhoodBlending] = CreateWrappedDescriptorSetLayout(m_device, 2);
+        CreateWrappedDescriptorSetLayout(m_device, {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+                                                    VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+                                                    VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER});
+    m_descriptor_set_layouts[NeighborhoodBlending] =
+        CreateWrappedDescriptorSetLayout(m_device, {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+                                                    VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER});
 }
 
 void SMAA::CreateDescriptorSets() {
@@ -204,10 +209,11 @@ void SMAA::UploadImages(Scheduler& scheduler) {
     m_images_ready = true;
 }
 
-VkImageView SMAA::Draw(Scheduler& scheduler, size_t image_index, VkImage source_image,
-                       VkImageView source_image_view) {
+void SMAA::Draw(Scheduler& scheduler, size_t image_index, VkImage* inout_image,
+                VkImageView* inout_image_view) {
     Images& images = m_dynamic_images[image_index];
 
+    VkImage input_image = *inout_image;
     VkImage output_image = *images.images[Output];
     VkImage edges_image = *images.images[Edges];
     VkImage blend_image = *images.images[Blend];
@@ -224,11 +230,11 @@ VkImageView SMAA::Draw(Scheduler& scheduler, size_t image_index, VkImage source_
     VkFramebuffer neighborhood_blending_framebuffer = *images.framebuffers[NeighborhoodBlending];
 
     UploadImages(scheduler);
-    UpdateDescriptorSets(source_image_view, image_index);
+    UpdateDescriptorSets(*inout_image_view, image_index);
 
     scheduler.RequestOutsideRenderPassOperationContext();
     scheduler.Record([=, this](vk::CommandBuffer cmdbuf) {
-        TransitionImageLayout(cmdbuf, source_image, VK_IMAGE_LAYOUT_GENERAL);
+        TransitionImageLayout(cmdbuf, input_image, VK_IMAGE_LAYOUT_GENERAL);
         TransitionImageLayout(cmdbuf, edges_image, VK_IMAGE_LAYOUT_GENERAL);
         BeginRenderPass(cmdbuf, *m_renderpasses[EdgeDetection], edge_detection_framebuffer,
                         m_extent);
@@ -264,7 +270,8 @@ VkImageView SMAA::Draw(Scheduler& scheduler, size_t image_index, VkImage source_
         TransitionImageLayout(cmdbuf, output_image, VK_IMAGE_LAYOUT_GENERAL);
     });
 
-    return *images.image_views[Output];
+    *inout_image = *images.images[Output];
+    *inout_image_view = *images.image_views[Output];
 }
 
 } // namespace Vulkan

src/video_core/renderer_vulkan/present/smaa.h

@@ -20,8 +20,8 @@ public:
                   VkExtent2D extent);
     ~SMAA() override;
 
-    VkImageView Draw(Scheduler& scheduler, size_t image_index, VkImage source_image,
-                     VkImageView source_image_view) override;
+    void Draw(Scheduler& scheduler, size_t image_index, VkImage* inout_image,
+              VkImageView* inout_image_view) override;
 
 private:
     enum SMAAStage {

src/video_core/renderer_vulkan/present/util.cpp

@@ -215,32 +215,37 @@ vk::ShaderModule CreateWrappedShaderModule(const Device& device, std::span<const
     });
 }
 
-vk::DescriptorPool CreateWrappedDescriptorPool(const Device& device, u32 max_descriptors,
-                                               u32 max_sets) {
-    const VkDescriptorPoolSize pool_size{
-        .type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
-        .descriptorCount = static_cast<u32>(max_descriptors),
-    };
+vk::DescriptorPool CreateWrappedDescriptorPool(const Device& device, size_t max_descriptors,
+                                               size_t max_sets,
+                                               std::initializer_list<VkDescriptorType> types) {
+    std::vector<VkDescriptorPoolSize> pool_sizes(types.size());
+    for (u32 i = 0; i < types.size(); i++) {
+        pool_sizes[i] = VkDescriptorPoolSize{
+            .type = std::data(types)[i],
+            .descriptorCount = static_cast<u32>(max_descriptors),
+        };
+    }
 
     return device.GetLogical().CreateDescriptorPool(VkDescriptorPoolCreateInfo{
         .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
         .pNext = nullptr,
         .flags = 0,
-        .maxSets = max_sets,
-        .poolSizeCount = 1,
-        .pPoolSizes = &pool_size,
+        .maxSets = static_cast<u32>(max_sets),
+        .poolSizeCount = static_cast<u32>(pool_sizes.size()),
+        .pPoolSizes = pool_sizes.data(),
     });
 }
 
-vk::DescriptorSetLayout CreateWrappedDescriptorSetLayout(const Device& device,
-                                                         u32 max_sampler_bindings) {
-    std::vector<VkDescriptorSetLayoutBinding> bindings(max_sampler_bindings);
-    for (u32 i = 0; i < max_sampler_bindings; i++) {
+vk::DescriptorSetLayout CreateWrappedDescriptorSetLayout(
+    const Device& device, std::initializer_list<VkDescriptorType> types) {
+    std::vector<VkDescriptorSetLayoutBinding> bindings(types.size());
+    for (size_t i = 0; i < types.size(); i++) {
         bindings[i] = {
-            .binding = i,
-            .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+            .binding = static_cast<u32>(i),
+            .descriptorType = std::data(types)[i],
             .descriptorCount = 1,
-            .stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT,
+            .stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT |
+                          VK_SHADER_STAGE_COMPUTE_BIT,
             .pImmutableSamplers = nullptr,
         };
     }
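For reference, the call sites introduced elsewhere in this commit pass the binding types explicitly, while the pool helper keeps a combined-image-sampler default (see util.h below); e.g. FSR's single-sampler layout and pool:

m_descriptor_set_layout =
    CreateWrappedDescriptorSetLayout(m_device, {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER});
m_descriptor_pool = CreateWrappedDescriptorPool(m_device, 2 * m_image_count, 2 * m_image_count);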

src/video_core/renderer_vulkan/present/util.h

@@ -25,10 +25,12 @@ vk::Framebuffer CreateWrappedFramebuffer(const Device& device, vk::RenderPass& r
                                          vk::ImageView& dest_image, VkExtent2D extent);
 vk::Sampler CreateWrappedSampler(const Device& device, VkFilter filter = VK_FILTER_LINEAR);
 vk::ShaderModule CreateWrappedShaderModule(const Device& device, std::span<const u32> code);
-vk::DescriptorPool CreateWrappedDescriptorPool(const Device& device, u32 max_sampler_bindings,
-                                               u32 max_sets);
-vk::DescriptorSetLayout CreateWrappedDescriptorSetLayout(const Device& device,
-                                                         u32 max_sampler_bindings);
+vk::DescriptorPool CreateWrappedDescriptorPool(const Device& device, size_t max_descriptors,
+                                               size_t max_sets,
+                                               std::initializer_list<VkDescriptorType> types = {
+                                                   VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER});
+vk::DescriptorSetLayout CreateWrappedDescriptorSetLayout(
+    const Device& device, std::initializer_list<VkDescriptorType> types);
 vk::DescriptorSets CreateWrappedDescriptorSets(vk::DescriptorPool& pool,
                                                vk::Span<VkDescriptorSetLayout> layouts);
 vk::PipelineLayout CreateWrappedPipelineLayout(const Device& device,

src/video_core/renderer_vulkan/vk_blit_screen.cpp

@@ -234,7 +234,7 @@ void BlitScreen::Draw(RasterizerVulkan& rasterizer, const Tegra::FramebufferConf
         });
     }
 
-    source_image_view = anti_alias->Draw(scheduler, image_index, source_image, source_image_view);
+    anti_alias->Draw(scheduler, image_index, &source_image, &source_image_view);
 
     const auto crop_rect = Tegra::NormalizeCrop(framebuffer, texture_width, texture_height);
     const VkExtent2D render_extent{
@@ -248,8 +248,8 @@
             .height = layout.screen.GetHeight(),
         };
 
-        source_image_view =
-            fsr->Draw(scheduler, image_index, source_image_view, render_extent, crop_rect);
+        source_image_view = fsr->Draw(scheduler, image_index, source_image, source_image_view,
+                                      render_extent, crop_rect);
 
         const Common::Rectangle<f32> output_crop{0, 0, 1, 1};
         window_adapt->Draw(scheduler, image_index, source_image_view, adapt_size, output_crop,