Merge pull request #5286 from ReinUsesLisp/rename-vk-device

renderer_vulkan: Rename VKDevice to Device

Commit 4801f4250d (52 changed files with 166 additions and 169 deletions)
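The change is mechanical: the device abstraction already lives in the `Vulkan` namespace, so the `VK` prefix on `VKDevice` carried no extra information, and every constructor signature, reference member, and forward declaration is updated from `VKDevice` to `Device`. Code outside the namespace now spells the qualified name `Vulkan::Device`, as the async_shaders hunks near the end show. A minimal sketch of the pattern, with a hypothetical consumer class rather than the exact yuzu declarations:

```cpp
// Illustrative sketch of the rename pattern applied across the backend.
// ExampleUser and its members are hypothetical, not yuzu declarations.
namespace Vulkan {

class Device;      // was: class VKDevice;
class VKScheduler; // other VK-prefixed classes are untouched by this commit

class ExampleUser {
public:
    explicit ExampleUser(const Device& device_, VKScheduler& scheduler_)
        : device{device_}, scheduler{scheduler_} {}

private:
    const Device& device; // was: const VKDevice& device;
    VKScheduler& scheduler;
};

} // namespace Vulkan
```

The rendered hunks below follow the same before/after shape file by file.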
@@ -225,7 +225,7 @@ constexpr std::array<VkPipelineShaderStageCreateInfo, 2> MakeStages(
 };
 }

-void UpdateOneTextureDescriptorSet(const VKDevice& device, VkDescriptorSet descriptor_set,
+void UpdateOneTextureDescriptorSet(const Device& device, VkDescriptorSet descriptor_set,
 VkSampler sampler, VkImageView image_view) {
 const VkDescriptorImageInfo image_info{
 .sampler = sampler,

@@ -247,7 +247,7 @@ void UpdateOneTextureDescriptorSet(const VKDevice& device, VkDescriptorSet descr
 device.GetLogical().UpdateDescriptorSets(write_descriptor_set, nullptr);
 }

-void UpdateTwoTexturesDescriptorSet(const VKDevice& device, VkDescriptorSet descriptor_set,
+void UpdateTwoTexturesDescriptorSet(const Device& device, VkDescriptorSet descriptor_set,
 VkSampler sampler, VkImageView image_view_0,
 VkImageView image_view_1) {
 const VkDescriptorImageInfo image_info_0{

@@ -326,7 +326,7 @@ void BindBlitState(vk::CommandBuffer cmdbuf, VkPipelineLayout layout,

 } // Anonymous namespace

-BlitImageHelper::BlitImageHelper(const VKDevice& device_, VKScheduler& scheduler_,
+BlitImageHelper::BlitImageHelper(const Device& device_, VKScheduler& scheduler_,
 StateTracker& state_tracker_, VKDescriptorPool& descriptor_pool)
 : device{device_}, scheduler{scheduler_}, state_tracker{state_tracker_},
 one_texture_set_layout(device.GetLogical().CreateDescriptorSetLayout(

@@ -15,12 +15,11 @@ namespace Vulkan {

 using VideoCommon::Offset2D;

-class VKDevice;
-class VKScheduler;
-class StateTracker;
-
+class Device;
 class Framebuffer;
 class ImageView;
+class StateTracker;
+class VKScheduler;

 struct BlitImagePipelineKey {
 constexpr auto operator<=>(const BlitImagePipelineKey&) const noexcept = default;

@@ -31,7 +30,7 @@ struct BlitImagePipelineKey {

 class BlitImageHelper {
 public:
-explicit BlitImageHelper(const VKDevice& device, VKScheduler& scheduler,
+explicit BlitImageHelper(const Device& device, VKScheduler& scheduler,
 StateTracker& state_tracker, VKDescriptorPool& descriptor_pool);
 ~BlitImageHelper();

@@ -67,7 +66,7 @@ private:

 void ConvertColorToDepthPipeline(vk::Pipeline& pipeline, VkRenderPass renderpass);

-const VKDevice& device;
+const Device& device;
 VKScheduler& scheduler;
 StateTracker& state_tracker;

@@ -47,7 +47,7 @@ VkSamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter
 return {};
 }

-VkSamplerAddressMode WrapMode(const VKDevice& device, Tegra::Texture::WrapMode wrap_mode,
+VkSamplerAddressMode WrapMode(const Device& device, Tegra::Texture::WrapMode wrap_mode,
 Tegra::Texture::TextureFilter filter) {
 switch (wrap_mode) {
 case Tegra::Texture::WrapMode::Wrap:

@@ -222,7 +222,7 @@ constexpr bool IsZetaFormat(PixelFormat pixel_format) {

 } // Anonymous namespace

-FormatInfo SurfaceFormat(const VKDevice& device, FormatType format_type, PixelFormat pixel_format) {
+FormatInfo SurfaceFormat(const Device& device, FormatType format_type, PixelFormat pixel_format) {
 ASSERT(static_cast<std::size_t>(pixel_format) < std::size(tex_format_tuples));

 auto tuple = tex_format_tuples[static_cast<std::size_t>(pixel_format)];

@@ -280,7 +280,7 @@ VkShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage) {
 return {};
 }

-VkPrimitiveTopology PrimitiveTopology([[maybe_unused]] const VKDevice& device,
+VkPrimitiveTopology PrimitiveTopology([[maybe_unused]] const Device& device,
 Maxwell::PrimitiveTopology topology) {
 switch (topology) {
 case Maxwell::PrimitiveTopology::Points:

@@ -526,7 +526,7 @@ VkCompareOp ComparisonOp(Maxwell::ComparisonOp comparison) {
 return {};
 }

-VkIndexType IndexFormat(const VKDevice& device, Maxwell::IndexFormat index_format) {
+VkIndexType IndexFormat(const Device& device, Maxwell::IndexFormat index_format) {
 switch (index_format) {
 case Maxwell::IndexFormat::UnsignedByte:
 if (!device.IsExtIndexTypeUint8Supported()) {

@@ -22,7 +22,7 @@ VkFilter Filter(Tegra::Texture::TextureFilter filter);

 VkSamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter);

-VkSamplerAddressMode WrapMode(const VKDevice& device, Tegra::Texture::WrapMode wrap_mode,
+VkSamplerAddressMode WrapMode(const Device& device, Tegra::Texture::WrapMode wrap_mode,
 Tegra::Texture::TextureFilter filter);

 VkCompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func);

@@ -35,17 +35,17 @@ struct FormatInfo {
 bool storage;
 };

-FormatInfo SurfaceFormat(const VKDevice& device, FormatType format_type, PixelFormat pixel_format);
+FormatInfo SurfaceFormat(const Device& device, FormatType format_type, PixelFormat pixel_format);

 VkShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage);

-VkPrimitiveTopology PrimitiveTopology(const VKDevice& device, Maxwell::PrimitiveTopology topology);
+VkPrimitiveTopology PrimitiveTopology(const Device& device, Maxwell::PrimitiveTopology topology);

 VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size);

 VkCompareOp ComparisonOp(Maxwell::ComparisonOp comparison);

-VkIndexType IndexFormat(const VKDevice& device, Maxwell::IndexFormat index_format);
+VkIndexType IndexFormat(const Device& device, Maxwell::IndexFormat index_format);

 VkStencilOp StencilOp(Maxwell::StencilOp stencil_op);

@@ -42,7 +42,7 @@ std::string GetReadableVersion(u32 version) {
 VK_VERSION_PATCH(version));
 }

-std::string GetDriverVersion(const VKDevice& device) {
+std::string GetDriverVersion(const Device& device) {
 // Extracted from
 // https://github.com/SaschaWillems/vulkan.gpuinfo.org/blob/5dddea46ea1120b0df14eef8f15ff8e318e35462/functions.php#L308-L314
 const u32 version = device.GetDriverVersion();

@@ -184,7 +184,7 @@ void RendererVulkan::InitializeDevice() {
 throw vk::Exception(VK_ERROR_INITIALIZATION_FAILED);
 }
 const vk::PhysicalDevice physical_device(devices[static_cast<size_t>(device_index)], dld);
-device = std::make_unique<VKDevice>(*instance, physical_device, *surface, dld);
+device = std::make_unique<Device>(*instance, physical_device, *surface, dld);
 }

 void RendererVulkan::Report() const {

@@ -27,9 +27,9 @@ class GPU;

 namespace Vulkan {

+class Device;
 class StateTracker;
 class VKBlitScreen;
-class VKDevice;
 class VKMemoryManager;
 class VKSwapchain;
 class VKScheduler;

@@ -74,7 +74,7 @@ private:
 VKScreenInfo screen_info;

 vk::DebugUtilsMessenger debug_callback;
-std::unique_ptr<VKDevice> device;
+std::unique_ptr<Device> device;
 std::unique_ptr<VKMemoryManager> memory_manager;
 std::unique_ptr<StateTracker> state_tracker;
 std::unique_ptr<VKScheduler> scheduler;

@@ -114,7 +114,7 @@ struct VKBlitScreen::BufferData {

 VKBlitScreen::VKBlitScreen(Core::Memory::Memory& cpu_memory_,
 Core::Frontend::EmuWindow& render_window_,
-VideoCore::RasterizerInterface& rasterizer_, const VKDevice& device_,
+VideoCore::RasterizerInterface& rasterizer_, const Device& device_,
 VKMemoryManager& memory_manager_, VKSwapchain& swapchain_,
 VKScheduler& scheduler_, const VKScreenInfo& screen_info_)
 : cpu_memory{cpu_memory_}, render_window{render_window_}, rasterizer{rasterizer_},

@@ -33,8 +33,8 @@ namespace Vulkan {

 struct ScreenInfo;

+class Device;
 class RasterizerVulkan;
-class VKDevice;
 class VKScheduler;
 class VKSwapchain;

@@ -42,7 +42,7 @@ class VKBlitScreen final {
 public:
 explicit VKBlitScreen(Core::Memory::Memory& cpu_memory,
 Core::Frontend::EmuWindow& render_window,
-VideoCore::RasterizerInterface& rasterizer, const VKDevice& device,
+VideoCore::RasterizerInterface& rasterizer, const Device& device,
 VKMemoryManager& memory_manager, VKSwapchain& swapchain,
 VKScheduler& scheduler, const VKScreenInfo& screen_info);
 ~VKBlitScreen();

@@ -85,7 +85,7 @@ private:
 Core::Memory::Memory& cpu_memory;
 Core::Frontend::EmuWindow& render_window;
 VideoCore::RasterizerInterface& rasterizer;
-const VKDevice& device;
+const Device& device;
 VKMemoryManager& memory_manager;
 VKSwapchain& swapchain;
 VKScheduler& scheduler;

@@ -34,13 +34,13 @@ constexpr VkAccessFlags UPLOAD_ACCESS_BARRIERS =
 constexpr VkAccessFlags TRANSFORM_FEEDBACK_WRITE_ACCESS =
 VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT | VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT;

-std::unique_ptr<VKStreamBuffer> CreateStreamBuffer(const VKDevice& device, VKScheduler& scheduler) {
+std::unique_ptr<VKStreamBuffer> CreateStreamBuffer(const Device& device, VKScheduler& scheduler) {
 return std::make_unique<VKStreamBuffer>(device, scheduler);
 }

 } // Anonymous namespace

-Buffer::Buffer(const VKDevice& device_, VKMemoryManager& memory_manager, VKScheduler& scheduler_,
+Buffer::Buffer(const Device& device_, VKMemoryManager& memory_manager, VKScheduler& scheduler_,
 VKStagingBufferPool& staging_pool_, VAddr cpu_addr_, std::size_t size_)
 : BufferBlock{cpu_addr_, size_}, device{device_}, scheduler{scheduler_}, staging_pool{
 staging_pool_} {

@@ -168,7 +168,7 @@ void Buffer::CopyFrom(const Buffer& src, std::size_t src_offset, std::size_t dst

 VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer_,
 Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_,
-const VKDevice& device_, VKMemoryManager& memory_manager_,
+const Device& device_, VKMemoryManager& memory_manager_,
 VKScheduler& scheduler_, VKStreamBuffer& stream_buffer_,
 VKStagingBufferPool& staging_pool_)
 : VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer>{rasterizer_, gpu_memory_,

@@ -15,13 +15,13 @@

 namespace Vulkan {

-class VKDevice;
+class Device;
 class VKMemoryManager;
 class VKScheduler;

 class Buffer final : public VideoCommon::BufferBlock {
 public:
-explicit Buffer(const VKDevice& device, VKMemoryManager& memory_manager, VKScheduler& scheduler,
+explicit Buffer(const Device& device, VKMemoryManager& memory_manager, VKScheduler& scheduler,
 VKStagingBufferPool& staging_pool, VAddr cpu_addr_, std::size_t size_);
 ~Buffer();

@@ -41,7 +41,7 @@ public:
 }

 private:
-const VKDevice& device;
+const Device& device;
 VKScheduler& scheduler;
 VKStagingBufferPool& staging_pool;

@@ -52,7 +52,7 @@ class VKBufferCache final : public VideoCommon::BufferCache<Buffer, VkBuffer, VK
 public:
 explicit VKBufferCache(VideoCore::RasterizerInterface& rasterizer,
 Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory,
-const VKDevice& device, VKMemoryManager& memory_manager,
+const Device& device, VKMemoryManager& memory_manager,
 VKScheduler& scheduler, VKStreamBuffer& stream_buffer,
 VKStagingBufferPool& staging_pool);
 ~VKBufferCache();

@@ -63,7 +63,7 @@ protected:
 std::shared_ptr<Buffer> CreateBlock(VAddr cpu_addr, std::size_t size) override;

 private:
-const VKDevice& device;
+const Device& device;
 VKMemoryManager& memory_manager;
 VKScheduler& scheduler;
 VKStagingBufferPool& staging_pool;

@@ -17,7 +17,7 @@ struct CommandPool::Pool {
 vk::CommandBuffers cmdbufs;
 };

-CommandPool::CommandPool(MasterSemaphore& master_semaphore_, const VKDevice& device_)
+CommandPool::CommandPool(MasterSemaphore& master_semaphore_, const Device& device_)
 : ResourcePool(master_semaphore_, COMMAND_BUFFER_POOL_SIZE), device{device_} {}

 CommandPool::~CommandPool() = default;

@@ -12,12 +12,12 @@

 namespace Vulkan {

+class Device;
 class MasterSemaphore;
-class VKDevice;

 class CommandPool final : public ResourcePool {
 public:
-explicit CommandPool(MasterSemaphore& master_semaphore_, const VKDevice& device_);
+explicit CommandPool(MasterSemaphore& master_semaphore_, const Device& device_);
 ~CommandPool() override;

 void Allocate(size_t begin, size_t end) override;

@@ -27,7 +27,7 @@ public:
 private:
 struct Pool;

-const VKDevice& device;
+const Device& device;
 std::vector<Pool> pools;
 };

@@ -86,7 +86,7 @@ VkDescriptorUpdateTemplateEntryKHR BuildInputOutputDescriptorUpdateTemplate() {

 } // Anonymous namespace

-VKComputePass::VKComputePass(const VKDevice& device, VKDescriptorPool& descriptor_pool,
+VKComputePass::VKComputePass(const Device& device, VKDescriptorPool& descriptor_pool,
 vk::Span<VkDescriptorSetLayoutBinding> bindings,
 vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
 vk::Span<VkPushConstantRange> push_constants,

@@ -162,7 +162,7 @@ VkDescriptorSet VKComputePass::CommitDescriptorSet(
 return set;
 }

-QuadArrayPass::QuadArrayPass(const VKDevice& device_, VKScheduler& scheduler_,
+QuadArrayPass::QuadArrayPass(const Device& device_, VKScheduler& scheduler_,
 VKDescriptorPool& descriptor_pool_,
 VKStagingBufferPool& staging_buffer_pool_,
 VKUpdateDescriptorQueue& update_descriptor_queue_)

@@ -211,7 +211,7 @@ std::pair<VkBuffer, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32
 return {*buffer.handle, 0};
 }

-Uint8Pass::Uint8Pass(const VKDevice& device, VKScheduler& scheduler_,
+Uint8Pass::Uint8Pass(const Device& device, VKScheduler& scheduler_,
 VKDescriptorPool& descriptor_pool, VKStagingBufferPool& staging_buffer_pool_,
 VKUpdateDescriptorQueue& update_descriptor_queue_)
 : VKComputePass(device, descriptor_pool, BuildInputOutputDescriptorSetBindings(),

@@ -255,7 +255,7 @@ std::pair<VkBuffer, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buff
 return {*buffer.handle, 0};
 }

-QuadIndexedPass::QuadIndexedPass(const VKDevice& device_, VKScheduler& scheduler_,
+QuadIndexedPass::QuadIndexedPass(const Device& device_, VKScheduler& scheduler_,
 VKDescriptorPool& descriptor_pool_,
 VKStagingBufferPool& staging_buffer_pool_,
 VKUpdateDescriptorQueue& update_descriptor_queue_)

@@ -15,14 +15,14 @@

 namespace Vulkan {

-class VKDevice;
+class Device;
 class VKScheduler;
 class VKStagingBufferPool;
 class VKUpdateDescriptorQueue;

 class VKComputePass {
 public:
-explicit VKComputePass(const VKDevice& device, VKDescriptorPool& descriptor_pool,
+explicit VKComputePass(const Device& device, VKDescriptorPool& descriptor_pool,
 vk::Span<VkDescriptorSetLayoutBinding> bindings,
 vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
 vk::Span<VkPushConstantRange> push_constants, std::span<const u32> code);

@@ -43,7 +43,7 @@ private:

 class QuadArrayPass final : public VKComputePass {
 public:
-explicit QuadArrayPass(const VKDevice& device_, VKScheduler& scheduler_,
+explicit QuadArrayPass(const Device& device_, VKScheduler& scheduler_,
 VKDescriptorPool& descriptor_pool_,
 VKStagingBufferPool& staging_buffer_pool_,
 VKUpdateDescriptorQueue& update_descriptor_queue_);

@@ -59,7 +59,7 @@ private:

 class Uint8Pass final : public VKComputePass {
 public:
-explicit Uint8Pass(const VKDevice& device_, VKScheduler& scheduler_,
+explicit Uint8Pass(const Device& device_, VKScheduler& scheduler_,
 VKDescriptorPool& descriptor_pool_,
 VKStagingBufferPool& staging_buffer_pool_,
 VKUpdateDescriptorQueue& update_descriptor_queue_);

@@ -75,7 +75,7 @@ private:

 class QuadIndexedPass final : public VKComputePass {
 public:
-explicit QuadIndexedPass(const VKDevice& device_, VKScheduler& scheduler_,
+explicit QuadIndexedPass(const Device& device_, VKScheduler& scheduler_,
 VKDescriptorPool& descriptor_pool_,
 VKStagingBufferPool& staging_buffer_pool_,
 VKUpdateDescriptorQueue& update_descriptor_queue_);

@@ -15,7 +15,7 @@

 namespace Vulkan {

-VKComputePipeline::VKComputePipeline(const VKDevice& device_, VKScheduler& scheduler_,
+VKComputePipeline::VKComputePipeline(const Device& device_, VKScheduler& scheduler_,
 VKDescriptorPool& descriptor_pool_,
 VKUpdateDescriptorQueue& update_descriptor_queue_,
 const SPIRVShader& shader_)

@@ -11,13 +11,13 @@

 namespace Vulkan {

-class VKDevice;
+class Device;
 class VKScheduler;
 class VKUpdateDescriptorQueue;

 class VKComputePipeline final {
 public:
-explicit VKComputePipeline(const VKDevice& device_, VKScheduler& scheduler_,
+explicit VKComputePipeline(const Device& device_, VKScheduler& scheduler_,
 VKDescriptorPool& descriptor_pool_,
 VKUpdateDescriptorQueue& update_descriptor_queue_,
 const SPIRVShader& shader_);

@@ -48,7 +48,7 @@ private:

 vk::Pipeline CreatePipeline() const;

-const VKDevice& device;
+const Device& device;
 VKScheduler& scheduler;
 ShaderEntries entries;

@@ -32,7 +32,7 @@ void DescriptorAllocator::Allocate(std::size_t begin, std::size_t end) {
 descriptors_allocations.push_back(descriptor_pool.AllocateDescriptors(layout, end - begin));
 }

-VKDescriptorPool::VKDescriptorPool(const VKDevice& device_, VKScheduler& scheduler)
+VKDescriptorPool::VKDescriptorPool(const Device& device_, VKScheduler& scheduler)
 : device{device_}, master_semaphore{scheduler.GetMasterSemaphore()}, active_pool{
 AllocateNewPool()} {}

@@ -11,7 +11,7 @@

 namespace Vulkan {

-class VKDevice;
+class Device;
 class VKDescriptorPool;
 class VKScheduler;

@@ -39,7 +39,7 @@ class VKDescriptorPool final {
 friend DescriptorAllocator;

 public:
-explicit VKDescriptorPool(const VKDevice& device, VKScheduler& scheduler);
+explicit VKDescriptorPool(const Device& device, VKScheduler& scheduler);
 ~VKDescriptorPool();

 VKDescriptorPool(const VKDescriptorPool&) = delete;

@@ -50,7 +50,7 @@ private:

 vk::DescriptorSets AllocateDescriptors(VkDescriptorSetLayout layout, std::size_t count);

-const VKDevice& device;
+const Device& device;
 MasterSemaphore& master_semaphore;

 std::vector<vk::DescriptorPool> pools;

@@ -206,8 +206,8 @@ std::unordered_map<VkFormat, VkFormatProperties> GetFormatProperties(

 } // Anonymous namespace

-VKDevice::VKDevice(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR surface,
-const vk::InstanceDispatch& dld_)
+Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR surface,
+const vk::InstanceDispatch& dld_)
 : instance{instance_}, dld{dld_}, physical{physical_}, properties{physical.GetProperties()},
 format_properties{GetFormatProperties(physical, dld)} {
 CheckSuitability();

@@ -449,10 +449,10 @@ VKDevice::VKDevice(VkInstance instance_, vk::PhysicalDevice physical_, VkSurface
 use_asynchronous_shaders = Settings::values.use_asynchronous_shaders.GetValue();
 }

-VKDevice::~VKDevice() = default;
+Device::~Device() = default;

-VkFormat VKDevice::GetSupportedFormat(VkFormat wanted_format, VkFormatFeatureFlags wanted_usage,
-FormatType format_type) const {
+VkFormat Device::GetSupportedFormat(VkFormat wanted_format, VkFormatFeatureFlags wanted_usage,
+FormatType format_type) const {
 if (IsFormatSupported(wanted_format, wanted_usage, format_type)) {
 return wanted_format;
 }

@@ -483,18 +483,18 @@ VkFormat VKDevice::GetSupportedFormat(VkFormat wanted_format, VkFormatFeatureFla
 return wanted_format;
 }

-void VKDevice::ReportLoss() const {
-LOG_CRITICAL(Render_Vulkan, "Device loss occurred!");
+void Device::ReportLoss() const {
+LOG_CRITICAL(Render_Vulkan, "Device loss occured!");

 // Wait for the log to flush and for Nsight Aftermath to dump the results
 std::this_thread::sleep_for(std::chrono::seconds{15});
 }

-void VKDevice::SaveShader(const std::vector<u32>& spirv) const {
+void Device::SaveShader(const std::vector<u32>& spirv) const {
 nsight_aftermath_tracker.SaveShader(spirv);
 }

-bool VKDevice::IsOptimalAstcSupported(const VkPhysicalDeviceFeatures& features) const {
+bool Device::IsOptimalAstcSupported(const VkPhysicalDeviceFeatures& features) const {
 // Disable for now to avoid converting ASTC twice.
 static constexpr std::array astc_formats = {
 VK_FORMAT_ASTC_4x4_UNORM_BLOCK, VK_FORMAT_ASTC_4x4_SRGB_BLOCK,

@@ -528,7 +528,7 @@ bool VKDevice::IsOptimalAstcSupported(const VkPhysicalDeviceFeatures& features)
 return true;
 }

-bool VKDevice::TestDepthStencilBlits() const {
+bool Device::TestDepthStencilBlits() const {
 static constexpr VkFormatFeatureFlags required_features =
 VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT;
 const auto test_features = [](VkFormatProperties props) {

@@ -538,8 +538,8 @@ bool VKDevice::TestDepthStencilBlits() const {
 test_features(format_properties.at(VK_FORMAT_D24_UNORM_S8_UINT));
 }

-bool VKDevice::IsFormatSupported(VkFormat wanted_format, VkFormatFeatureFlags wanted_usage,
-FormatType format_type) const {
+bool Device::IsFormatSupported(VkFormat wanted_format, VkFormatFeatureFlags wanted_usage,
+FormatType format_type) const {
 const auto it = format_properties.find(wanted_format);
 if (it == format_properties.end()) {
 UNIMPLEMENTED_MSG("Unimplemented format query={}", wanted_format);

@@ -549,7 +549,7 @@ bool VKDevice::IsFormatSupported(VkFormat wanted_format, VkFormatFeatureFlags wa
 return (supported_usage & wanted_usage) == wanted_usage;
 }

-void VKDevice::CheckSuitability() const {
+void Device::CheckSuitability() const {
 std::bitset<REQUIRED_EXTENSIONS.size()> available_extensions;
 for (const VkExtensionProperties& property : physical.EnumerateDeviceExtensionProperties()) {
 for (std::size_t i = 0; i < REQUIRED_EXTENSIONS.size(); ++i) {

@@ -614,7 +614,7 @@ void VKDevice::CheckSuitability() const {
 }
 }

-std::vector<const char*> VKDevice::LoadExtensions() {
+std::vector<const char*> Device::LoadExtensions() {
 std::vector<const char*> extensions;
 extensions.reserve(7 + REQUIRED_EXTENSIONS.size());
 extensions.insert(extensions.begin(), REQUIRED_EXTENSIONS.begin(), REQUIRED_EXTENSIONS.end());

@@ -767,7 +767,7 @@ std::vector<const char*> VKDevice::LoadExtensions() {
 return extensions;
 }

-void VKDevice::SetupFamilies(VkSurfaceKHR surface) {
+void Device::SetupFamilies(VkSurfaceKHR surface) {
 const std::vector queue_family_properties = physical.GetQueueFamilyProperties();
 std::optional<u32> graphics;
 std::optional<u32> present;

@@ -798,14 +798,14 @@ void VKDevice::SetupFamilies(VkSurfaceKHR surface) {
 present_family = *present;
 }

-void VKDevice::SetupFeatures() {
+void Device::SetupFeatures() {
 const auto supported_features{physical.GetFeatures()};
 is_formatless_image_load_supported = supported_features.shaderStorageImageReadWithoutFormat;
 is_blit_depth_stencil_supported = TestDepthStencilBlits();
 is_optimal_astc_supported = IsOptimalAstcSupported(supported_features);
 }

-void VKDevice::CollectTelemetryParameters() {
+void Device::CollectTelemetryParameters() {
 VkPhysicalDeviceDriverPropertiesKHR driver{
 .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR,
 .pNext = nullptr,

@@ -832,7 +832,7 @@ void VKDevice::CollectTelemetryParameters() {
 }
 }

-void VKDevice::CollectToolingInfo() {
+void Device::CollectToolingInfo() {
 if (!ext_tooling_info) {
 return;
 }

@@ -858,7 +858,7 @@ void VKDevice::CollectToolingInfo() {
 }
 }

-std::vector<VkDeviceQueueCreateInfo> VKDevice::GetDeviceQueueCreateInfos() const {
+std::vector<VkDeviceQueueCreateInfo> Device::GetDeviceQueueCreateInfos() const {
 static constexpr float QUEUE_PRIORITY = 1.0f;

 std::unordered_set<u32> unique_queue_families{graphics_family, present_family};

@@ -22,11 +22,11 @@ enum class FormatType { Linear, Optimal, Buffer };
 const u32 GuestWarpSize = 32;

 /// Handles data specific to a physical device.
-class VKDevice final {
+class Device final {
 public:
-explicit VKDevice(VkInstance instance, vk::PhysicalDevice physical, VkSurfaceKHR surface,
-const vk::InstanceDispatch& dld);
-~VKDevice();
+explicit Device(VkInstance instance, vk::PhysicalDevice physical, VkSurfaceKHR surface,
+const vk::InstanceDispatch& dld);
+~Device();

 /**
 * Returns a format supported by the device for the passed requeriments.

@@ -14,11 +14,11 @@

 namespace Vulkan {

-InnerFence::InnerFence(const VKDevice& device_, VKScheduler& scheduler_, u32 payload_,
+InnerFence::InnerFence(const Device& device_, VKScheduler& scheduler_, u32 payload_,
 bool is_stubbed_)
 : FenceBase{payload_, is_stubbed_}, device{device_}, scheduler{scheduler_} {}

-InnerFence::InnerFence(const VKDevice& device_, VKScheduler& scheduler_, GPUVAddr address_,
+InnerFence::InnerFence(const Device& device_, VKScheduler& scheduler_, GPUVAddr address_,
 u32 payload_, bool is_stubbed_)
 : FenceBase{address_, payload_, is_stubbed_}, device{device_}, scheduler{scheduler_} {}

@@ -75,7 +75,7 @@ bool InnerFence::IsEventSignalled() const {
 VKFenceManager::VKFenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_,
 Tegra::MemoryManager& memory_manager_, TextureCache& texture_cache_,
 VKBufferCache& buffer_cache_, VKQueryCache& query_cache_,
-const VKDevice& device_, VKScheduler& scheduler_)
+const Device& device_, VKScheduler& scheduler_)
 : GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_},
 device{device_}, scheduler{scheduler_} {}

@@ -21,16 +21,16 @@ class RasterizerInterface;

 namespace Vulkan {

+class Device;
 class VKBufferCache;
-class VKDevice;
 class VKQueryCache;
 class VKScheduler;

 class InnerFence : public VideoCommon::FenceBase {
 public:
-explicit InnerFence(const VKDevice& device_, VKScheduler& scheduler_, u32 payload_,
+explicit InnerFence(const Device& device_, VKScheduler& scheduler_, u32 payload_,
 bool is_stubbed_);
-explicit InnerFence(const VKDevice& device_, VKScheduler& scheduler_, GPUVAddr address_,
+explicit InnerFence(const Device& device_, VKScheduler& scheduler_, GPUVAddr address_,
 u32 payload_, bool is_stubbed_);
 ~InnerFence();

@@ -43,7 +43,7 @@ public:
 private:
 bool IsEventSignalled() const;

-const VKDevice& device;
+const Device& device;
 VKScheduler& scheduler;
 vk::Event event;
 u64 ticks = 0;

@@ -58,7 +58,7 @@ public:
 explicit VKFenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_,
 Tegra::MemoryManager& memory_manager_, TextureCache& texture_cache_,
 VKBufferCache& buffer_cache_, VKQueryCache& query_cache_,
-const VKDevice& device_, VKScheduler& scheduler_);
+const Device& device_, VKScheduler& scheduler_);

 protected:
 Fence CreateFence(u32 value, bool is_stubbed) override;

@@ -68,7 +68,7 @@ protected:
 void WaitFence(Fence& fence) override;

 private:
-const VKDevice& device;
+const Device& device;
 VKScheduler& scheduler;
 };

@@ -94,7 +94,7 @@ VkSampleCountFlagBits ConvertMsaaMode(Tegra::Texture::MsaaMode msaa_mode) {

 } // Anonymous namespace

-VKGraphicsPipeline::VKGraphicsPipeline(const VKDevice& device_, VKScheduler& scheduler_,
+VKGraphicsPipeline::VKGraphicsPipeline(const Device& device_, VKScheduler& scheduler_,
 VKDescriptorPool& descriptor_pool_,
 VKUpdateDescriptorQueue& update_descriptor_queue_,
 const GraphicsPipelineCacheKey& key,

@@ -40,8 +40,8 @@ static_assert(std::has_unique_object_representations_v<GraphicsPipelineCacheKey>
 static_assert(std::is_trivially_copyable_v<GraphicsPipelineCacheKey>);
 static_assert(std::is_trivially_constructible_v<GraphicsPipelineCacheKey>);

+class Device;
 class VKDescriptorPool;
-class VKDevice;
 class VKScheduler;
 class VKUpdateDescriptorQueue;

@@ -49,7 +49,7 @@ using SPIRVProgram = std::array<std::optional<SPIRVShader>, Maxwell::MaxShaderSt

 class VKGraphicsPipeline final {
 public:
-explicit VKGraphicsPipeline(const VKDevice& device_, VKScheduler& scheduler_,
+explicit VKGraphicsPipeline(const Device& device_, VKScheduler& scheduler_,
 VKDescriptorPool& descriptor_pool,
 VKUpdateDescriptorQueue& update_descriptor_queue_,
 const GraphicsPipelineCacheKey& key,

@@ -85,7 +85,7 @@ private:
 vk::Pipeline CreatePipeline(const SPIRVProgram& program, VkRenderPass renderpass,
 u32 num_color_buffers) const;

-const VKDevice& device;
+const Device& device;
 VKScheduler& scheduler;
 const GraphicsPipelineCacheKey cache_key;
 const u64 hash;

@@ -14,7 +14,7 @@ namespace Vulkan {

 using namespace std::chrono_literals;

-MasterSemaphore::MasterSemaphore(const VKDevice& device) {
+MasterSemaphore::MasterSemaphore(const Device& device) {
 static constexpr VkSemaphoreTypeCreateInfoKHR semaphore_type_ci{
 .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR,
 .pNext = nullptr,

@@ -12,11 +12,11 @@

 namespace Vulkan {

-class VKDevice;
+class Device;

 class MasterSemaphore {
 public:
-explicit MasterSemaphore(const VKDevice& device);
+explicit MasterSemaphore(const Device& device);
 ~MasterSemaphore();

 /// Returns the current logical tick.

@@ -29,7 +29,7 @@ u64 GetAllocationChunkSize(u64 required_size) {

 class VKMemoryAllocation final {
 public:
-explicit VKMemoryAllocation(const VKDevice& device_, vk::DeviceMemory memory_,
+explicit VKMemoryAllocation(const Device& device_, vk::DeviceMemory memory_,
 VkMemoryPropertyFlags properties_, u64 allocation_size_, u32 type_)
 : device{device_}, memory{std::move(memory_)}, properties{properties_},
 allocation_size{allocation_size_}, shifted_type{ShiftType(type_)} {}

@@ -104,7 +104,7 @@ private:
 return std::nullopt;
 }

-const VKDevice& device; ///< Vulkan device.
+const Device& device; ///< Vulkan device.
 const vk::DeviceMemory memory; ///< Vulkan memory allocation handler.
 const VkMemoryPropertyFlags properties; ///< Vulkan properties.
 const u64 allocation_size; ///< Size of this allocation.

@@ -117,7 +117,7 @@ private:
 std::vector<const VKMemoryCommitImpl*> commits;
 };

-VKMemoryManager::VKMemoryManager(const VKDevice& device_)
+VKMemoryManager::VKMemoryManager(const Device& device_)
 : device{device_}, properties{device_.GetPhysical().GetMemoryProperties()} {}

 VKMemoryManager::~VKMemoryManager() = default;

@@ -207,7 +207,7 @@ VKMemoryCommit VKMemoryManager::TryAllocCommit(const VkMemoryRequirements& requi
 return {};
 }

-VKMemoryCommitImpl::VKMemoryCommitImpl(const VKDevice& device_, VKMemoryAllocation* allocation_,
+VKMemoryCommitImpl::VKMemoryCommitImpl(const Device& device_, VKMemoryAllocation* allocation_,
 const vk::DeviceMemory& memory_, u64 begin_, u64 end_)
 : device{device_}, memory{memory_}, interval{begin_, end_}, allocation{allocation_} {}

@@ -13,8 +13,8 @@

 namespace Vulkan {

+class Device;
 class MemoryMap;
-class VKDevice;
 class VKMemoryAllocation;
 class VKMemoryCommitImpl;

@@ -22,7 +22,7 @@ using VKMemoryCommit = std::unique_ptr<VKMemoryCommitImpl>;

 class VKMemoryManager final {
 public:
-explicit VKMemoryManager(const VKDevice& device_);
+explicit VKMemoryManager(const Device& device_);
 VKMemoryManager(const VKMemoryManager&) = delete;
 ~VKMemoryManager();

@@ -49,7 +49,7 @@ private:
 VKMemoryCommit TryAllocCommit(const VkMemoryRequirements& requirements,
 VkMemoryPropertyFlags wanted_properties);

-const VKDevice& device; ///< Device handler.
+const Device& device; ///< Device handler.
 const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties.
 std::vector<std::unique_ptr<VKMemoryAllocation>> allocations; ///< Current allocations.
 };

@@ -59,7 +59,7 @@ class VKMemoryCommitImpl final {
 friend MemoryMap;

 public:
-explicit VKMemoryCommitImpl(const VKDevice& device_, VKMemoryAllocation* allocation_,
+explicit VKMemoryCommitImpl(const Device& device_, VKMemoryAllocation* allocation_,
 const vk::DeviceMemory& memory_, u64 begin_, u64 end_);
 ~VKMemoryCommitImpl();

@@ -85,7 +85,7 @@ private:
 /// Unmaps memory.
 void Unmap() const;

-const VKDevice& device; ///< Vulkan device.
+const Device& device; ///< Vulkan device.
 const vk::DeviceMemory& memory; ///< Vulkan device memory handler.
 std::pair<u64, u64> interval{}; ///< Interval where the commit exists.
 VKMemoryAllocation* allocation{}; ///< Pointer to the large memory allocation.

@@ -149,7 +149,7 @@ Shader::~Shader() = default;
 VKPipelineCache::VKPipelineCache(RasterizerVulkan& rasterizer_, Tegra::GPU& gpu_,
 Tegra::Engines::Maxwell3D& maxwell3d_,
 Tegra::Engines::KeplerCompute& kepler_compute_,
-Tegra::MemoryManager& gpu_memory_, const VKDevice& device_,
+Tegra::MemoryManager& gpu_memory_, const Device& device_,
 VKScheduler& scheduler_, VKDescriptorPool& descriptor_pool_,
 VKUpdateDescriptorQueue& update_descriptor_queue_)
 : VideoCommon::ShaderCache<Shader>{rasterizer_}, gpu{gpu_}, maxwell3d{maxwell3d_},

@@ -33,10 +33,10 @@ class System;

 namespace Vulkan {

+class Device;
 class RasterizerVulkan;
 class VKComputePipeline;
 class VKDescriptorPool;
-class VKDevice;
 class VKScheduler;
 class VKUpdateDescriptorQueue;

@@ -121,7 +121,7 @@ public:
 explicit VKPipelineCache(RasterizerVulkan& rasterizer, Tegra::GPU& gpu,
 Tegra::Engines::Maxwell3D& maxwell3d,
 Tegra::Engines::KeplerCompute& kepler_compute,
-Tegra::MemoryManager& gpu_memory, const VKDevice& device,
+Tegra::MemoryManager& gpu_memory, const Device& device,
 VKScheduler& scheduler, VKDescriptorPool& descriptor_pool,
 VKUpdateDescriptorQueue& update_descriptor_queue);
 ~VKPipelineCache() override;

@@ -148,7 +148,7 @@ private:
 Tegra::Engines::KeplerCompute& kepler_compute;
 Tegra::MemoryManager& gpu_memory;

-const VKDevice& device;
+const Device& device;
 VKScheduler& scheduler;
 VKDescriptorPool& descriptor_pool;
 VKUpdateDescriptorQueue& update_descriptor_queue;

@@ -27,7 +27,7 @@ constexpr VkQueryType GetTarget(QueryType type) {

 } // Anonymous namespace

-QueryPool::QueryPool(const VKDevice& device_, VKScheduler& scheduler, QueryType type_)
+QueryPool::QueryPool(const Device& device_, VKScheduler& scheduler, QueryType type_)
 : ResourcePool{scheduler.GetMasterSemaphore(), GROW_STEP}, device{device_}, type{type_} {}

 QueryPool::~QueryPool() = default;

@@ -68,7 +68,7 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {

 VKQueryCache::VKQueryCache(VideoCore::RasterizerInterface& rasterizer_,
 Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_,
-const VKDevice& device_, VKScheduler& scheduler_)
+const Device& device_, VKScheduler& scheduler_)
 : QueryCacheBase{rasterizer_, maxwell3d_, gpu_memory_}, device{device_}, scheduler{scheduler_},
 query_pools{
 QueryPool{device_, scheduler_, QueryType::SamplesPassed},

@@ -96,9 +96,9 @@ void VKQueryCache::Reserve(QueryType type, std::pair<VkQueryPool, u32> query) {
 HostCounter::HostCounter(VKQueryCache& cache_, std::shared_ptr<HostCounter> dependency_,
 QueryType type_)
 : HostCounterBase{std::move(dependency_)}, cache{cache_}, type{type_},
-query{cache_.AllocateQuery(type_)}, tick{cache_.Scheduler().CurrentTick()} {
-const vk::Device* logical = &cache_.Device().GetLogical();
-cache_.Scheduler().Record([logical, query = query](vk::CommandBuffer cmdbuf) {
+query{cache_.AllocateQuery(type_)}, tick{cache_.GetScheduler().CurrentTick()} {
+const vk::Device* logical = &cache.GetDevice().GetLogical();
+cache.GetScheduler().Record([logical, query = query](vk::CommandBuffer cmdbuf) {
 logical->ResetQueryPoolEXT(query.first, query.second, 1);
 cmdbuf.BeginQuery(query.first, query.second, VK_QUERY_CONTROL_PRECISE_BIT);
 });

@@ -109,17 +109,17 @@ HostCounter::~HostCounter() {
 }

 void HostCounter::EndQuery() {
-cache.Scheduler().Record(
+cache.GetScheduler().Record(
 [query = query](vk::CommandBuffer cmdbuf) { cmdbuf.EndQuery(query.first, query.second); });
 }

 u64 HostCounter::BlockingQuery() const {
-if (tick >= cache.Scheduler().CurrentTick()) {
-cache.Scheduler().Flush();
+if (tick >= cache.GetScheduler().CurrentTick()) {
+cache.GetScheduler().Flush();
 }

 u64 data;
-const VkResult query_result = cache.Device().GetLogical().GetQueryResults(
+const VkResult query_result = cache.GetDevice().GetLogical().GetQueryResults(
 query.first, query.second, 1, sizeof(data), &data, sizeof(data),
 VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);

@@ -127,7 +127,7 @@ u64 HostCounter::BlockingQuery() const {
 case VK_SUCCESS:
 return data;
 case VK_ERROR_DEVICE_LOST:
-cache.Device().ReportLoss();
+cache.GetDevice().ReportLoss();
 [[fallthrough]];
 default:
 throw vk::Exception(query_result);

@@ -21,8 +21,8 @@ class RasterizerInterface;
 namespace Vulkan {

 class CachedQuery;
+class Device;
 class HostCounter;
-class VKDevice;
 class VKQueryCache;
 class VKScheduler;

@@ -30,7 +30,7 @@ using CounterStream = VideoCommon::CounterStreamBase<VKQueryCache, HostCounter>;

 class QueryPool final : public ResourcePool {
 public:
-explicit QueryPool(const VKDevice& device, VKScheduler& scheduler, VideoCore::QueryType type);
+explicit QueryPool(const Device& device, VKScheduler& scheduler, VideoCore::QueryType type);
 ~QueryPool() override;

 std::pair<VkQueryPool, u32> Commit();

@@ -43,7 +43,7 @@ protected:
 private:
 static constexpr std::size_t GROW_STEP = 512;

-const VKDevice& device;
+const Device& device;
 const VideoCore::QueryType type;

 std::vector<vk::QueryPool> pools;

@@ -55,23 +55,23 @@ class VKQueryCache final
 public:
 explicit VKQueryCache(VideoCore::RasterizerInterface& rasterizer_,
 Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_,
-const VKDevice& device_, VKScheduler& scheduler_);
+const Device& device_, VKScheduler& scheduler_);
 ~VKQueryCache();

 std::pair<VkQueryPool, u32> AllocateQuery(VideoCore::QueryType type);

 void Reserve(VideoCore::QueryType type, std::pair<VkQueryPool, u32> query);

-const VKDevice& Device() const noexcept {
+const Device& GetDevice() const noexcept {
 return device;
 }

-VKScheduler& Scheduler() const noexcept {
+VKScheduler& GetScheduler() const noexcept {
 return scheduler;
 }

 private:
-const VKDevice& device;
+const Device& device;
 VKScheduler& scheduler;
 std::array<QueryPool, VideoCore::NumQueryTypes> query_pools;
 };
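Besides the type rename, the query cache hunks above also rename the `Device()` and `Scheduler()` accessors to `GetDevice()` and `GetScheduler()`. This is presumably forced by the rename itself: once the class is called plain `Device`, a member function named `Device` would shadow the type name inside `VKQueryCache`. A hypothetical sketch of the resulting shape (class and member names are illustrative, not the exact yuzu declarations):

```cpp
namespace Vulkan {

class Device {};      // stub standing in for the real Vulkan::Device
class VKScheduler {}; // stub standing in for the real scheduler

class QueryCacheSketch {
public:
    explicit QueryCacheSketch(const Device& device_, VKScheduler& scheduler_)
        : device{device_}, scheduler{scheduler_} {}

    // Renamed from Device(): a member function called Device would make the
    // identifier refer to the function inside this class, so later members
    // could no longer name the type without qualification.
    const Device& GetDevice() const noexcept {
        return device;
    }

    VKScheduler& GetScheduler() const noexcept {
        return scheduler;
    }

private:
    const Device& device;
    VKScheduler& scheduler;
};

} // namespace Vulkan
```

The remaining hunks continue the same mechanical substitution.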
@@ -62,7 +62,7 @@ namespace {

 constexpr auto COMPUTE_SHADER_INDEX = static_cast<size_t>(Tegra::Engines::ShaderType::Compute);

-VkViewport GetViewportState(const VKDevice& device, const Maxwell& regs, size_t index) {
+VkViewport GetViewportState(const Device& device, const Maxwell& regs, size_t index) {
 const auto& src = regs.viewport_transform[index];
 const float width = src.scale_x * 2.0f;
 const float height = src.scale_y * 2.0f;

@@ -239,7 +239,7 @@ public:
 index.type = type;
 }

-void Bind(const VKDevice& device, VKScheduler& scheduler) const {
+void Bind(const Device& device, VKScheduler& scheduler) const {
 // Use this large switch case to avoid dispatching more memory in the record lambda than
 // what we need. It looks horrible, but it's the best we can do on standard C++.
 switch (vertex.num_buffers) {

@@ -330,7 +330,7 @@ private:
 } index;

 template <size_t N>
-void BindStatic(const VKDevice& device, VKScheduler& scheduler) const {
+void BindStatic(const Device& device, VKScheduler& scheduler) const {
 if (device.IsExtExtendedDynamicStateSupported()) {
 if (index.buffer) {
 BindStatic<N, true, true>(scheduler);

@@ -409,7 +409,7 @@ void RasterizerVulkan::DrawParameters::Draw(vk::CommandBuffer cmdbuf) const {
 RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
 Tegra::MemoryManager& gpu_memory_,
 Core::Memory::Memory& cpu_memory_, VKScreenInfo& screen_info_,
-const VKDevice& device_, VKMemoryManager& memory_manager_,
+const Device& device_, VKMemoryManager& memory_manager_,
 StateTracker& state_tracker_, VKScheduler& scheduler_)
 : RasterizerAccelerated{cpu_memory_}, gpu{gpu_},
 gpu_memory{gpu_memory_}, maxwell3d{gpu.Maxwell3D()}, kepler_compute{gpu.KeplerCompute()},

@@ -55,7 +55,7 @@ class RasterizerVulkan final : public VideoCore::RasterizerAccelerated {
 public:
 explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
 Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_,
-VKScreenInfo& screen_info_, const VKDevice& device_,
+VKScreenInfo& screen_info_, const Device& device_,
 VKMemoryManager& memory_manager_, StateTracker& state_tracker_,
 VKScheduler& scheduler_);
 ~RasterizerVulkan() override;

@@ -212,7 +212,7 @@ private:
 Tegra::Engines::KeplerCompute& kepler_compute;

 VKScreenInfo& screen_info;
-const VKDevice& device;
+const Device& device;
 VKMemoryManager& memory_manager;
 StateTracker& state_tracker;
 VKScheduler& scheduler;

@@ -37,7 +37,7 @@ void VKScheduler::CommandChunk::ExecuteAll(vk::CommandBuffer cmdbuf) {
 last = nullptr;
 }

-VKScheduler::VKScheduler(const VKDevice& device_, StateTracker& state_tracker_)
+VKScheduler::VKScheduler(const Device& device_, StateTracker& state_tracker_)
 : device{device_}, state_tracker{state_tracker_},
 master_semaphore{std::make_unique<MasterSemaphore>(device)},
 command_pool{std::make_unique<CommandPool>(*master_semaphore, device)} {

@@ -17,17 +17,17 @@
 namespace Vulkan {

 class CommandPool;
+class Device;
 class Framebuffer;
 class MasterSemaphore;
 class StateTracker;
-class VKDevice;
 class VKQueryCache;

 /// The scheduler abstracts command buffer and fence management with an interface that's able to do
 /// OpenGL-like operations on Vulkan command buffers.
 class VKScheduler {
 public:
-explicit VKScheduler(const VKDevice& device, StateTracker& state_tracker);
+explicit VKScheduler(const Device& device, StateTracker& state_tracker);
 ~VKScheduler();

 /// Returns the current command buffer tick.

@@ -179,7 +179,7 @@ private:

 void AcquireNewChunk();

-const VKDevice& device;
+const Device& device;
 StateTracker& state_tracker;

 std::unique_ptr<MasterSemaphore> master_semaphore;

@@ -274,7 +274,7 @@ bool IsPrecise(Operation operand) {

 class SPIRVDecompiler final : public Sirit::Module {
 public:
-explicit SPIRVDecompiler(const VKDevice& device_, const ShaderIR& ir_, ShaderType stage_,
+explicit SPIRVDecompiler(const Device& device_, const ShaderIR& ir_, ShaderType stage_,
 const Registry& registry_, const Specialization& specialization_)
 : Module(0x00010300), device{device_}, ir{ir_}, stage{stage_}, header{ir_.GetHeader()},
 registry{registry_}, specialization{specialization_} {

@@ -2742,7 +2742,7 @@ private:
 };
 static_assert(operation_decompilers.size() == static_cast<std::size_t>(OperationCode::Amount));

-const VKDevice& device;
+const Device& device;
 const ShaderIR& ir;
 const ShaderType stage;
 const Tegra::Shader::Header header;

@@ -3130,7 +3130,7 @@ ShaderEntries GenerateShaderEntries(const VideoCommon::Shader::ShaderIR& ir) {
 return entries;
 }

-std::vector<u32> Decompile(const VKDevice& device, const VideoCommon::Shader::ShaderIR& ir,
+std::vector<u32> Decompile(const Device& device, const VideoCommon::Shader::ShaderIR& ir,
 ShaderType stage, const VideoCommon::Shader::Registry& registry,
 const Specialization& specialization) {
 return SPIRVDecompiler(device, ir, stage, registry, specialization).Assemble();

@@ -15,10 +15,8 @@
 #include "video_core/shader/shader_ir.h"

 namespace Vulkan {
-class VKDevice;
-}

-namespace Vulkan {
+class Device;

 using Maxwell = Tegra::Engines::Maxwell3D::Regs;
 using UniformTexelEntry = VideoCommon::Shader::SamplerEntry;

@@ -109,7 +107,7 @@ struct SPIRVShader {

 ShaderEntries GenerateShaderEntries(const VideoCommon::Shader::ShaderIR& ir);

-std::vector<u32> Decompile(const VKDevice& device, const VideoCommon::Shader::ShaderIR& ir,
+std::vector<u32> Decompile(const Device& device, const VideoCommon::Shader::ShaderIR& ir,
 Tegra::Engines::ShaderType stage,
 const VideoCommon::Shader::Registry& registry,
 const Specialization& specialization);

@@ -13,7 +13,7 @@

 namespace Vulkan {

-vk::ShaderModule BuildShader(const VKDevice& device, std::span<const u32> code) {
+vk::ShaderModule BuildShader(const Device& device, std::span<const u32> code) {
 return device.GetLogical().CreateShaderModule({
 .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
 .pNext = nullptr,

@@ -11,8 +11,8 @@

 namespace Vulkan {

-class VKDevice;
+class Device;

-vk::ShaderModule BuildShader(const VKDevice& device, std::span<const u32> code);
+vk::ShaderModule BuildShader(const Device& device, std::span<const u32> code);

 } // namespace Vulkan

@@ -19,7 +19,7 @@ namespace Vulkan {
 VKStagingBufferPool::StagingBuffer::StagingBuffer(std::unique_ptr<VKBuffer> buffer_)
 : buffer{std::move(buffer_)} {}

-VKStagingBufferPool::VKStagingBufferPool(const VKDevice& device_, VKMemoryManager& memory_manager_,
+VKStagingBufferPool::VKStagingBufferPool(const Device& device_, VKMemoryManager& memory_manager_,
 VKScheduler& scheduler_)
 : device{device_}, memory_manager{memory_manager_}, scheduler{scheduler_} {}

@@ -14,7 +14,7 @@

 namespace Vulkan {

-class VKDevice;
+class Device;
 class VKScheduler;

 struct VKBuffer final {

@@ -24,7 +24,7 @@ struct VKBuffer final {

 class VKStagingBufferPool final {
 public:
-explicit VKStagingBufferPool(const VKDevice& device, VKMemoryManager& memory_manager,
+explicit VKStagingBufferPool(const Device& device, VKMemoryManager& memory_manager,
 VKScheduler& scheduler);
 ~VKStagingBufferPool();

@@ -58,7 +58,7 @@ private:

 u64 ReleaseLevel(StagingBuffersCache& cache, std::size_t log2);

-const VKDevice& device;
+const Device& device;
 VKMemoryManager& memory_manager;
 VKScheduler& scheduler;

@@ -60,7 +60,7 @@ u32 GetMemoryType(const VkPhysicalDeviceMemoryProperties& properties,

 } // Anonymous namespace

-VKStreamBuffer::VKStreamBuffer(const VKDevice& device_, VKScheduler& scheduler_)
+VKStreamBuffer::VKStreamBuffer(const Device& device_, VKScheduler& scheduler_)
 : device{device_}, scheduler{scheduler_} {
 CreateBuffers();
 ReserveWatches(current_watches, WATCHES_INITIAL_RESERVE);

@@ -13,13 +13,13 @@

 namespace Vulkan {

-class VKDevice;
+class Device;
 class VKFenceWatch;
 class VKScheduler;

 class VKStreamBuffer final {
 public:
-explicit VKStreamBuffer(const VKDevice& device, VKScheduler& scheduler);
+explicit VKStreamBuffer(const Device& device, VKScheduler& scheduler);
 ~VKStreamBuffer();

 /**

@@ -54,7 +54,7 @@ private:

 void WaitPendingOperations(u64 requested_upper_bound);

-const VKDevice& device; ///< Vulkan device manager.
+const Device& device; ///< Vulkan device manager.
 VKScheduler& scheduler; ///< Command scheduler.

 vk::Buffer buffer; ///< Mapped buffer.

@@ -56,7 +56,7 @@ VkExtent2D ChooseSwapExtent(const VkSurfaceCapabilitiesKHR& capabilities, u32 wi

 } // Anonymous namespace

-VKSwapchain::VKSwapchain(VkSurfaceKHR surface_, const VKDevice& device_, VKScheduler& scheduler_)
+VKSwapchain::VKSwapchain(VkSurfaceKHR surface_, const Device& device_, VKScheduler& scheduler_)
 : surface{surface_}, device{device_}, scheduler{scheduler_} {}

 VKSwapchain::~VKSwapchain() = default;

@@ -15,12 +15,12 @@ struct FramebufferLayout;

 namespace Vulkan {

-class VKDevice;
+class Device;
 class VKScheduler;

 class VKSwapchain {
 public:
-explicit VKSwapchain(VkSurfaceKHR surface, const VKDevice& device, VKScheduler& scheduler);
+explicit VKSwapchain(VkSurfaceKHR surface, const Device& device, VKScheduler& scheduler);
 ~VKSwapchain();

 /// Creates (or recreates) the swapchain with a given size.

@@ -73,7 +73,7 @@ private:
 void Destroy();

 const VkSurfaceKHR surface;
-const VKDevice& device;
+const Device& device;
 VKScheduler& scheduler;

 vk::SwapchainKHR swapchain;

@@ -93,7 +93,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
 }
 }

-[[nodiscard]] VkImageCreateInfo MakeImageCreateInfo(const VKDevice& device, const ImageInfo& info) {
+[[nodiscard]] VkImageCreateInfo MakeImageCreateInfo(const Device& device, const ImageInfo& info) {
 const auto format_info = MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, info.format);
 VkImageCreateFlags flags = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
 if (info.type == ImageType::e2D && info.resources.layers >= 6 &&

@@ -146,14 +146,14 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
 };
 }

-[[nodiscard]] vk::Image MakeImage(const VKDevice& device, const ImageInfo& info) {
+[[nodiscard]] vk::Image MakeImage(const Device& device, const ImageInfo& info) {
 if (info.type == ImageType::Buffer) {
 return vk::Image{};
 }
 return device.GetLogical().CreateImage(MakeImageCreateInfo(device, info));
 }

-[[nodiscard]] vk::Buffer MakeBuffer(const VKDevice& device, const ImageInfo& info) {
+[[nodiscard]] vk::Buffer MakeBuffer(const Device& device, const ImageInfo& info) {
 if (info.type != ImageType::Buffer) {
 return vk::Buffer{};
 }

@@ -205,7 +205,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
 }
 }

-[[nodiscard]] VkAttachmentDescription AttachmentDescription(const VKDevice& device,
+[[nodiscard]] VkAttachmentDescription AttachmentDescription(const Device& device,
 const ImageView* image_view) {
 const auto pixel_format = image_view->format;
 return VkAttachmentDescription{

@@ -19,11 +19,11 @@ using VideoCommon::Offset2D;
 using VideoCommon::RenderTargets;
 using VideoCore::Surface::PixelFormat;

-class VKDevice;
 class VKScheduler;
 class VKStagingBufferPool;

 class BlitImageHelper;
+class Device;
 class Image;
 class ImageView;
 class Framebuffer;

@@ -68,7 +68,7 @@ struct ImageBufferMap {
 };

 struct TextureCacheRuntime {
-const VKDevice& device;
+const Device& device;
 VKScheduler& scheduler;
 VKMemoryManager& memory_manager;
 VKStagingBufferPool& staging_buffer_pool;

@@ -177,7 +177,7 @@ public:
 private:
 [[nodiscard]] vk::ImageView MakeDepthStencilView(VkImageAspectFlags aspect_mask);

-const VKDevice* device = nullptr;
+const Device* device = nullptr;
 std::array<vk::ImageView, VideoCommon::NUM_IMAGE_VIEW_TYPES> image_views;
 vk::ImageView depth_view;
 vk::ImageView stencil_view;

@@ -14,7 +14,7 @@

 namespace Vulkan {

-VKUpdateDescriptorQueue::VKUpdateDescriptorQueue(const VKDevice& device_, VKScheduler& scheduler_)
+VKUpdateDescriptorQueue::VKUpdateDescriptorQueue(const Device& device_, VKScheduler& scheduler_)
 : device{device_}, scheduler{scheduler_} {}

 VKUpdateDescriptorQueue::~VKUpdateDescriptorQueue() = default;

@@ -12,7 +12,7 @@

 namespace Vulkan {

-class VKDevice;
+class Device;
 class VKScheduler;

 struct DescriptorUpdateEntry {

@@ -31,7 +31,7 @@ struct DescriptorUpdateEntry {

 class VKUpdateDescriptorQueue final {
 public:
-explicit VKUpdateDescriptorQueue(const VKDevice& device_, VKScheduler& scheduler_);
+explicit VKUpdateDescriptorQueue(const Device& device_, VKScheduler& scheduler_);
 ~VKUpdateDescriptorQueue();

 void TickFrame();

@@ -69,7 +69,7 @@ public:
 }

 private:
-const VKDevice& device;
+const Device& device;
 VKScheduler& scheduler;

 const DescriptorUpdateEntry* upload_start = nullptr;

@@ -134,7 +134,7 @@ void AsyncShaders::QueueOpenGLShader(const OpenGL::Device& device,
 }

 void AsyncShaders::QueueVulkanShader(Vulkan::VKPipelineCache* pp_cache,
-const Vulkan::VKDevice& device, Vulkan::VKScheduler& scheduler,
+const Vulkan::Device& device, Vulkan::VKScheduler& scheduler,
 Vulkan::VKDescriptorPool& descriptor_pool,
 Vulkan::VKUpdateDescriptorQueue& update_descriptor_queue,
 std::vector<VkDescriptorSetLayoutBinding> bindings,

@@ -94,7 +94,7 @@ public:
 CompilerSettings compiler_settings, const Registry& registry,
 VAddr cpu_addr);

-void QueueVulkanShader(Vulkan::VKPipelineCache* pp_cache, const Vulkan::VKDevice& device,
+void QueueVulkanShader(Vulkan::VKPipelineCache* pp_cache, const Vulkan::Device& device,
 Vulkan::VKScheduler& scheduler,
 Vulkan::VKDescriptorPool& descriptor_pool,
 Vulkan::VKUpdateDescriptorQueue& update_descriptor_queue,

@@ -123,7 +123,7 @@ private:

 // For Vulkan
 Vulkan::VKPipelineCache* pp_cache;
-const Vulkan::VKDevice* vk_device;
+const Vulkan::Device* vk_device;
 Vulkan::VKScheduler* scheduler;
 Vulkan::VKDescriptorPool* descriptor_pool;
 Vulkan::VKUpdateDescriptorQueue* update_descriptor_queue;