vk_memory_manager: Improve memory manager and its API
Fix a bug where the memory allocator could leave gaps between commits. To fix it, the allocation algorithm was reworked, although it remains short in lines of code. Rework the allocation API into self-contained movable objects instead of naively using a unique_ptr to do the job for us. Remove the VK prefix.
Parent: f728a504aa
Commit: e996f1ad09

13 changed files with 319 additions and 344 deletions
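The heart of the API rework is replacing the heap-allocated VKMemoryCommit (a std::unique_ptr<VKMemoryCommitImpl>) with a self-contained, move-only value type that frees its range on destruction. A minimal sketch of that ownership pattern, using simplified stand-in types rather than the commit's exact code:

    #include <algorithm>
    #include <cstdint>
    #include <utility>
    #include <vector>

    // Stand-in for MemoryAllocation: owns the committed [begin, end) ranges.
    class Allocation {
    public:
        void Track(std::uint64_t begin, std::uint64_t end) {
            commits.push_back({begin, end});
        }
        void Free(std::uint64_t begin) {
            const auto it = std::find_if(commits.begin(), commits.end(),
                                         [begin](const auto& r) { return r.first == begin; });
            if (it != commits.end()) {
                commits.erase(it);
            }
        }

    private:
        std::vector<std::pair<std::uint64_t, std::uint64_t>> commits;
    };

    // Move-only handle in the spirit of the new MemoryCommit.
    class Commit {
    public:
        Commit() noexcept = default;
        Commit(Allocation* allocation_, std::uint64_t begin_, std::uint64_t end_) noexcept
            : allocation{allocation_}, begin{begin_}, end{end_} {}
        ~Commit() {
            Release();
        }

        Commit(Commit&& rhs) noexcept
            : allocation{std::exchange(rhs.allocation, nullptr)}, begin{rhs.begin}, end{rhs.end} {}

        Commit& operator=(Commit&& rhs) noexcept {
            Release(); // drop the currently held range before stealing rhs's
            allocation = std::exchange(rhs.allocation, nullptr);
            begin = rhs.begin;
            end = rhs.end;
            return *this;
        }

        Commit(const Commit&) = delete;
        Commit& operator=(const Commit&) = delete;

    private:
        void Release() {
            if (allocation) {
                allocation->Free(begin);
            }
        }

        Allocation* allocation = nullptr; // null marks an empty (or moved-from) handle
        std::uint64_t begin = 0;
        std::uint64_t end = 0;
    };

Compared with unique_ptr, this avoids a heap allocation per commit and lets a default-constructed object serve as the "reset" state, as ReleaseRawImages() does in the diff below.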
@@ -150,8 +150,8 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool
     SetUniformData(data, framebuffer);
     SetVertexData(data, framebuffer);
 
-    auto map = buffer_commit->Map();
-    std::memcpy(map.Address(), &data, sizeof(data));
+    const std::span<u8> map = buffer_commit.Map();
+    std::memcpy(map.data(), &data, sizeof(data));
 
     if (!use_accelerated) {
         const u64 image_offset = GetRawImageOffset(framebuffer, image_index);
@@ -165,8 +165,8 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool
         constexpr u32 block_height_log2 = 4;
         const u32 bytes_per_pixel = GetBytesPerPixel(framebuffer);
         Tegra::Texture::UnswizzleTexture(
-            std::span(map.Address() + image_offset, size_bytes), std::span(host_ptr, size_bytes),
-            bytes_per_pixel, framebuffer.width, framebuffer.height, 1, block_height_log2, 0);
+            map.subspan(image_offset, size_bytes), std::span(host_ptr, size_bytes), bytes_per_pixel,
+            framebuffer.width, framebuffer.height, 1, block_height_log2, 0);
 
         const VkBufferImageCopy copy{
             .bufferOffset = image_offset,
@@ -224,8 +224,6 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool
                                   VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, write_barrier);
         });
     }
-    map.Release();
-
     scheduler.Record([renderpass = *renderpass, framebuffer = *framebuffers[image_index],
                       descriptor_set = descriptor_sets[image_index], buffer = *buffer,
                       size = swapchain.GetSize(), pipeline = *pipeline,
@@ -642,7 +640,7 @@ void VKBlitScreen::ReleaseRawImages() {
     raw_images.clear();
     raw_buffer_commits.clear();
     buffer.reset();
-    buffer_commit.reset();
+    buffer_commit = MemoryCommit{};
 }
 
 void VKBlitScreen::CreateStagingBuffer(const Tegra::FramebufferConfig& framebuffer) {
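With value semantics, the old buffer_commit.reset() becomes an assignment from a default-constructed object, as ReleaseRawImages() does above: the move assignment releases the previously held range. A short illustration reusing the Commit/Allocation sketch from the top of this page (illustrative names, not the commit's code):

    int main() {
        Allocation allocation;
        allocation.Track(0, 64);
        Commit commit{&allocation, 0, 64};

        // Equivalent of `buffer_commit = MemoryCommit{};`:
        // the move assignment calls Release() on the old value, freeing [0, 64).
        commit = Commit{};
    }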
@@ -104,14 +104,14 @@ private:
     vk::Sampler sampler;
 
     vk::Buffer buffer;
-    VKMemoryCommit buffer_commit;
+    MemoryCommit buffer_commit;
 
     std::vector<u64> resource_ticks;
 
     std::vector<vk::Semaphore> semaphores;
     std::vector<vk::Image> raw_images;
     std::vector<vk::ImageView> raw_image_views;
-    std::vector<VKMemoryCommit> raw_buffer_commits;
+    std::vector<MemoryCommit> raw_buffer_commits;
     u32 raw_width = 0;
     u32 raw_height = 0;
 };
@@ -37,10 +37,10 @@ constexpr VkAccessFlags TRANSFORM_FEEDBACK_WRITE_ACCESS =
 } // Anonymous namespace
 
 Buffer::Buffer(const Device& device_, VKMemoryManager& memory_manager, VKScheduler& scheduler_,
-               VKStagingBufferPool& staging_pool_, VAddr cpu_addr_, std::size_t size_)
+               StagingBufferPool& staging_pool_, VAddr cpu_addr_, std::size_t size_)
     : BufferBlock{cpu_addr_, size_}, device{device_}, scheduler{scheduler_}, staging_pool{
                                                                                  staging_pool_} {
-    const VkBufferCreateInfo ci{
+    buffer = device.GetLogical().CreateBuffer(VkBufferCreateInfo{
         .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
         .pNext = nullptr,
         .flags = 0,
@@ -49,22 +49,20 @@ Buffer::Buffer(const Device& device_, VKMemoryManager& memory_manager, VKSchedul
         .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
         .queueFamilyIndexCount = 0,
         .pQueueFamilyIndices = nullptr,
-    };
-
-    buffer.handle = device.GetLogical().CreateBuffer(ci);
-    buffer.commit = memory_manager.Commit(buffer.handle, false);
+    });
+    commit = memory_manager.Commit(buffer, false);
 }
 
 Buffer::~Buffer() = default;
 
 void Buffer::Upload(std::size_t offset, std::size_t data_size, const u8* data) {
-    const auto& staging = staging_pool.GetUnusedBuffer(data_size, true);
-    std::memcpy(staging.commit->Map(data_size), data, data_size);
+    const auto& staging = staging_pool.Request(data_size, true);
+    std::memcpy(staging.mapped_span.data(), data, data_size);
 
     scheduler.RequestOutsideRenderPassOperationContext();
 
     const VkBuffer handle = Handle();
-    scheduler.Record([staging = *staging.handle, handle, offset, data_size,
+    scheduler.Record([staging = staging.buffer, handle, offset, data_size,
                       &device = device](vk::CommandBuffer cmdbuf) {
         const VkBufferMemoryBarrier read_barrier{
             .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
@@ -100,12 +98,12 @@ void Buffer::Upload(std::size_t offset, std::size_t data_size, const u8* data) {
 }
 
 void Buffer::Download(std::size_t offset, std::size_t data_size, u8* data) {
-    const auto& staging = staging_pool.GetUnusedBuffer(data_size, true);
+    auto staging = staging_pool.Request(data_size, true);
     scheduler.RequestOutsideRenderPassOperationContext();
 
     const VkBuffer handle = Handle();
     scheduler.Record(
-        [staging = *staging.handle, handle, offset, data_size](vk::CommandBuffer cmdbuf) {
+        [staging = staging.buffer, handle, offset, data_size](vk::CommandBuffer cmdbuf) {
             const VkBufferMemoryBarrier barrier{
                 .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
                 .pNext = nullptr,
@@ -126,7 +124,7 @@ void Buffer::Download(std::size_t offset, std::size_t data_size, u8* data) {
         });
     scheduler.Finish();
 
-    std::memcpy(data, staging.commit->Map(data_size), data_size);
+    std::memcpy(data, staging.mapped_span.data(), data_size);
 }
 
 void Buffer::CopyFrom(const Buffer& src, std::size_t src_offset, std::size_t dst_offset,
@@ -166,7 +164,7 @@ VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer_,
                              Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_,
                              const Device& device_, VKMemoryManager& memory_manager_,
                              VKScheduler& scheduler_, VKStreamBuffer& stream_buffer_,
-                             VKStagingBufferPool& staging_pool_)
+                             StagingBufferPool& staging_pool_)
     : VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer>{rasterizer_, gpu_memory_,
                                                                  cpu_memory_, stream_buffer_},
       device{device_}, memory_manager{memory_manager_}, scheduler{scheduler_}, staging_pool{
@@ -181,12 +179,12 @@ std::shared_ptr<Buffer> VKBufferCache::CreateBlock(VAddr cpu_addr, std::size_t s
 
 VKBufferCache::BufferInfo VKBufferCache::GetEmptyBuffer(std::size_t size) {
     size = std::max(size, std::size_t(4));
-    const auto& empty = staging_pool.GetUnusedBuffer(size, false);
+    const auto& empty = staging_pool.Request(size, false);
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([size, buffer = *empty.handle](vk::CommandBuffer cmdbuf) {
+    scheduler.Record([size, buffer = empty.buffer](vk::CommandBuffer cmdbuf) {
         cmdbuf.FillBuffer(buffer, 0, size, 0);
     });
-    return {*empty.handle, 0, 0};
+    return {empty.buffer, 0, 0};
 }
 
 } // namespace Vulkan
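Request() now returns a StagingBufferRef holding the buffer handle and a pre-mapped host span, so Upload() and Download() can memcpy directly instead of mapping and unmapping around every access. A sketch of the caller-side pattern under simplified stand-in types (the real ref and pool also carry ownership bookkeeping):

    #include <cstddef>
    #include <cstring>
    #include <span>

    struct VkBuffer_T;
    using VkBuffer = VkBuffer_T*; // opaque handle, as in Vulkan

    struct StagingBufferRef {
        VkBuffer buffer;                      // handle captured by the recorded command
        std::span<unsigned char> mapped_span; // host-visible mapping, kept alive by the pool
    };

    // Upload pattern from Buffer::Upload: copy into the pre-mapped span, then
    // record a GPU copy that reads from ref.buffer.
    void UploadBytes(const StagingBufferRef& ref, const unsigned char* data, std::size_t size) {
        std::memcpy(ref.mapped_span.data(), data, size);
        // scheduler.Record([staging = ref.buffer, ...](vk::CommandBuffer cmdbuf) { ... });
    }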
@@ -10,19 +10,19 @@
 #include "video_core/buffer_cache/buffer_cache.h"
 #include "video_core/renderer_vulkan/vk_memory_manager.h"
 #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
+#include "video_core/renderer_vulkan/vk_memory_manager.h"
 #include "video_core/renderer_vulkan/vk_stream_buffer.h"
 #include "video_core/vulkan_common/vulkan_wrapper.h"
 
 namespace Vulkan {
 
 class Device;
-class VKMemoryManager;
 class VKScheduler;
 
 class Buffer final : public VideoCommon::BufferBlock {
 public:
     explicit Buffer(const Device& device, VKMemoryManager& memory_manager, VKScheduler& scheduler,
-                    VKStagingBufferPool& staging_pool, VAddr cpu_addr_, std::size_t size_);
+                    StagingBufferPool& staging_pool, VAddr cpu_addr_, std::size_t size_);
     ~Buffer();
 
     void Upload(std::size_t offset, std::size_t data_size, const u8* data);
@@ -33,7 +33,7 @@ public:
                   std::size_t copy_size);
 
     VkBuffer Handle() const {
-        return *buffer.handle;
+        return *buffer;
     }
 
     u64 Address() const {
@@ -43,9 +43,10 @@ public:
 private:
     const Device& device;
     VKScheduler& scheduler;
-    VKStagingBufferPool& staging_pool;
+    StagingBufferPool& staging_pool;
 
-    VKBuffer buffer;
+    vk::Buffer buffer;
+    MemoryCommit commit;
 };
 
 class VKBufferCache final : public VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer> {
@@ -54,7 +55,7 @@ public:
                            Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory,
                            const Device& device, VKMemoryManager& memory_manager,
                            VKScheduler& scheduler, VKStreamBuffer& stream_buffer,
-                           VKStagingBufferPool& staging_pool);
+                           StagingBufferPool& staging_pool);
     ~VKBufferCache();
 
     BufferInfo GetEmptyBuffer(std::size_t size) override;
@@ -66,7 +67,7 @@ private:
     const Device& device;
     VKMemoryManager& memory_manager;
     VKScheduler& scheduler;
-    VKStagingBufferPool& staging_pool;
+    StagingBufferPool& staging_pool;
 };
 
 } // namespace Vulkan
@@ -164,7 +164,7 @@ VkDescriptorSet VKComputePass::CommitDescriptorSet(
 
 QuadArrayPass::QuadArrayPass(const Device& device_, VKScheduler& scheduler_,
                              VKDescriptorPool& descriptor_pool_,
-                             VKStagingBufferPool& staging_buffer_pool_,
+                             StagingBufferPool& staging_buffer_pool_,
                              VKUpdateDescriptorQueue& update_descriptor_queue_)
     : VKComputePass(device_, descriptor_pool_, BuildQuadArrayPassDescriptorSetLayoutBinding(),
                     BuildQuadArrayPassDescriptorUpdateTemplateEntry(),
@@ -177,18 +177,18 @@ QuadArrayPass::~QuadArrayPass() = default;
 std::pair<VkBuffer, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 first) {
     const u32 num_triangle_vertices = (num_vertices / 4) * 6;
     const std::size_t staging_size = num_triangle_vertices * sizeof(u32);
-    auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false);
+    const auto staging_ref = staging_buffer_pool.Request(staging_size, false);
 
     update_descriptor_queue.Acquire();
-    update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size);
+    update_descriptor_queue.AddBuffer(staging_ref.buffer, 0, staging_size);
     const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
 
     scheduler.RequestOutsideRenderPassOperationContext();
 
     ASSERT(num_vertices % 4 == 0);
     const u32 num_quads = num_vertices / 4;
-    scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, num_quads,
-                      first, set](vk::CommandBuffer cmdbuf) {
+    scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging_ref.buffer,
+                      num_quads, first, set](vk::CommandBuffer cmdbuf) {
         constexpr u32 dispatch_size = 1024;
         cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
         cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {});
@@ -208,11 +208,11 @@ std::pair<VkBuffer, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32
         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                                VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, {barrier}, {});
     });
-    return {*buffer.handle, 0};
+    return {staging_ref.buffer, 0};
 }
 
 Uint8Pass::Uint8Pass(const Device& device, VKScheduler& scheduler_,
-                     VKDescriptorPool& descriptor_pool, VKStagingBufferPool& staging_buffer_pool_,
+                     VKDescriptorPool& descriptor_pool, StagingBufferPool& staging_buffer_pool_,
                      VKUpdateDescriptorQueue& update_descriptor_queue_)
     : VKComputePass(device, descriptor_pool, BuildInputOutputDescriptorSetBindings(),
                     BuildInputOutputDescriptorUpdateTemplate(), {}, VULKAN_UINT8_COMP_SPV),
@@ -224,15 +224,15 @@ Uint8Pass::~Uint8Pass() = default;
 std::pair<VkBuffer, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buffer,
                                              u64 src_offset) {
     const u32 staging_size = static_cast<u32>(num_vertices * sizeof(u16));
-    auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false);
+    const auto staging_ref = staging_buffer_pool.Request(staging_size, false);
 
     update_descriptor_queue.Acquire();
     update_descriptor_queue.AddBuffer(src_buffer, src_offset, num_vertices);
-    update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size);
+    update_descriptor_queue.AddBuffer(staging_ref.buffer, 0, staging_size);
     const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
 
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, set,
+    scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging_ref.buffer, set,
                       num_vertices](vk::CommandBuffer cmdbuf) {
         constexpr u32 dispatch_size = 1024;
         cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
@@ -252,12 +252,12 @@ std::pair<VkBuffer, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buff
         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                                VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, barrier, {});
     });
-    return {*buffer.handle, 0};
+    return {staging_ref.buffer, 0};
 }
 
 QuadIndexedPass::QuadIndexedPass(const Device& device_, VKScheduler& scheduler_,
                                  VKDescriptorPool& descriptor_pool_,
-                                 VKStagingBufferPool& staging_buffer_pool_,
+                                 StagingBufferPool& staging_buffer_pool_,
                                  VKUpdateDescriptorQueue& update_descriptor_queue_)
     : VKComputePass(device_, descriptor_pool_, BuildInputOutputDescriptorSetBindings(),
                     BuildInputOutputDescriptorUpdateTemplate(),
@@ -286,15 +286,15 @@ std::pair<VkBuffer, u64> QuadIndexedPass::Assemble(
     const u32 num_tri_vertices = (num_vertices / 4) * 6;
 
     const std::size_t staging_size = num_tri_vertices * sizeof(u32);
-    auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false);
+    const auto staging_ref = staging_buffer_pool.Request(staging_size, false);
 
     update_descriptor_queue.Acquire();
     update_descriptor_queue.AddBuffer(src_buffer, src_offset, input_size);
-    update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size);
+    update_descriptor_queue.AddBuffer(staging_ref.buffer, 0, staging_size);
     const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
 
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, set,
+    scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging_ref.buffer, set,
                       num_tri_vertices, base_vertex, index_shift](vk::CommandBuffer cmdbuf) {
         static constexpr u32 dispatch_size = 1024;
         const std::array push_constants = {base_vertex, index_shift};
@@ -317,7 +317,7 @@ std::pair<VkBuffer, u64> QuadIndexedPass::Assemble(
         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                                VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, barrier, {});
     });
-    return {*buffer.handle, 0};
+    return {staging_ref.buffer, 0};
 }
 
 } // namespace Vulkan
@@ -16,8 +16,8 @@
 namespace Vulkan {
 
 class Device;
+class StagingBufferPool;
 class VKScheduler;
-class VKStagingBufferPool;
 class VKUpdateDescriptorQueue;
 
 class VKComputePass {
@@ -45,7 +45,7 @@ class QuadArrayPass final : public VKComputePass {
 public:
     explicit QuadArrayPass(const Device& device_, VKScheduler& scheduler_,
                            VKDescriptorPool& descriptor_pool_,
-                           VKStagingBufferPool& staging_buffer_pool_,
+                           StagingBufferPool& staging_buffer_pool_,
                            VKUpdateDescriptorQueue& update_descriptor_queue_);
     ~QuadArrayPass();
 
@@ -53,7 +53,7 @@ public:
 
 private:
     VKScheduler& scheduler;
-    VKStagingBufferPool& staging_buffer_pool;
+    StagingBufferPool& staging_buffer_pool;
     VKUpdateDescriptorQueue& update_descriptor_queue;
 };
 
@@ -61,7 +61,7 @@ class Uint8Pass final : public VKComputePass {
 public:
     explicit Uint8Pass(const Device& device_, VKScheduler& scheduler_,
                        VKDescriptorPool& descriptor_pool_,
-                       VKStagingBufferPool& staging_buffer_pool_,
+                       StagingBufferPool& staging_buffer_pool_,
                        VKUpdateDescriptorQueue& update_descriptor_queue_);
     ~Uint8Pass();
 
@@ -69,7 +69,7 @@ public:
 
 private:
     VKScheduler& scheduler;
-    VKStagingBufferPool& staging_buffer_pool;
+    StagingBufferPool& staging_buffer_pool;
     VKUpdateDescriptorQueue& update_descriptor_queue;
 };
 
@@ -77,7 +77,7 @@ class QuadIndexedPass final : public VKComputePass {
 public:
     explicit QuadIndexedPass(const Device& device_, VKScheduler& scheduler_,
                              VKDescriptorPool& descriptor_pool_,
-                             VKStagingBufferPool& staging_buffer_pool_,
+                             StagingBufferPool& staging_buffer_pool_,
                              VKUpdateDescriptorQueue& update_descriptor_queue_);
     ~QuadIndexedPass();
 
@@ -87,7 +87,7 @@ public:
 
 private:
     VKScheduler& scheduler;
-    VKStagingBufferPool& staging_buffer_pool;
+    StagingBufferPool& staging_buffer_pool;
     VKUpdateDescriptorQueue& update_descriptor_queue;
 };
@@ -3,6 +3,7 @@
 // Refer to the license.txt file included.
 
 #include <algorithm>
+#include <bit>
 #include <optional>
 #include <tuple>
 #include <vector>
@@ -16,92 +17,93 @@
 #include "video_core/vulkan_common/vulkan_wrapper.h"
 
 namespace Vulkan {
 
 namespace {
+struct Range {
+    u64 begin;
+    u64 end;
 
-u64 GetAllocationChunkSize(u64 required_size) {
-    static constexpr u64 sizes[] = {16ULL << 20, 32ULL << 20, 64ULL << 20, 128ULL << 20};
-    auto it = std::lower_bound(std::begin(sizes), std::end(sizes), required_size);
-    return it != std::end(sizes) ? *it : Common::AlignUp(required_size, 256ULL << 20);
+    [[nodiscard]] bool Contains(u64 iterator, u64 size) const noexcept {
+        return iterator < end && begin < iterator + size;
+    }
+};
+
+[[nodiscard]] u64 GetAllocationChunkSize(u64 required_size) {
+    static constexpr std::array sizes{
+        0x1000ULL << 10, 0x1400ULL << 10, 0x1800ULL << 10, 0x1c00ULL << 10, 0x2000ULL << 10,
+        0x3200ULL << 10, 0x4000ULL << 10, 0x6000ULL << 10, 0x8000ULL << 10, 0xA000ULL << 10,
+        0x10000ULL << 10, 0x18000ULL << 10, 0x20000ULL << 10,
+    };
+    static_assert(std::is_sorted(sizes.begin(), sizes.end()));
+
+    const auto it = std::ranges::lower_bound(sizes, required_size);
+    return it != sizes.end() ? *it : Common::AlignUp(required_size, 4ULL << 20);
 }
 
 } // Anonymous namespace
 
-class VKMemoryAllocation final {
+class MemoryAllocation {
 public:
-    explicit VKMemoryAllocation(const Device& device_, vk::DeviceMemory memory_,
-                                VkMemoryPropertyFlags properties_, u64 allocation_size_, u32 type_)
+    explicit MemoryAllocation(const Device& device_, vk::DeviceMemory memory_,
                               VkMemoryPropertyFlags properties_, u64 allocation_size_, u32 type_)
         : device{device_}, memory{std::move(memory_)}, properties{properties_},
           allocation_size{allocation_size_}, shifted_type{ShiftType(type_)} {}
 
-    VKMemoryCommit Commit(VkDeviceSize commit_size, VkDeviceSize alignment) {
-        auto found = TryFindFreeSection(free_iterator, allocation_size,
-                                        static_cast<u64>(commit_size), static_cast<u64>(alignment));
-        if (!found) {
-            found = TryFindFreeSection(0, free_iterator, static_cast<u64>(commit_size),
-                                       static_cast<u64>(alignment));
-            if (!found) {
-                // Signal out of memory, it'll try to do more allocations.
-                return nullptr;
-            }
+    [[nodiscard]] std::optional<MemoryCommit> Commit(VkDeviceSize size, VkDeviceSize alignment) {
+        const std::optional<u64> alloc = FindFreeRegion(size, alignment);
+        if (!alloc) {
+            // Signal out of memory, it'll try to do more allocations.
+            return std::nullopt;
         }
-        auto commit = std::make_unique<VKMemoryCommitImpl>(device, this, memory, *found,
-                                                           *found + commit_size);
-        commits.push_back(commit.get());
-
-        // Last commit's address is highly probable to be free.
-        free_iterator = *found + commit_size;
-
-        return commit;
+        const Range range{
+            .begin = *alloc,
+            .end = *alloc + size,
+        };
+        commits.insert(std::ranges::upper_bound(commits, *alloc, {}, &Range::begin), range);
+        return std::make_optional<MemoryCommit>(device, this, *memory, *alloc, *alloc + size);
     }
 
-    void Free(const VKMemoryCommitImpl* commit) {
-        ASSERT(commit);
-
-        const auto it = std::find(std::begin(commits), std::end(commits), commit);
-        if (it == commits.end()) {
-            UNREACHABLE_MSG("Freeing unallocated commit!");
-            return;
-        }
+    void Free(u64 begin) {
+        const auto it = std::ranges::find(commits, begin, &Range::begin);
+        ASSERT_MSG(it != commits.end(), "Invalid commit");
         commits.erase(it);
     }
 
+    [[nodiscard]] std::span<u8> Map() {
+        if (!memory_mapped_span.empty()) {
+            return memory_mapped_span;
+        }
+        u8* const raw_pointer = memory.Map(0, allocation_size);
+        memory_mapped_span = std::span<u8>(raw_pointer, allocation_size);
+        return memory_mapped_span;
+    }
+
     /// Returns whether this allocation is compatible with the arguments.
-    bool IsCompatible(VkMemoryPropertyFlags wanted_properties, u32 type_mask) const {
+    [[nodiscard]] bool IsCompatible(VkMemoryPropertyFlags wanted_properties, u32 type_mask) const {
         return (wanted_properties & properties) && (type_mask & shifted_type) != 0;
     }
 
 private:
-    static constexpr u32 ShiftType(u32 type) {
+    [[nodiscard]] static constexpr u32 ShiftType(u32 type) {
        return 1U << type;
    }
 
-    /// A memory allocator, it may return a free region between "start" and "end" with the solicited
-    /// requirements.
-    std::optional<u64> TryFindFreeSection(u64 start, u64 end, u64 size, u64 alignment) const {
-        u64 iterator = Common::AlignUp(start, alignment);
-        while (iterator + size <= end) {
-            const u64 try_left = iterator;
-            const u64 try_right = try_left + size;
-
-            bool overlap = false;
-            for (const auto& commit : commits) {
-                const auto [commit_left, commit_right] = commit->interval;
-                if (try_left < commit_right && commit_left < try_right) {
-                    // There's an overlap, continue the search where the overlapping commit ends.
-                    iterator = Common::AlignUp(commit_right, alignment);
-                    overlap = true;
-                    break;
-                }
-            }
-            if (!overlap) {
-                // A free address has been found.
-                return try_left;
-            }
+    [[nodiscard]] std::optional<u64> FindFreeRegion(u64 size, u64 alignment) noexcept {
+        ASSERT(std::has_single_bit(alignment));
+        const u64 alignment_log2 = std::countr_zero(alignment);
+        std::optional<u64> candidate;
+        u64 iterator = 0;
+        auto commit = commits.begin();
+        while (iterator + size <= allocation_size) {
+            candidate = candidate.value_or(iterator);
+            if (commit == commits.end()) {
+                break;
+            }
+            if (commit->Contains(*candidate, size)) {
+                candidate = std::nullopt;
+            }
+            iterator = Common::AlignUpLog2(commit->end, alignment_log2);
+            ++commit;
         }
-
-        // No free regions where found, return an empty optional.
-        return std::nullopt;
+        return candidate;
     }
 
     const Device& device; ///< Vulkan device.
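This is where the gap bug lived: the old TryFindFreeSection resumed scanning from free_iterator and only wrapped around once, so regions freed behind the iterator could be skipped and the allocation fragmented. The new FindFreeRegion instead walks the commit ranges, kept sorted by begin offset, in a single pass: it tentatively claims the current offset and discards the claim whenever an existing range overlaps it. A standalone sketch of the same scan (alignment handling dropped for brevity):

    #include <cstdint>
    #include <optional>
    #include <vector>

    struct Range {
        std::uint64_t begin;
        std::uint64_t end;

        bool Contains(std::uint64_t where, std::uint64_t size) const noexcept {
            return where < end && begin < where + size;
        }
    };

    // commits must be sorted by begin; returns the lowest offset where
    // [offset, offset + size) overlaps no existing commit.
    std::optional<std::uint64_t> FindFreeRegion(const std::vector<Range>& commits,
                                                std::uint64_t allocation_size,
                                                std::uint64_t size) {
        std::optional<std::uint64_t> candidate;
        std::uint64_t iterator = 0;
        auto commit = commits.begin();
        while (iterator + size <= allocation_size) {
            candidate = candidate.value_or(iterator); // tentatively claim this offset
            if (commit == commits.end()) {
                break; // nothing left that could invalidate the candidate
            }
            if (commit->Contains(*candidate, size)) {
                candidate = std::nullopt; // overlap: retry past this commit
            }
            iterator = commit->end; // continue the scan after the current commit
            ++commit;
        }
        return candidate;
    }

Keeping commits sorted (via the std::ranges::upper_bound insert in Commit()) is what makes the single forward pass sufficient.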
@@ -109,21 +111,52 @@ private:
     const VkMemoryPropertyFlags properties; ///< Vulkan properties.
     const u64 allocation_size;              ///< Size of this allocation.
     const u32 shifted_type;                 ///< Stored Vulkan type of this allocation, shifted.
-
-    /// Hints where the next free region is likely going to be.
-    u64 free_iterator{};
-
-    /// Stores all commits done from this allocation.
-    std::vector<const VKMemoryCommitImpl*> commits;
+    std::vector<Range> commits;       ///< All commit ranges done from this allocation.
+    std::span<u8> memory_mapped_span; ///< Memory mapped span. Empty if not queried before.
 };
 
+MemoryCommit::MemoryCommit(const Device& device_, MemoryAllocation* allocation_,
+                           VkDeviceMemory memory_, u64 begin, u64 end) noexcept
+    : device{&device_}, allocation{allocation_}, memory{memory_}, interval{begin, end} {}
+
+MemoryCommit::~MemoryCommit() {
+    Release();
+}
+
+MemoryCommit& MemoryCommit::operator=(MemoryCommit&& rhs) noexcept {
+    Release();
+    device = rhs.device;
+    allocation = std::exchange(rhs.allocation, nullptr);
+    memory = rhs.memory;
+    interval = rhs.interval;
+    span = std::exchange(rhs.span, std::span<u8>{});
+    return *this;
+}
+
+MemoryCommit::MemoryCommit(MemoryCommit&& rhs) noexcept
+    : device{rhs.device}, allocation{std::exchange(rhs.allocation, nullptr)}, memory{rhs.memory},
+      interval{rhs.interval}, span{std::exchange(rhs.span, std::span<u8>{})} {}
+
+std::span<u8> MemoryCommit::Map() {
+    if (!span.empty()) {
+        return span;
+    }
+    span = allocation->Map().subspan(interval.first, interval.second - interval.first);
+    return span;
+}
+
+void MemoryCommit::Release() {
+    if (allocation) {
+        allocation->Free(interval.first);
+    }
+}
+
 VKMemoryManager::VKMemoryManager(const Device& device_)
     : device{device_}, properties{device_.GetPhysical().GetMemoryProperties()} {}
 
 VKMemoryManager::~VKMemoryManager() = default;
 
-VKMemoryCommit VKMemoryManager::Commit(const VkMemoryRequirements& requirements,
-                                       bool host_visible) {
+MemoryCommit VKMemoryManager::Commit(const VkMemoryRequirements& requirements, bool host_visible) {
     const u64 chunk_size = GetAllocationChunkSize(requirements.size);
 
     // When a host visible commit is asked, search for host visible and coherent, otherwise search
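Mapping also changes shape: instead of the old MemoryMap object that mapped and unmapped device memory around each use, the whole allocation is now mapped lazily once and every commit returns a subspan of that mapping. A sketch of the idea with a plain byte vector standing in for VkDeviceMemory:

    #include <cstddef>
    #include <cstdint>
    #include <span>
    #include <vector>

    class MappedAllocation {
    public:
        explicit MappedAllocation(std::size_t allocation_size) : backing(allocation_size) {}

        // Lazily "map" the whole allocation, like MemoryAllocation::Map() above.
        std::span<std::uint8_t> Map() {
            if (memory_mapped_span.empty()) {
                memory_mapped_span = std::span<std::uint8_t>(backing.data(), backing.size());
            }
            return memory_mapped_span;
        }

    private:
        std::vector<std::uint8_t> backing;          // stand-in for VkDeviceMemory
        std::span<std::uint8_t> memory_mapped_span; // empty until first Map()
    };

    // Commit-side view over [begin, end), mirroring MemoryCommit::Map().
    std::span<std::uint8_t> CommitMap(MappedAllocation& allocation, std::uint64_t begin,
                                      std::uint64_t end) {
        return allocation.Map().subspan(begin, end - begin);
    }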
@@ -131,39 +164,31 @@ VKMemoryCommit VKMemoryManager::Commit(const VkMemoryRequirements& requirements,
     const VkMemoryPropertyFlags wanted_properties =
         host_visible ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
                      : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-    if (auto commit = TryAllocCommit(requirements, wanted_properties)) {
-        return commit;
+    if (std::optional<MemoryCommit> commit = TryAllocCommit(requirements, wanted_properties)) {
+        return std::move(*commit);
     }
 
     // Commit has failed, allocate more memory.
-    if (!AllocMemory(wanted_properties, requirements.memoryTypeBits, chunk_size)) {
-        // TODO(Rodrigo): Handle these situations in some way like flushing to guest memory.
-        // Allocation has failed, panic.
-        UNREACHABLE_MSG("Ran out of VRAM!");
-        return {};
-    }
+    // TODO(Rodrigo): Handle out of memory situations in some way like flushing to guest memory.
+    AllocMemory(wanted_properties, requirements.memoryTypeBits, chunk_size);
 
-    // Commit again, this time it won't fail since there's a fresh allocation above. If it does,
-    // there's a bug.
-    auto commit = TryAllocCommit(requirements, wanted_properties);
-    ASSERT(commit);
-    return commit;
+    // Commit again, this time it won't fail since there's a fresh allocation above.
+    // If it does, there's a bug.
+    return TryAllocCommit(requirements, wanted_properties).value();
 }
 
-VKMemoryCommit VKMemoryManager::Commit(const vk::Buffer& buffer, bool host_visible) {
+MemoryCommit VKMemoryManager::Commit(const vk::Buffer& buffer, bool host_visible) {
     auto commit = Commit(device.GetLogical().GetBufferMemoryRequirements(*buffer), host_visible);
-    buffer.BindMemory(commit->GetMemory(), commit->GetOffset());
+    buffer.BindMemory(commit.Memory(), commit.Offset());
     return commit;
 }
 
-VKMemoryCommit VKMemoryManager::Commit(const vk::Image& image, bool host_visible) {
+MemoryCommit VKMemoryManager::Commit(const vk::Image& image, bool host_visible) {
     auto commit = Commit(device.GetLogical().GetImageMemoryRequirements(*image), host_visible);
-    image.BindMemory(commit->GetMemory(), commit->GetOffset());
+    image.BindMemory(commit.Memory(), commit.Offset());
     return commit;
 }
 
-bool VKMemoryManager::AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask,
+void VKMemoryManager::AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask,
                                   u64 size) {
     const u32 type = [&] {
         for (u32 type_index = 0; type_index < properties.memoryTypeCount; ++type_index) {
@@ -176,26 +201,18 @@ bool VKMemoryManager::AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 t
         UNREACHABLE_MSG("Couldn't find a compatible memory type!");
         return 0U;
     }();
-
-    // Try to allocate found type.
-    vk::DeviceMemory memory = device.GetLogical().TryAllocateMemory({
+    vk::DeviceMemory memory = device.GetLogical().AllocateMemory({
         .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
         .pNext = nullptr,
         .allocationSize = size,
        .memoryTypeIndex = type,
     });
-    if (!memory) {
-        LOG_CRITICAL(Render_Vulkan, "Device allocation failed!");
-        return false;
-    }
-
-    allocations.push_back(std::make_unique<VKMemoryAllocation>(device, std::move(memory),
-                                                               wanted_properties, size, type));
-    return true;
+    allocations.push_back(std::make_unique<MemoryAllocation>(device, std::move(memory),
+                                                             wanted_properties, size, type));
 }
 
-VKMemoryCommit VKMemoryManager::TryAllocCommit(const VkMemoryRequirements& requirements,
-                                               VkMemoryPropertyFlags wanted_properties) {
+std::optional<MemoryCommit> VKMemoryManager::TryAllocCommit(
+    const VkMemoryRequirements& requirements, VkMemoryPropertyFlags wanted_properties) {
     for (auto& allocation : allocations) {
         if (!allocation->IsCompatible(wanted_properties, requirements.memoryTypeBits)) {
             continue;
@@ -204,27 +221,7 @@ VKMemoryCommit VKMemoryManager::TryAllocCommit(const VkMemoryRequirements& requi
             return commit;
         }
     }
-    return {};
-}
-
-VKMemoryCommitImpl::VKMemoryCommitImpl(const Device& device_, VKMemoryAllocation* allocation_,
-                                       const vk::DeviceMemory& memory_, u64 begin_, u64 end_)
-    : device{device_}, memory{memory_}, interval{begin_, end_}, allocation{allocation_} {}
-
-VKMemoryCommitImpl::~VKMemoryCommitImpl() {
-    allocation->Free(this);
-}
-
-MemoryMap VKMemoryCommitImpl::Map(u64 size, u64 offset_) const {
-    return MemoryMap(this, std::span<u8>(memory.Map(interval.first + offset_, size), size));
-}
-
-void VKMemoryCommitImpl::Unmap() const {
-    memory.Unmap();
-}
-
-MemoryMap VKMemoryCommitImpl::Map() const {
-    return Map(interval.second - interval.first);
+    return std::nullopt;
 }
 
 } // namespace Vulkan
@@ -15,118 +15,81 @@ namespace Vulkan {
 
 class Device;
 class MemoryMap;
-class VKMemoryAllocation;
-class VKMemoryCommitImpl;
+class MemoryAllocation;
 
-using VKMemoryCommit = std::unique_ptr<VKMemoryCommitImpl>;
-
-class VKMemoryManager final {
+class MemoryCommit final {
 public:
-    explicit VKMemoryManager(const Device& device_);
-    VKMemoryManager(const VKMemoryManager&) = delete;
-    ~VKMemoryManager();
-
-    /**
-     * Commits a memory with the specified requeriments.
-     * @param requirements Requirements returned from a Vulkan call.
-     * @param host_visible Signals the allocator that it *must* use host visible and coherent
-     *                     memory. When passing false, it will try to allocate device local memory.
-     * @returns A memory commit.
-     */
-    VKMemoryCommit Commit(const VkMemoryRequirements& requirements, bool host_visible);
-
-    /// Commits memory required by the buffer and binds it.
-    VKMemoryCommit Commit(const vk::Buffer& buffer, bool host_visible);
-
-    /// Commits memory required by the image and binds it.
-    VKMemoryCommit Commit(const vk::Image& image, bool host_visible);
-
-private:
-    /// Allocates a chunk of memory.
-    bool AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask, u64 size);
-
-    /// Tries to allocate a memory commit.
-    VKMemoryCommit TryAllocCommit(const VkMemoryRequirements& requirements,
-                                  VkMemoryPropertyFlags wanted_properties);
-
-    const Device& device;                               ///< Device handler.
-    const VkPhysicalDeviceMemoryProperties properties;  ///< Physical device properties.
-    std::vector<std::unique_ptr<VKMemoryAllocation>> allocations; ///< Current allocations.
-};
-
-class VKMemoryCommitImpl final {
-    friend VKMemoryAllocation;
-    friend MemoryMap;
-
-public:
-    explicit VKMemoryCommitImpl(const Device& device_, VKMemoryAllocation* allocation_,
-                                const vk::DeviceMemory& memory_, u64 begin_, u64 end_);
-    ~VKMemoryCommitImpl();
-
-    /// Maps a memory region and returns a pointer to it.
-    /// It's illegal to have more than one memory map at the same time.
-    MemoryMap Map(u64 size, u64 offset = 0) const;
-
-    /// Maps the whole commit and returns a pointer to it.
-    /// It's illegal to have more than one memory map at the same time.
-    MemoryMap Map() const;
+    explicit MemoryCommit() noexcept = default;
+    explicit MemoryCommit(const Device& device_, MemoryAllocation* allocation_,
+                          VkDeviceMemory memory_, u64 begin, u64 end) noexcept;
+    ~MemoryCommit();
+
+    MemoryCommit& operator=(MemoryCommit&&) noexcept;
+    MemoryCommit(MemoryCommit&&) noexcept;
+
+    MemoryCommit& operator=(const MemoryCommit&) = delete;
+    MemoryCommit(const MemoryCommit&) = delete;
+
+    /// Returns a host visible memory map.
+    /// It will map the backing allocation if it hasn't been mapped before.
+    std::span<u8> Map();
 
     /// Returns the Vulkan memory handler.
-    VkDeviceMemory GetMemory() const {
-        return *memory;
+    VkDeviceMemory Memory() const {
+        return memory;
     }
 
     /// Returns the start position of the commit relative to the allocation.
-    VkDeviceSize GetOffset() const {
+    VkDeviceSize Offset() const {
         return static_cast<VkDeviceSize>(interval.first);
     }
 
 private:
-    /// Unmaps memory.
-    void Unmap() const;
+    void Release();
 
-    const Device& device;             ///< Vulkan device.
-    const vk::DeviceMemory& memory;   ///< Vulkan device memory handler.
-    std::pair<u64, u64> interval{};   ///< Interval where the commit exists.
-    VKMemoryAllocation* allocation{}; ///< Pointer to the large memory allocation.
+    const Device* device{};         ///< Vulkan device.
+    MemoryAllocation* allocation{}; ///< Pointer to the large memory allocation.
+    VkDeviceMemory memory{};        ///< Vulkan device memory handler.
+    std::pair<u64, u64> interval{}; ///< Interval where the commit exists.
+    std::span<u8> span;             ///< Host visible memory span. Empty if not queried before.
 };
 
-/// Holds ownership of a memory map.
-class MemoryMap final {
+class VKMemoryManager final {
 public:
-    explicit MemoryMap(const VKMemoryCommitImpl* commit_, std::span<u8> span_)
-        : commit{commit_}, span{span_} {}
-
-    ~MemoryMap() {
-        if (commit) {
-            commit->Unmap();
-        }
-    }
-
-    /// Prematurely releases the memory map.
-    void Release() {
-        commit->Unmap();
-        commit = nullptr;
-    }
-
-    /// Returns a span to the memory map.
-    [[nodiscard]] std::span<u8> Span() const noexcept {
-        return span;
-    }
-
-    /// Returns the address of the memory map.
-    [[nodiscard]] u8* Address() const noexcept {
-        return span.data();
-    }
-
-    /// Returns the address of the memory map;
-    [[nodiscard]] operator u8*() const noexcept {
-        return span.data();
-    }
+    explicit VKMemoryManager(const Device& device_);
+    ~VKMemoryManager();
+
+    VKMemoryManager& operator=(const VKMemoryManager&) = delete;
+    VKMemoryManager(const VKMemoryManager&) = delete;
+
+    /**
+     * Commits a memory with the specified requeriments.
+     *
+     * @param requirements Requirements returned from a Vulkan call.
+     * @param host_visible Signals the allocator that it *must* use host visible and coherent
+     *                     memory. When passing false, it will try to allocate device local memory.
+     *
+     * @returns A memory commit.
+     */
+    MemoryCommit Commit(const VkMemoryRequirements& requirements, bool host_visible);
+
+    /// Commits memory required by the buffer and binds it.
+    MemoryCommit Commit(const vk::Buffer& buffer, bool host_visible);
+
+    /// Commits memory required by the image and binds it.
+    MemoryCommit Commit(const vk::Image& image, bool host_visible);
 
 private:
-    const VKMemoryCommitImpl* commit{}; ///< Mapped memory commit.
-    std::span<u8> span;                 ///< Address to the mapped memory.
+    /// Allocates a chunk of memory.
+    void AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask, u64 size);
+
+    /// Tries to allocate a memory commit.
+    std::optional<MemoryCommit> TryAllocCommit(const VkMemoryRequirements& requirements,
+                                               VkMemoryPropertyFlags wanted_properties);
+
+    const Device& device;                              ///< Device handler.
+    const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties.
+    std::vector<std::unique_ptr<MemoryAllocation>> allocations; ///< Current allocations.
 };
 
 } // namespace Vulkan
@@ -218,7 +218,7 @@ private:
     VKScheduler& scheduler;
 
     VKStreamBuffer stream_buffer;
-    VKStagingBufferPool staging_pool;
+    StagingBufferPool staging_pool;
     VKDescriptorPool descriptor_pool;
     VKUpdateDescriptorQueue update_descriptor_queue;
     BlitImageHelper blit_image;
@@ -234,7 +234,7 @@ private:
     VKFenceManager fence_manager;
 
     vk::Buffer default_buffer;
-    VKMemoryCommit default_buffer_commit;
+    MemoryCommit default_buffer_commit;
     vk::Event wfi_event;
     VideoCommon::Shader::AsyncShaders async_shaders;
@ -3,58 +3,64 @@
|
||||||
// Refer to the license.txt file included.
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
#include <unordered_map>
|
|
||||||
#include <utility>
|
#include <utility>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
|
|
||||||
|
#include <fmt/format.h>
|
||||||
|
|
||||||
#include "common/bit_util.h"
|
#include "common/bit_util.h"
|
||||||
#include "common/common_types.h"
|
#include "common/common_types.h"
|
||||||
#include "video_core/renderer_vulkan/vk_scheduler.h"
|
#include "video_core/renderer_vulkan/vk_scheduler.h"
video_core/renderer_vulkan/vk_staging_buffer_pool.cpp

 #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
 #include "video_core/vulkan_common/vulkan_device.h"
 #include "video_core/vulkan_common/vulkan_wrapper.h"
 
 namespace Vulkan {
 
-VKStagingBufferPool::StagingBuffer::StagingBuffer(std::unique_ptr<VKBuffer> buffer_)
-    : buffer{std::move(buffer_)} {}
-
-VKStagingBufferPool::VKStagingBufferPool(const Device& device_, VKMemoryManager& memory_manager_,
-                                         VKScheduler& scheduler_)
+StagingBufferPool::StagingBufferPool(const Device& device_, VKMemoryManager& memory_manager_,
+                                     VKScheduler& scheduler_)
     : device{device_}, memory_manager{memory_manager_}, scheduler{scheduler_} {}
 
-VKStagingBufferPool::~VKStagingBufferPool() = default;
+StagingBufferPool::~StagingBufferPool() = default;
 
-VKBuffer& VKStagingBufferPool::GetUnusedBuffer(std::size_t size, bool host_visible) {
-    if (const auto buffer = TryGetReservedBuffer(size, host_visible)) {
-        return *buffer;
+StagingBufferRef StagingBufferPool::Request(size_t size, bool host_visible) {
+    if (const std::optional<StagingBufferRef> ref = TryGetReservedBuffer(size, host_visible)) {
+        return *ref;
     }
     return CreateStagingBuffer(size, host_visible);
 }
 
-void VKStagingBufferPool::TickFrame() {
-    current_delete_level = (current_delete_level + 1) % NumLevels;
+void StagingBufferPool::TickFrame() {
+    current_delete_level = (current_delete_level + 1) % NUM_LEVELS;
 
     ReleaseCache(true);
     ReleaseCache(false);
 }
 
-VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_visible) {
-    for (StagingBuffer& entry : GetCache(host_visible)[Common::Log2Ceil64(size)].entries) {
-        if (!scheduler.IsFree(entry.tick)) {
-            continue;
-        }
-        entry.tick = scheduler.CurrentTick();
-        return &*entry.buffer;
-    }
-    return nullptr;
-}
+std::optional<StagingBufferRef> StagingBufferPool::TryGetReservedBuffer(size_t size,
+                                                                        bool host_visible) {
+    StagingBuffers& cache_level = GetCache(host_visible)[Common::Log2Ceil64(size)];
+
+    const auto is_free = [this](const StagingBuffer& entry) {
+        return scheduler.IsFree(entry.tick);
+    };
+    auto& entries = cache_level.entries;
+    const auto hint_it = entries.begin() + cache_level.iterate_index;
+    auto it = std::find_if(entries.begin() + cache_level.iterate_index, entries.end(), is_free);
+    if (it == entries.end()) {
+        it = std::find_if(entries.begin(), hint_it, is_free);
+        if (it == hint_it) {
+            return std::nullopt;
+        }
+    }
+    cache_level.iterate_index = std::distance(entries.begin(), it) + 1;
+    it->tick = scheduler.CurrentTick();
+    return it->Ref();
+}
 
-VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_visible) {
+StagingBufferRef StagingBufferPool::CreateStagingBuffer(size_t size, bool host_visible) {
     const u32 log2 = Common::Log2Ceil64(size);
-    auto buffer = std::make_unique<VKBuffer>();
-    buffer->handle = device.GetLogical().CreateBuffer({
+    vk::Buffer buffer = device.GetLogical().CreateBuffer({
         .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
         .pNext = nullptr,
         .flags = 0,
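The reworked TryGetReservedBuffer no longer rescans every entry from the front on each request. Each size level keeps an iterate_index hint: the search runs from the hint to the end, then wraps around to the beginning, and if the wrapped search arrives back at the hint every buffer in the level is still in flight, so std::nullopt is returned. A minimal self-contained sketch of the same circular-scan pattern follows; Entry and FindFreeCircular are illustrative names, not from the commit, and the real code tests scheduler.IsFree(entry.tick) instead of a plain flag:

    // Circular find-first-free over a vector, with a persistent start hint.
    #include <algorithm>
    #include <cstddef>
    #include <optional>
    #include <vector>

    struct Entry {
        bool free = false;
    };

    std::optional<std::size_t> FindFreeCircular(std::vector<Entry>& entries, std::size_t& hint) {
        hint = std::min(hint, entries.size()); // keep the hint valid if entries shrank
        const auto is_free = [](const Entry& e) { return e.free; };
        const auto hint_it = entries.begin() + hint;
        auto it = std::find_if(hint_it, entries.end(), is_free); // hint -> end
        if (it == entries.end()) {
            it = std::find_if(entries.begin(), hint_it, is_free); // wrap: begin -> hint
            if (it == hint_it) {
                return std::nullopt; // full circle: everything is still in use
            }
        }
        const auto index = static_cast<std::size_t>(std::distance(entries.begin(), it));
        hint = index + 1; // the next search resumes right after this hit
        return index;
    }

Starting at the hint means repeated requests within a frame tend to claim consecutive entries instead of re-testing buffers that are known to be busy.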
@@ -66,49 +72,53 @@ VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_visible) {
         .queueFamilyIndexCount = 0,
         .pQueueFamilyIndices = nullptr,
     });
-    buffer->commit = memory_manager.Commit(buffer->handle, host_visible);
+    if (device.HasDebuggingToolAttached()) {
+        ++buffer_index;
+        buffer.SetObjectNameEXT(fmt::format("Staging Buffer {}", buffer_index).c_str());
+    }
+    MemoryCommit commit = memory_manager.Commit(buffer, host_visible);
+    const std::span<u8> mapped_span = host_visible ? commit.Map() : std::span<u8>{};
 
-    std::vector<StagingBuffer>& entries = GetCache(host_visible)[log2].entries;
-    StagingBuffer& entry = entries.emplace_back(std::move(buffer));
-    entry.tick = scheduler.CurrentTick();
-    return *entry.buffer;
+    StagingBuffer& entry = GetCache(host_visible)[log2].entries.emplace_back(StagingBuffer{
+        .buffer = std::move(buffer),
+        .commit = std::move(commit),
+        .mapped_span = mapped_span,
+        .tick = scheduler.CurrentTick(),
+    });
+    return entry.Ref();
 }
 
-VKStagingBufferPool::StagingBuffersCache& VKStagingBufferPool::GetCache(bool host_visible) {
+StagingBufferPool::StagingBuffersCache& StagingBufferPool::GetCache(bool host_visible) {
     return host_visible ? host_staging_buffers : device_staging_buffers;
 }
 
-void VKStagingBufferPool::ReleaseCache(bool host_visible) {
-    auto& cache = GetCache(host_visible);
-    const u64 size = ReleaseLevel(cache, current_delete_level);
-    if (size == 0) {
-        return;
-    }
-}
+void StagingBufferPool::ReleaseCache(bool host_visible) {
+    ReleaseLevel(GetCache(host_visible), current_delete_level);
+}
 
-u64 VKStagingBufferPool::ReleaseLevel(StagingBuffersCache& cache, std::size_t log2) {
-    static constexpr std::size_t deletions_per_tick = 16;
+void StagingBufferPool::ReleaseLevel(StagingBuffersCache& cache, size_t log2) {
+    constexpr size_t deletions_per_tick = 16;
 
     auto& staging = cache[log2];
     auto& entries = staging.entries;
-    const std::size_t old_size = entries.size();
+    const size_t old_size = entries.size();
 
     const auto is_deleteable = [this](const StagingBuffer& entry) {
         return scheduler.IsFree(entry.tick);
     };
-    const std::size_t begin_offset = staging.delete_index;
-    const std::size_t end_offset = std::min(begin_offset + deletions_per_tick, old_size);
-    const auto begin = std::begin(entries) + begin_offset;
-    const auto end = std::begin(entries) + end_offset;
+    const size_t begin_offset = staging.delete_index;
+    const size_t end_offset = std::min(begin_offset + deletions_per_tick, old_size);
+    const auto begin = entries.begin() + begin_offset;
+    const auto end = entries.begin() + end_offset;
     entries.erase(std::remove_if(begin, end, is_deleteable), end);
 
-    const std::size_t new_size = entries.size();
+    const size_t new_size = entries.size();
     staging.delete_index += deletions_per_tick;
     if (staging.delete_index >= new_size) {
         staging.delete_index = 0;
     }
-
-    return (1ULL << log2) * (old_size - new_size);
+    if (staging.iterate_index > new_size) {
+        staging.iterate_index = 0;
+    }
 }
 
 } // namespace Vulkan
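ReleaseCache and ReleaseLevel are simplified as well: the old ReleaseCache computed the freed byte count only to branch on it with no effect, so the return value is dropped, and each call still erases at most deletions_per_tick (16) entries from a sliding window, amortizing cleanup across frames. The new iterate_index reset keeps the search hint from pointing past the end after entries are erased. A sketch of the windowed-erase pattern in isolation; Slot and ReleaseWindow are illustrative names, not from the commit:

    // Amortized cleanup: compact only a 16-entry window per tick, then advance.
    #include <algorithm>
    #include <cstddef>
    #include <vector>

    struct Slot {
        bool can_delete = false;
    };

    void ReleaseWindow(std::vector<Slot>& entries, std::size_t& delete_index) {
        constexpr std::size_t deletions_per_tick = 16;
        delete_index = std::min(delete_index, entries.size()); // stay in range
        const std::size_t begin_offset = delete_index;
        const std::size_t end_offset = std::min(begin_offset + deletions_per_tick, entries.size());
        const auto begin = entries.begin() + begin_offset;
        const auto end = entries.begin() + end_offset;
        // Only [begin, end) is filtered; later windows are handled on later ticks.
        entries.erase(std::remove_if(begin, end, [](const Slot& s) { return s.can_delete; }), end);
        delete_index += deletions_per_tick;
        if (delete_index >= entries.size()) {
            delete_index = 0; // wrap once the whole level has been visited
        }
    }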
video_core/renderer_vulkan/vk_staging_buffer_pool.h

@@ -17,46 +17,54 @@ namespace Vulkan {
 
 class Device;
 class VKScheduler;
 
-struct VKBuffer final {
-    vk::Buffer handle;
-    VKMemoryCommit commit;
+struct StagingBufferRef {
+    VkBuffer buffer;
+    std::span<u8> mapped_span;
 };
 
-class VKStagingBufferPool final {
+class StagingBufferPool {
 public:
-    explicit VKStagingBufferPool(const Device& device, VKMemoryManager& memory_manager,
-                                 VKScheduler& scheduler);
-    ~VKStagingBufferPool();
+    explicit StagingBufferPool(const Device& device, VKMemoryManager& memory_manager,
+                               VKScheduler& scheduler);
+    ~StagingBufferPool();
 
-    VKBuffer& GetUnusedBuffer(std::size_t size, bool host_visible);
+    StagingBufferRef Request(size_t size, bool host_visible);
 
     void TickFrame();
 
 private:
-    struct StagingBuffer final {
-        explicit StagingBuffer(std::unique_ptr<VKBuffer> buffer);
-
-        std::unique_ptr<VKBuffer> buffer;
+    struct StagingBuffer {
+        vk::Buffer buffer;
+        MemoryCommit commit;
+        std::span<u8> mapped_span;
         u64 tick = 0;
+
+        StagingBufferRef Ref() const noexcept {
+            return {
+                .buffer = *buffer,
+                .mapped_span = mapped_span,
+            };
+        }
     };
 
-    struct StagingBuffers final {
+    struct StagingBuffers {
         std::vector<StagingBuffer> entries;
-        std::size_t delete_index = 0;
+        size_t delete_index = 0;
+        size_t iterate_index = 0;
     };
 
-    static constexpr std::size_t NumLevels = sizeof(std::size_t) * CHAR_BIT;
-    using StagingBuffersCache = std::array<StagingBuffers, NumLevels>;
+    static constexpr size_t NUM_LEVELS = sizeof(size_t) * CHAR_BIT;
+    using StagingBuffersCache = std::array<StagingBuffers, NUM_LEVELS>;
 
-    VKBuffer* TryGetReservedBuffer(std::size_t size, bool host_visible);
+    std::optional<StagingBufferRef> TryGetReservedBuffer(size_t size, bool host_visible);
 
-    VKBuffer& CreateStagingBuffer(std::size_t size, bool host_visible);
+    StagingBufferRef CreateStagingBuffer(size_t size, bool host_visible);
 
     StagingBuffersCache& GetCache(bool host_visible);
 
     void ReleaseCache(bool host_visible);
 
-    u64 ReleaseLevel(StagingBuffersCache& cache, std::size_t log2);
+    void ReleaseLevel(StagingBuffersCache& cache, size_t log2);
 
     const Device& device;
     VKMemoryManager& memory_manager;
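The cache layout is unchanged in spirit: one StagingBuffers bucket per power-of-two size level, indexed by Common::Log2Ceil64(size), with NUM_LEVELS covering every representable size_t. For example, a 5000-byte request rounds up to level 13, an 8192-byte buffer. Below is an approximate reimplementation of the level computation using C++20 <bit>, for illustration only; the project's real helper lives in common/bit_util.h:

    // ceil(log2(value)) for value >= 1, written with std::bit_width; this mirrors
    // what Common::Log2Ceil64 computes but is not the project's implementation.
    #include <bit>
    #include <cstdint>

    constexpr std::uint32_t Log2CeilSketch(std::uint64_t value) {
        return static_cast<std::uint32_t>(std::bit_width(value - 1));
    }

    static_assert(Log2CeilSketch(1) == 0);     // level 0 holds 1-byte requests
    static_assert(Log2CeilSketch(4096) == 12); // exact powers of two stay put
    static_assert(Log2CeilSketch(5000) == 13); // rounds up to the 8192-byte level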
@@ -65,7 +73,8 @@ private:
     StagingBuffersCache host_staging_buffers;
     StagingBuffersCache device_staging_buffers;
 
-    std::size_t current_delete_level = 0;
+    size_t current_delete_level = 0;
+    u64 buffer_index = 0;
 };
 
 } // namespace Vulkan
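Taken together, the header now exposes a value-type API: Request hands out a StagingBufferRef, and for host-visible memory the span is already mapped for the lifetime of the buffer, so callers write through it directly instead of pairing Map with Release. A hypothetical call site against this header; UploadToStaging and payload are illustrative names, not part of the commit:

    #include <cstring>
    #include <span>

    #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"

    namespace Vulkan {

    // Copies payload into a freshly requested host-visible staging buffer and
    // returns the VkBuffer to record as a transfer source. Illustrative only.
    VkBuffer UploadToStaging(StagingBufferPool& pool, std::span<const u8> payload) {
        const StagingBufferRef ref = pool.Request(payload.size(), true);
        std::memcpy(ref.mapped_span.data(), payload.data(), payload.size_bytes());
        return ref.buffer;
    }

    } // namespace Vulkan

Because StagingBufferRef is a small copyable view, a VkBuffer handle plus a span, returning it by value is cheap and there is no ownership to release; the pool reclaims the entry once scheduler.IsFree(tick) reports the tick that used it as finished.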
video_core/renderer_vulkan/vk_texture_cache.cpp

@@ -554,10 +554,10 @@ void TextureCacheRuntime::Finish() {
 }
 
 ImageBufferMap TextureCacheRuntime::MapUploadBuffer(size_t size) {
-    const auto& buffer = staging_buffer_pool.GetUnusedBuffer(size, true);
+    const auto staging_ref = staging_buffer_pool.Request(size, true);
     return ImageBufferMap{
-        .handle = *buffer.handle,
-        .map = buffer.commit->Map(size),
+        .handle = staging_ref.buffer,
+        .span = staging_ref.mapped_span,
     };
 }
video_core/renderer_vulkan/vk_texture_cache.h

@@ -19,14 +19,13 @@ using VideoCommon::Offset2D;
 using VideoCommon::RenderTargets;
 using VideoCore::Surface::PixelFormat;
 
-class VKScheduler;
-class VKStagingBufferPool;
-
 class BlitImageHelper;
 class Device;
 class Image;
 class ImageView;
 class Framebuffer;
+class StagingBufferPool;
+class VKScheduler;
 
 struct RenderPassKey {
     constexpr auto operator<=>(const RenderPassKey&) const noexcept = default;
@@ -60,18 +59,18 @@ struct ImageBufferMap {
     }
 
     [[nodiscard]] std::span<u8> Span() const noexcept {
-        return map.Span();
+        return span;
     }
 
     VkBuffer handle;
-    MemoryMap map;
+    std::span<u8> span;
 };
 
 struct TextureCacheRuntime {
     const Device& device;
     VKScheduler& scheduler;
     VKMemoryManager& memory_manager;
-    VKStagingBufferPool& staging_buffer_pool;
+    StagingBufferPool& staging_buffer_pool;
     BlitImageHelper& blit_image_helper;
     std::unordered_map<RenderPassKey, vk::RenderPass> renderpass_cache;
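ImageBufferMap follows the same pattern: instead of owning a MemoryMap that unmaps on destruction, it now just carries the persistently mapped span handed out by the staging pool. A hypothetical consumer, mirroring MapUploadBuffer above; StageImageData and pixels are illustrative names:

    #include <cstring>
    #include <span>

    #include "video_core/renderer_vulkan/vk_texture_cache.h"

    namespace Vulkan {

    // Fills the upload staging memory for an image; recording the actual
    // vkCmdCopyBufferToImage is omitted. Illustrative only.
    void StageImageData(TextureCacheRuntime& runtime, std::span<const u8> pixels) {
        const ImageBufferMap map = runtime.MapUploadBuffer(pixels.size());
        std::memcpy(map.Span().data(), pixels.data(), pixels.size_bytes());
        // map.handle is the VkBuffer to use as the copy source.
    }

    } // namespace Vulkan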
@@ -141,7 +140,7 @@ private:
     VKScheduler* scheduler;
     vk::Image image;
     vk::Buffer buffer;
-    VKMemoryCommit commit;
+    MemoryCommit commit;
     VkImageAspectFlags aspect_mask = 0;
     bool initialized = false;
 };