renderer_vulkan: Use VMA for buffers

GPUCode 2023-05-27 17:09:17 +03:00
parent 48e39756f1
commit 7b2f680468
16 changed files with 262 additions and 211 deletions

View file

@@ -233,8 +233,8 @@ void Vulkan::RendererVulkan::RenderScreenshot(const Tegra::FramebufferConfig& fr
         .queueFamilyIndexCount = 0,
         .pQueueFamilyIndices = nullptr,
     };
-    const vk::Buffer dst_buffer = device.GetLogical().CreateBuffer(dst_buffer_info);
-    MemoryCommit dst_buffer_memory = memory_allocator.Commit(dst_buffer, MemoryUsage::Download);
+    const vk::Buffer dst_buffer =
+        memory_allocator.CreateBuffer(dst_buffer_info, MemoryUsage::Download);

     scheduler.RequestOutsideRenderPassOperationContext();
     scheduler.Record([&](vk::CommandBuffer cmdbuf) {
@@ -308,8 +308,9 @@ void Vulkan::RendererVulkan::RenderScreenshot(const Tegra::FramebufferConfig& fr
     scheduler.Finish();

     // Copy backing image data to the QImage screenshot buffer
-    const auto dst_memory_map = dst_buffer_memory.Map();
-    std::memcpy(renderer_settings.screenshot_bits, dst_memory_map.data(), dst_memory_map.size());
+    dst_buffer.Invalidate();
+    std::memcpy(renderer_settings.screenshot_bits, dst_buffer.Mapped().data(),
+                dst_buffer.Mapped().size());
     renderer_settings.screenshot_complete_callback(false);
     renderer_settings.screenshot_requested = false;
 }
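The screenshot path above now reads back through the buffer's own Invalidate()/Mapped() pair instead of a separate MemoryCommit. For reference, the equivalent GPU-to-CPU readback against the raw VMA API looks roughly like this (a sketch, assuming an initialized VmaAllocator named allocator and an already submitted, fenced copy; dst_pixels and size are placeholders):

    VkBufferCreateInfo buffer_ci{};
    buffer_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buffer_ci.size = size;
    buffer_ci.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo alloc_ci{};
    alloc_ci.flags =
        VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;
    alloc_ci.usage = VMA_MEMORY_USAGE_AUTO_PREFER_HOST;

    VkBuffer buffer{};
    VmaAllocation allocation{};
    VmaAllocationInfo info{};
    vmaCreateBuffer(allocator, &buffer_ci, &alloc_ci, &buffer, &allocation, &info);

    // ... record the image-to-buffer copy, submit, and wait on a fence ...

    // On non-coherent memory, CPU caches must be invalidated before reading.
    vmaInvalidateAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
    std::memcpy(dst_pixels, info.pMappedData, size);
    vmaDestroyBuffer(allocator, buffer, allocation);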

View file

@@ -162,7 +162,7 @@ void BlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer,
     SetUniformData(data, layout);
     SetVertexData(data, framebuffer, layout);

-    const std::span<u8> mapped_span = buffer_commit.Map();
+    const std::span<u8> mapped_span = buffer.Mapped();
     std::memcpy(mapped_span.data(), &data, sizeof(data));

     if (!use_accelerated) {
@@ -1074,7 +1074,6 @@ void BlitScreen::ReleaseRawImages() {
     aa_image_view.reset();
     aa_image.reset();
     buffer.reset();
-    buffer_commit = MemoryCommit{};
 }

 void BlitScreen::CreateStagingBuffer(const Tegra::FramebufferConfig& framebuffer) {
@@ -1090,8 +1089,7 @@ void BlitScreen::CreateStagingBuffer(const Tegra::FramebufferConfig& framebuffer
         .pQueueFamilyIndices = nullptr,
     };

-    buffer = device.GetLogical().CreateBuffer(ci);
-    buffer_commit = memory_allocator.Commit(buffer, MemoryUsage::Upload);
+    buffer = memory_allocator.CreateBuffer(ci, MemoryUsage::Upload);
 }

 void BlitScreen::CreateRawImages(const Tegra::FramebufferConfig& framebuffer) {

View file

@@ -142,7 +142,6 @@ private:
     vk::Sampler sampler;

     vk::Buffer buffer;
-    MemoryCommit buffer_commit;

     std::vector<u64> resource_ticks;

View file

@@ -50,7 +50,7 @@ size_t BytesPerIndex(VkIndexType index_type) {
     }
 }

-vk::Buffer CreateBuffer(const Device& device, u64 size) {
+vk::Buffer CreateBuffer(const Device& device, const MemoryAllocator& memory_allocator, u64 size) {
     VkBufferUsageFlags flags =
         VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
         VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT |
@@ -60,7 +60,7 @@ vk::Buffer CreateBuffer(const Device& device, u64 size) {
     if (device.IsExtTransformFeedbackSupported()) {
         flags |= VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT;
     }
-    return device.GetLogical().CreateBuffer({
+    const VkBufferCreateInfo buffer_ci = {
         .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
         .pNext = nullptr,
         .flags = 0,
@@ -69,7 +69,8 @@ vk::Buffer CreateBuffer(const Device& device, u64 size) {
         .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
         .queueFamilyIndexCount = 0,
         .pQueueFamilyIndices = nullptr,
-    });
+    };
+    return memory_allocator.CreateBuffer(buffer_ci, MemoryUsage::DeviceLocal);
 }

 } // Anonymous namespace
@@ -79,8 +80,8 @@ Buffer::Buffer(BufferCacheRuntime&, VideoCommon::NullBufferParams null_params)
 Buffer::Buffer(BufferCacheRuntime& runtime, VideoCore::RasterizerInterface& rasterizer_,
                VAddr cpu_addr_, u64 size_bytes_)
     : VideoCommon::BufferBase<VideoCore::RasterizerInterface>(rasterizer_, cpu_addr_, size_bytes_),
-      device{&runtime.device}, buffer{CreateBuffer(*device, SizeBytes())},
-      commit{runtime.memory_allocator.Commit(buffer, MemoryUsage::DeviceLocal)} {
+      device{&runtime.device}, buffer{
+                                   CreateBuffer(*device, runtime.memory_allocator, SizeBytes())} {
     if (runtime.device.HasDebuggingToolAttached()) {
         buffer.SetObjectNameEXT(fmt::format("Buffer 0x{:x}", CpuAddr()).c_str());
     }
@@ -138,7 +139,7 @@ public:
         const u32 num_first_offset_copies = 4;
         const size_t bytes_per_index = BytesPerIndex(index_type);
         const size_t size_bytes = num_triangle_indices * bytes_per_index * num_first_offset_copies;
-        buffer = device.GetLogical().CreateBuffer(VkBufferCreateInfo{
+        const VkBufferCreateInfo buffer_ci = {
            .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
            .pNext = nullptr,
            .flags = 0,
@@ -147,14 +148,21 @@ public:
             .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
             .queueFamilyIndexCount = 0,
             .pQueueFamilyIndices = nullptr,
-        });
+        };
+        buffer = memory_allocator.CreateBuffer(buffer_ci, MemoryUsage::DeviceLocal);
         if (device.HasDebuggingToolAttached()) {
             buffer.SetObjectNameEXT("Quad LUT");
         }
-        memory_commit = memory_allocator.Commit(buffer, MemoryUsage::DeviceLocal);

-        const StagingBufferRef staging = staging_pool.Request(size_bytes, MemoryUsage::Upload);
-        u8* staging_data = staging.mapped_span.data();
+        const bool host_visible = buffer.IsHostVisible();
+        const StagingBufferRef staging = [&] {
+            if (host_visible) {
+                return StagingBufferRef{};
+            }
+            return staging_pool.Request(size_bytes, MemoryUsage::Upload);
+        }();
+
+        u8* staging_data = host_visible ? buffer.Mapped().data() : staging.mapped_span.data();
         const size_t quad_size = bytes_per_index * 6;

         for (u32 first = 0; first < num_first_offset_copies; ++first) {
@@ -164,29 +172,33 @@ public:
             }
         }

-        scheduler.RequestOutsideRenderPassOperationContext();
-        scheduler.Record([src_buffer = staging.buffer, src_offset = staging.offset,
-                          dst_buffer = *buffer, size_bytes](vk::CommandBuffer cmdbuf) {
-            const VkBufferCopy copy{
-                .srcOffset = src_offset,
-                .dstOffset = 0,
-                .size = size_bytes,
-            };
-            const VkBufferMemoryBarrier write_barrier{
-                .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
-                .pNext = nullptr,
-                .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
-                .dstAccessMask = VK_ACCESS_INDEX_READ_BIT,
-                .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-                .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-                .buffer = dst_buffer,
-                .offset = 0,
-                .size = size_bytes,
-            };
-            cmdbuf.CopyBuffer(src_buffer, dst_buffer, copy);
-            cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
-                                   VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, write_barrier);
-        });
+        if (!host_visible) {
+            scheduler.RequestOutsideRenderPassOperationContext();
+            scheduler.Record([src_buffer = staging.buffer, src_offset = staging.offset,
+                              dst_buffer = *buffer, size_bytes](vk::CommandBuffer cmdbuf) {
+                const VkBufferCopy copy{
+                    .srcOffset = src_offset,
+                    .dstOffset = 0,
+                    .size = size_bytes,
+                };
+                const VkBufferMemoryBarrier write_barrier{
+                    .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
+                    .pNext = nullptr,
+                    .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
+                    .dstAccessMask = VK_ACCESS_INDEX_READ_BIT,
+                    .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                    .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                    .buffer = dst_buffer,
+                    .offset = 0,
+                    .size = size_bytes,
+                };
+                cmdbuf.CopyBuffer(src_buffer, dst_buffer, copy);
+                cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
+                                       VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, write_barrier);
+            });
+        } else {
+            buffer.Flush();
+        }
     }

     void BindBuffer(u32 first) {
@@ -587,11 +599,10 @@ void BufferCacheRuntime::ReserveNullBuffer() {
         create_info.usage |= VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT;
     }
     create_info.usage |= VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
-    null_buffer = device.GetLogical().CreateBuffer(create_info);
+    null_buffer = memory_allocator.CreateBuffer(create_info, MemoryUsage::DeviceLocal);
     if (device.HasDebuggingToolAttached()) {
         null_buffer.SetObjectNameEXT("Null buffer");
     }
-    null_buffer_commit = memory_allocator.Commit(null_buffer, MemoryUsage::DeviceLocal);

     scheduler.RequestOutsideRenderPassOperationContext();
     scheduler.Record([buffer = *null_buffer](vk::CommandBuffer cmdbuf) {
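The Quad LUT path above is the interesting case: DeviceLocal allocations are now requested with VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT (see the allocator changes below), so on heaps that expose host-visible device-local memory (e.g. resizable BAR) VMA may hand back a directly mappable allocation and the staging copy can be skipped entirely. A minimal sketch of that probe against the raw VMA API, with indices (a std::vector<u8>) and RecordStagingCopy as hypothetical stand-ins:

    VkMemoryPropertyFlags props{};
    vmaGetAllocationMemoryProperties(allocator, allocation, &props);
    if (props & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
        // Fast path: write through the persistent mapping, then flush.
        std::memcpy(info.pMappedData, indices.data(), indices.size());
        vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
    } else {
        // Slow path: stage through an Upload buffer and record a GPU copy.
        RecordStagingCopy();
    }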

View file

@@ -48,7 +48,6 @@ private:
     const Device* device{};
     vk::Buffer buffer;
-    MemoryCommit commit;
     std::vector<BufferView> views;
 };
@@ -142,7 +141,6 @@ private:
     std::shared_ptr<QuadStripIndexBuffer> quad_strip_index_buffer;

     vk::Buffer null_buffer;
-    MemoryCommit null_buffer_commit;

     std::unique_ptr<Uint8Pass> uint8_pass;
     QuadIndexedPass quad_index_pass;

View file

@@ -76,7 +76,7 @@ void TransitionImageLayout(vk::CommandBuffer& cmdbuf, VkImage image, VkImageLayo
 void UploadImage(const Device& device, MemoryAllocator& allocator, Scheduler& scheduler,
                  vk::Image& image, VkExtent2D dimensions, VkFormat format,
                  std::span<const u8> initial_contents = {}) {
-    auto upload_buffer = device.GetLogical().CreateBuffer(VkBufferCreateInfo{
+    const VkBufferCreateInfo upload_ci = {
         .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
         .pNext = nullptr,
         .flags = 0,
@@ -85,9 +85,10 @@ void UploadImage(const Device& device, MemoryAllocator& allocator, Scheduler& sc
         .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
         .queueFamilyIndexCount = 0,
         .pQueueFamilyIndices = nullptr,
-    });
-    auto upload_commit = allocator.Commit(upload_buffer, MemoryUsage::Upload);
-    std::ranges::copy(initial_contents, upload_commit.Map().begin());
+    };
+    auto upload_buffer = allocator.CreateBuffer(upload_ci, MemoryUsage::Upload);
+    std::ranges::copy(initial_contents, upload_buffer.Mapped().begin());
+    upload_buffer.Flush();

     const std::array<VkBufferImageCopy, 1> regions{{{
         .bufferOffset = 0,
@@ -111,9 +112,6 @@ void UploadImage(const Device& device, MemoryAllocator& allocator, Scheduler& sc
                               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
     });
     scheduler.Finish();
-
-    // This should go out of scope before the commit
-    auto upload_buffer2 = std::move(upload_buffer);
 }

 vk::ImageView CreateWrappedImageView(const Device& device, vk::Image& image, VkFormat format) {
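Note the deleted tail of UploadImage: under the old MemoryCommit design the buffer had to be forced out of scope before its commit to keep destruction order legal. With VMA the buffer and its allocation share one lifetime, conceptually like the following (a sketch, not the wrapper's exact code):

    struct OwnedBuffer {
        VmaAllocator allocator{};
        VkBuffer handle{};
        VmaAllocation allocation{};

        ~OwnedBuffer() {
            if (handle) {
                // Destroys the buffer and frees its backing memory in one call.
                vmaDestroyBuffer(allocator, handle, allocation);
            }
        }
    };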
vk::ImageView CreateWrappedImageView(const Device& device, vk::Image& image, VkFormat format) { vk::ImageView CreateWrappedImageView(const Device& device, vk::Image& image, VkFormat format) {

View file

@@ -30,55 +30,6 @@ constexpr VkDeviceSize MAX_STREAM_BUFFER_REQUEST_SIZE = 8_MiB;
 constexpr VkDeviceSize STREAM_BUFFER_SIZE = 128_MiB;
 constexpr VkDeviceSize REGION_SIZE = STREAM_BUFFER_SIZE / StagingBufferPool::NUM_SYNCS;

-constexpr VkMemoryPropertyFlags HOST_FLAGS =
-    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
-constexpr VkMemoryPropertyFlags STREAM_FLAGS = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | HOST_FLAGS;
-
-bool IsStreamHeap(VkMemoryHeap heap) noexcept {
-    return STREAM_BUFFER_SIZE < (heap.size * 2) / 3;
-}
-
-std::optional<u32> FindMemoryTypeIndex(const VkPhysicalDeviceMemoryProperties& props, u32 type_mask,
-                                       VkMemoryPropertyFlags flags) noexcept {
-    for (u32 type_index = 0; type_index < props.memoryTypeCount; ++type_index) {
-        if (((type_mask >> type_index) & 1) == 0) {
-            // Memory type is incompatible
-            continue;
-        }
-        const VkMemoryType& memory_type = props.memoryTypes[type_index];
-        if ((memory_type.propertyFlags & flags) != flags) {
-            // Memory type doesn't have the flags we want
-            continue;
-        }
-        if (!IsStreamHeap(props.memoryHeaps[memory_type.heapIndex])) {
-            // Memory heap is not suitable for streaming
-            continue;
-        }
-        // Success!
-        return type_index;
-    }
-    return std::nullopt;
-}
-
-u32 FindMemoryTypeIndex(const VkPhysicalDeviceMemoryProperties& props, u32 type_mask,
-                        bool try_device_local) {
-    std::optional<u32> type;
-    if (try_device_local) {
-        // Try to find a DEVICE_LOCAL_BIT type, Nvidia and AMD have a dedicated heap for this
-        type = FindMemoryTypeIndex(props, type_mask, STREAM_FLAGS);
-        if (type) {
-            return *type;
-        }
-    }
-    // Otherwise try without the DEVICE_LOCAL_BIT
-    type = FindMemoryTypeIndex(props, type_mask, HOST_FLAGS);
-    if (type) {
-        return *type;
-    }
-    // This should never happen, and in case it does, signal it as an out of memory situation
-    throw vk::Exception(VK_ERROR_OUT_OF_DEVICE_MEMORY);
-}
-
 size_t Region(size_t iterator) noexcept {
     return iterator / REGION_SIZE;
 }
@@ -87,8 +38,7 @@ size_t Region(size_t iterator) noexcept {
 StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& memory_allocator_,
                                      Scheduler& scheduler_)
     : device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_} {
-    const vk::Device& dev = device.GetLogical();
-    stream_buffer = dev.CreateBuffer(VkBufferCreateInfo{
+    const VkBufferCreateInfo stream_ci = {
         .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
         .pNext = nullptr,
         .flags = 0,
@@ -99,46 +49,13 @@ StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& mem
         .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
         .queueFamilyIndexCount = 0,
         .pQueueFamilyIndices = nullptr,
-    });
+    };
+    stream_buffer = memory_allocator.CreateBuffer(stream_ci, MemoryUsage::Stream);
     if (device.HasDebuggingToolAttached()) {
         stream_buffer.SetObjectNameEXT("Stream Buffer");
     }
-    VkMemoryDedicatedRequirements dedicated_reqs{
-        .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS,
-        .pNext = nullptr,
-        .prefersDedicatedAllocation = VK_FALSE,
-        .requiresDedicatedAllocation = VK_FALSE,
-    };
-    const auto requirements = dev.GetBufferMemoryRequirements(*stream_buffer, &dedicated_reqs);
-    const bool make_dedicated = dedicated_reqs.prefersDedicatedAllocation == VK_TRUE ||
-                                dedicated_reqs.requiresDedicatedAllocation == VK_TRUE;
-    const VkMemoryDedicatedAllocateInfo dedicated_info{
-        .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
-        .pNext = nullptr,
-        .image = nullptr,
-        .buffer = *stream_buffer,
-    };
-    const auto memory_properties = device.GetPhysical().GetMemoryProperties().memoryProperties;
-    VkMemoryAllocateInfo stream_memory_info{
-        .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
-        .pNext = make_dedicated ? &dedicated_info : nullptr,
-        .allocationSize = requirements.size,
-        .memoryTypeIndex =
-            FindMemoryTypeIndex(memory_properties, requirements.memoryTypeBits, true),
-    };
-    stream_memory = dev.TryAllocateMemory(stream_memory_info);
-    if (!stream_memory) {
-        LOG_INFO(Render_Vulkan, "Dynamic memory allocation failed, trying with system memory");
-        stream_memory_info.memoryTypeIndex =
-            FindMemoryTypeIndex(memory_properties, requirements.memoryTypeBits, false);
-        stream_memory = dev.AllocateMemory(stream_memory_info);
-    }
-    if (device.HasDebuggingToolAttached()) {
-        stream_memory.SetObjectNameEXT("Stream Buffer Memory");
-    }
-    stream_buffer.BindMemory(*stream_memory, 0);
-    stream_pointer = stream_memory.Map(0, STREAM_BUFFER_SIZE);
+    stream_pointer = stream_buffer.Mapped();
+    ASSERT_MSG(!stream_pointer.empty(), "Stream buffer must be host visible!");
 }

 StagingBufferPool::~StagingBufferPool() = default;
@@ -199,7 +116,7 @@ StagingBufferRef StagingBufferPool::GetStreamBuffer(size_t size) {
     return StagingBufferRef{
         .buffer = *stream_buffer,
         .offset = static_cast<VkDeviceSize>(offset),
-        .mapped_span = std::span<u8>(stream_pointer + offset, size),
+        .mapped_span = stream_pointer.subspan(offset, size),
         .usage{},
         .log2_level{},
         .index{},
@@ -247,7 +164,7 @@ std::optional<StagingBufferRef> StagingBufferPool::TryGetReservedBuffer(size_t s
 StagingBufferRef StagingBufferPool::CreateStagingBuffer(size_t size, MemoryUsage usage,
                                                         bool deferred) {
     const u32 log2 = Common::Log2Ceil64(size);
-    vk::Buffer buffer = device.GetLogical().CreateBuffer({
+    const VkBufferCreateInfo buffer_ci = {
         .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
         .pNext = nullptr,
         .flags = 0,
@@ -259,17 +176,15 @@ StagingBufferRef StagingBufferPool::CreateStagingBuffer(size_t size, MemoryUsage
         .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
         .queueFamilyIndexCount = 0,
         .pQueueFamilyIndices = nullptr,
-    });
+    };
+    vk::Buffer buffer = memory_allocator.CreateBuffer(buffer_ci, usage);
     if (device.HasDebuggingToolAttached()) {
         ++buffer_index;
         buffer.SetObjectNameEXT(fmt::format("Staging Buffer {}", buffer_index).c_str());
     }
-    MemoryCommit commit = memory_allocator.Commit(buffer, usage);
-    const std::span<u8> mapped_span = IsHostVisible(usage) ? commit.Map() : std::span<u8>{};
+    const std::span<u8> mapped_span = buffer.Mapped();

     StagingBuffer& entry = GetCache(usage)[log2].entries.emplace_back(StagingBuffer{
         .buffer = std::move(buffer),
-        .commit = std::move(commit),
         .mapped_span = mapped_span,
         .usage = usage,
         .log2_level = log2,

View file

@@ -46,7 +46,6 @@ private:
     struct StagingBuffer {
         vk::Buffer buffer;
-        MemoryCommit commit;
         std::span<u8> mapped_span;
         MemoryUsage usage;
         u32 log2_level;
@@ -97,8 +96,7 @@ private:
     Scheduler& scheduler;

     vk::Buffer stream_buffer;
-    vk::DeviceMemory stream_memory;
-    u8* stream_pointer = nullptr;
+    std::span<u8> stream_pointer;

     size_t iterator = 0;
     size_t used_iterator = 0;
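stream_pointer is now a std::span<u8> over the stream buffer's persistent mapping rather than a raw u8* obtained from vkMapMemory, so the slices handed out carry their extent with them. The hand-out in GetStreamBuffer reduces to roughly this (the ring-buffer bookkeeping that yields offset is elided):

    std::span<u8> stream_pointer = stream_buffer.Mapped();
    // ... bookkeeping yields `offset` for a request of `size` bytes ...
    const std::span<u8> slice = stream_pointer.subspan(offset, size);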

View file

@@ -839,14 +839,14 @@ bool TextureCacheRuntime::ShouldReinterpret(Image& dst, Image& src) {
 VkBuffer TextureCacheRuntime::GetTemporaryBuffer(size_t needed_size) {
     const auto level = (8 * sizeof(size_t)) - std::countl_zero(needed_size - 1ULL);
-    if (buffer_commits[level]) {
+    if (buffers[level]) {
         return *buffers[level];
     }
     const auto new_size = Common::NextPow2(needed_size);
     static constexpr VkBufferUsageFlags flags =
         VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
         VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
-    buffers[level] = device.GetLogical().CreateBuffer({
+    const VkBufferCreateInfo temp_ci = {
         .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
         .pNext = nullptr,
         .flags = 0,
@@ -855,9 +855,8 @@ VkBuffer TextureCacheRuntime::GetTemporaryBuffer(size_t needed_size) {
         .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
         .queueFamilyIndexCount = 0,
         .pQueueFamilyIndices = nullptr,
-    });
-    buffer_commits[level] = std::make_unique<MemoryCommit>(
-        memory_allocator.Commit(buffers[level], MemoryUsage::DeviceLocal));
+    };
+    buffers[level] = memory_allocator.CreateBuffer(temp_ci, MemoryUsage::DeviceLocal);
     return *buffers[level];
 }

View file

@@ -116,7 +116,6 @@ public:
     static constexpr size_t indexing_slots = 8 * sizeof(size_t);
     std::array<vk::Buffer, indexing_slots> buffers{};
-    std::array<std::unique_ptr<MemoryCommit>, indexing_slots> buffer_commits{};
 };

 class Image : public VideoCommon::ImageBase {

View file

@@ -41,7 +41,7 @@ void TurboMode::Run(std::stop_token stop_token) {
     auto& dld = m_device.GetLogical();

     // Allocate buffer. 2MiB should be sufficient.
-    auto buffer = dld.CreateBuffer(VkBufferCreateInfo{
+    const VkBufferCreateInfo buffer_ci = {
         .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
         .pNext = nullptr,
         .flags = 0,
@@ -50,10 +50,8 @@ void TurboMode::Run(std::stop_token stop_token) {
         .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
         .queueFamilyIndexCount = 0,
         .pQueueFamilyIndices = nullptr,
-    });
-
-    // Commit some device local memory for the buffer.
-    auto commit = m_allocator.Commit(buffer, MemoryUsage::DeviceLocal);
+    };
+    vk::Buffer buffer = m_allocator.CreateBuffer(buffer_ci, MemoryUsage::DeviceLocal);

     // Create the descriptor pool to contain our descriptor.
     static constexpr VkDescriptorPoolSize pool_size{

View file

@@ -603,6 +603,7 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
     };

     const VmaAllocatorCreateInfo allocator_info = {
+        .flags = VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT,
         .physicalDevice = physical,
         .device = *logical,
         .pVulkanFunctions = &functions,
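The allocator itself is created once in the Device constructor; the new flag tells VMA the caller guarantees external synchronization, letting it skip its internal mutexes. Creation looks roughly like this (a sketch; the handle names and API version are assumptions, not code from this commit):

    const VmaAllocatorCreateInfo allocator_info = {
        // Caller promises to serialize all allocator calls itself.
        .flags = VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT,
        .physicalDevice = physical_device,
        .device = device,
        .pVulkanFunctions = &functions,
        .instance = instance,
        .vulkanApiVersion = VK_API_VERSION_1_1,
    };

    VmaAllocator allocator{};
    vk::Check(vmaCreateAllocator(&allocator_info, &allocator));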

View file

@@ -51,11 +51,59 @@ struct Range {
     case MemoryUsage::Download:
         return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+    case MemoryUsage::Stream:
+        return VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+               VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
     }
     ASSERT_MSG(false, "Invalid memory usage={}", usage);
     return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
 }

+[[nodiscard]] VkMemoryPropertyFlags MemoryUsageRequiredVmaFlags(MemoryUsage usage) {
+    switch (usage) {
+    case MemoryUsage::DeviceLocal:
+        return VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+    case MemoryUsage::Upload:
+    case MemoryUsage::Stream:
+        return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+    case MemoryUsage::Download:
+        return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+    }
+    ASSERT_MSG(false, "Invalid memory usage={}", usage);
+    return {};
+}
+
+[[nodiscard]] VkMemoryPropertyFlags MemoryUsagePreferedVmaFlags(MemoryUsage usage) {
+    return usage != MemoryUsage::DeviceLocal ? VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
+                                             : VkMemoryPropertyFlags{};
+}
+
+[[nodiscard]] VmaAllocationCreateFlags MemoryUsageVmaFlags(MemoryUsage usage) {
+    switch (usage) {
+    case MemoryUsage::Upload:
+    case MemoryUsage::Stream:
+        return VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
+    case MemoryUsage::Download:
+        return VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
+    case MemoryUsage::DeviceLocal:
+        return VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+               VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT;
+    }
+    return {};
+}
+
+[[nodiscard]] VmaMemoryUsage MemoryUsageVma(MemoryUsage usage) {
+    switch (usage) {
+    case MemoryUsage::DeviceLocal:
+    case MemoryUsage::Stream:
+        return VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
+    case MemoryUsage::Upload:
+    case MemoryUsage::Download:
+        return VMA_MEMORY_USAGE_AUTO_PREFER_HOST;
+    }
+    return VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
+}
+
 } // Anonymous namespace

 class MemoryAllocation {
@@ -178,17 +226,18 @@ void MemoryCommit::Release() {
 }

 MemoryAllocator::MemoryAllocator(const Device& device_)
-    : device{device_}, properties{device_.GetPhysical().GetMemoryProperties().memoryProperties},
+    : device{device_}, allocator{device.GetAllocator()},
+      properties{device_.GetPhysical().GetMemoryProperties().memoryProperties},
       buffer_image_granularity{
           device_.GetPhysical().GetProperties().limits.bufferImageGranularity} {}

 MemoryAllocator::~MemoryAllocator() = default;

 vk::Image MemoryAllocator::CreateImage(const VkImageCreateInfo& ci) const {
-    const VmaAllocationCreateInfo alloc_info = {
+    const VmaAllocationCreateInfo alloc_ci = {
         .flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT,
         .usage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE,
-        .requiredFlags = 0,
+        .requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
         .preferredFlags = 0,
         .pool = VK_NULL_HANDLE,
         .pUserData = nullptr,
@@ -196,12 +245,40 @@ vk::Image MemoryAllocator::CreateImage(const VkImageCreateInfo& ci) const {
     VkImage handle{};
     VmaAllocation allocation{};
-    vk::Check(
-        vmaCreateImage(device.GetAllocator(), &ci, &alloc_info, &handle, &allocation, nullptr));
-    return vk::Image(handle, *device.GetLogical(), device.GetAllocator(), allocation,
+
+    vk::Check(vmaCreateImage(allocator, &ci, &alloc_ci, &handle, &allocation, nullptr));
+
+    return vk::Image(handle, *device.GetLogical(), allocator, allocation,
                      device.GetDispatchLoader());
 }

+vk::Buffer MemoryAllocator::CreateBuffer(const VkBufferCreateInfo& ci, MemoryUsage usage) const {
+    const VmaAllocationCreateInfo alloc_ci = {
+        .flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT |
+                 MemoryUsageVmaFlags(usage),
+        .usage = MemoryUsageVma(usage),
+        .requiredFlags = MemoryUsageRequiredVmaFlags(usage),
+        .preferredFlags = MemoryUsagePreferedVmaFlags(usage),
+        .pool = VK_NULL_HANDLE,
+        .pUserData = nullptr,
+    };
+
+    VkBuffer handle{};
+    VmaAllocationInfo alloc_info{};
+    VmaAllocation allocation{};
+    VkMemoryPropertyFlags property_flags{};
+
+    vk::Check(vmaCreateBuffer(allocator, &ci, &alloc_ci, &handle, &allocation, &alloc_info));
+    vmaGetAllocationMemoryProperties(allocator, allocation, &property_flags);
+
+    u8* data = reinterpret_cast<u8*>(alloc_info.pMappedData);
+    const std::span<u8> mapped_data = data ? std::span<u8>{data, ci.size} : std::span<u8>{};
+    const bool is_coherent = property_flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+
+    return vk::Buffer(handle, *device.GetLogical(), allocator, allocation, mapped_data,
+                      is_coherent, device.GetDispatchLoader());
+}
+
 MemoryCommit MemoryAllocator::Commit(const VkMemoryRequirements& requirements, MemoryUsage usage) {
     // Find the fastest memory flags we can afford with the current requirements
     const u32 type_mask = requirements.memoryTypeBits;
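Taken together, CreateBuffer expands a MemoryUsage into a single VmaAllocationCreateInfo. For MemoryUsage::Upload, for instance, the helpers above produce the equivalent of (an illustrative expansion, not code from this commit):

    const VmaAllocationCreateInfo upload_ci = {
        .flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT |
                 VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT, // MemoryUsageVmaFlags
        .usage = VMA_MEMORY_USAGE_AUTO_PREFER_HOST,           // MemoryUsageVma
        .requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, // MemoryUsageRequiredVmaFlags
        .preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, // MemoryUsagePreferedVmaFlags
        .pool = VK_NULL_HANDLE,
        .pUserData = nullptr,
    };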
@@ -221,12 +298,6 @@ MemoryCommit MemoryAllocator::Commit(const VkMemoryRequirements& requirements, M
     return TryCommit(requirements, flags).value();
 }

-MemoryCommit MemoryAllocator::Commit(const vk::Buffer& buffer, MemoryUsage usage) {
-    auto commit = Commit(device.GetLogical().GetBufferMemoryRequirements(*buffer), usage);
-    buffer.BindMemory(commit.Memory(), commit.Offset());
-    return commit;
-}
-
 bool MemoryAllocator::TryAllocMemory(VkMemoryPropertyFlags flags, u32 type_mask, u64 size) {
     const u32 type = FindType(flags, type_mask).value();
     vk::DeviceMemory memory = device.GetLogical().TryAllocateMemory({
@@ -302,16 +373,4 @@ std::optional<u32> MemoryAllocator::FindType(VkMemoryPropertyFlags flags, u32 ty
     return std::nullopt;
 }

-bool IsHostVisible(MemoryUsage usage) noexcept {
-    switch (usage) {
-    case MemoryUsage::DeviceLocal:
-        return false;
-    case MemoryUsage::Upload:
-    case MemoryUsage::Download:
-        return true;
-    }
-    ASSERT_MSG(false, "Invalid memory usage={}", usage);
-    return false;
-}
-
 } // namespace Vulkan

View file

@@ -9,6 +9,8 @@
 #include "common/common_types.h"
 #include "video_core/vulkan_common/vulkan_wrapper.h"

+VK_DEFINE_HANDLE(VmaAllocator)
+
 namespace Vulkan {

 class Device;
@@ -17,9 +19,11 @@ class MemoryAllocation;

 /// Hints and requirements for the backing memory type of a commit
 enum class MemoryUsage {
-    DeviceLocal, ///< Hints device local usages, fastest memory type to read and write from the GPU
+    DeviceLocal, ///< Requests a device local host visible buffer, falling back to device local
+                 ///< memory.
     Upload,      ///< Requires a host visible memory type optimized for CPU to GPU uploads
     Download,    ///< Requires a host visible memory type optimized for GPU to CPU readbacks
+    Stream,      ///< Requests a device local host visible buffer, falling back to host memory.
 };

 /// Ownership handle of a memory commitment.
@@ -82,6 +86,8 @@ public:
     vk::Image CreateImage(const VkImageCreateInfo& ci) const;

+    vk::Buffer CreateBuffer(const VkBufferCreateInfo& ci, MemoryUsage usage) const;
+
     /**
      * Commits a memory with the specified requirements.
      *
@@ -113,13 +119,11 @@ private:
     std::optional<u32> FindType(VkMemoryPropertyFlags flags, u32 type_mask) const;

     const Device& device;                              ///< Device handle.
+    VmaAllocator allocator;                            ///< Vma allocator.
     const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties.
     std::vector<std::unique_ptr<MemoryAllocation>> allocations; ///< Current allocations.
     VkDeviceSize buffer_image_granularity; // The granularity for adjacent offsets between buffers
                                            // and optimal images
 };

-/// Returns true when a memory usage is guaranteed to be host visible.
-bool IsHostVisible(MemoryUsage usage) noexcept;
-
 } // namespace Vulkan
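The resulting call pattern, visible throughout the callers in this commit: fill a VkBufferCreateInfo, pick a MemoryUsage, and use the returned buffer's mapping directly. For example (ci stands for any filled VkBufferCreateInfo):

    vk::Buffer staging = memory_allocator.CreateBuffer(ci, MemoryUsage::Upload);

    const std::span<u8> span = staging.Mapped(); // empty if the memory is not host visible
    std::memcpy(span.data(), data, span.size());
    staging.Flush(); // no-op on host-coherent memory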

View file

@@ -561,14 +561,28 @@ void Image::Release() const noexcept {
     }
 }

-void Buffer::BindMemory(VkDeviceMemory memory, VkDeviceSize offset) const {
-    Check(dld->vkBindBufferMemory(owner, handle, memory, offset));
+void Buffer::Flush() const {
+    if (!is_coherent) {
+        vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
+    }
+}
+
+void Buffer::Invalidate() const {
+    if (!is_coherent) {
+        vmaInvalidateAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
+    }
 }

 void Buffer::SetObjectNameEXT(const char* name) const {
     SetObjectName(dld, owner, handle, VK_OBJECT_TYPE_BUFFER, name);
 }

+void Buffer::Release() const noexcept {
+    if (handle) {
+        vmaDestroyBuffer(allocator, handle, allocation);
+    }
+}
+
 void BufferView::SetObjectNameEXT(const char* name) const {
     SetObjectName(dld, owner, handle, VK_OBJECT_TYPE_BUFFER_VIEW, name);
 }
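Flush and Invalidate mirror the two directions of CPU access to non-coherent memory: flush after the host writes, invalidate before the host reads. Both are no-ops when VMA reports VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, hence the is_coherent check. Correct ordering around a round trip (a sketch; SubmitAndWait, src, dst and size are placeholders):

    std::memcpy(buffer.Mapped().data(), src, size);
    buffer.Flush();      // make CPU writes visible to the GPU
    SubmitAndWait();     // GPU work that reads and then overwrites the buffer
    buffer.Invalidate(); // make GPU writes visible to the CPU
    std::memcpy(dst, buffer.Mapped().data(), size);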
@@ -707,12 +721,6 @@ Queue Device::GetQueue(u32 family_index) const noexcept {
     return Queue(queue, *dld);
 }

-Buffer Device::CreateBuffer(const VkBufferCreateInfo& ci) const {
-    VkBuffer object;
-    Check(dld->vkCreateBuffer(handle, &ci, nullptr, &object));
-    return Buffer(object, handle, *dld);
-}
-
 BufferView Device::CreateBufferView(const VkBufferViewCreateInfo& ci) const {
     VkBufferView object;
     Check(dld->vkCreateBufferView(handle, &ci, nullptr, &object));

View file

@@ -673,6 +673,84 @@ private:
     const DeviceDispatch* dld = nullptr;
 };

+class Buffer {
+public:
+    explicit Buffer(VkBuffer handle_, VkDevice owner_, VmaAllocator allocator_,
+                    VmaAllocation allocation_, std::span<u8> mapped_, bool is_coherent_,
+                    const DeviceDispatch& dld_) noexcept
+        : handle{handle_}, owner{owner_}, allocator{allocator_},
+          allocation{allocation_}, mapped{mapped_}, is_coherent{is_coherent_}, dld{&dld_} {}
+    Buffer() = default;
+
+    Buffer(const Buffer&) = delete;
+    Buffer& operator=(const Buffer&) = delete;
+
+    Buffer(Buffer&& rhs) noexcept
+        : handle{std::exchange(rhs.handle, nullptr)}, owner{rhs.owner}, allocator{rhs.allocator},
+          allocation{rhs.allocation}, mapped{rhs.mapped},
+          is_coherent{rhs.is_coherent}, dld{rhs.dld} {}
+
+    Buffer& operator=(Buffer&& rhs) noexcept {
+        Release();
+        handle = std::exchange(rhs.handle, nullptr);
+        owner = rhs.owner;
+        allocator = rhs.allocator;
+        allocation = rhs.allocation;
+        mapped = rhs.mapped;
+        is_coherent = rhs.is_coherent;
+        dld = rhs.dld;
+        return *this;
+    }
+
+    ~Buffer() noexcept {
+        Release();
+    }
+
+    VkBuffer operator*() const noexcept {
+        return handle;
+    }
+
+    void reset() noexcept {
+        Release();
+        handle = nullptr;
+    }
+
+    explicit operator bool() const noexcept {
+        return handle != nullptr;
+    }
+
+    /// Returns the host mapped memory, an empty span otherwise.
+    std::span<u8> Mapped() noexcept {
+        return mapped;
+    }
+
+    std::span<const u8> Mapped() const noexcept {
+        return mapped;
+    }
+
+    /// Returns true if the buffer is mapped to the host.
+    bool IsHostVisible() const noexcept {
+        return !mapped.empty();
+    }
+
+    void Flush() const;
+    void Invalidate() const;
+
+    void SetObjectNameEXT(const char* name) const;
+
+private:
+    void Release() const noexcept;
+
+    VkBuffer handle = nullptr;
+    VkDevice owner = nullptr;
+    VmaAllocator allocator = nullptr;
+    VmaAllocation allocation = nullptr;
+    std::span<u8> mapped = {};
+    bool is_coherent = false;
+    const DeviceDispatch* dld = nullptr;
+};
+
 class Queue {
 public:
     /// Construct an empty queue handle.
@@ -696,17 +774,6 @@ private:
     const DeviceDispatch* dld = nullptr;
 };

-class Buffer : public Handle<VkBuffer, VkDevice, DeviceDispatch> {
-    using Handle<VkBuffer, VkDevice, DeviceDispatch>::Handle;
-
-public:
-    /// Attaches a memory allocation.
-    void BindMemory(VkDeviceMemory memory, VkDeviceSize offset) const;
-
-    /// Set object name.
-    void SetObjectNameEXT(const char* name) const;
-};
-
 class BufferView : public Handle<VkBufferView, VkDevice, DeviceDispatch> {
     using Handle<VkBufferView, VkDevice, DeviceDispatch>::Handle;

@@ -886,8 +953,6 @@ public:
     Queue GetQueue(u32 family_index) const noexcept;

-    Buffer CreateBuffer(const VkBufferCreateInfo& ci) const;
-
     BufferView CreateBufferView(const VkBufferViewCreateInfo& ci) const;
     ImageView CreateImageView(const VkImageViewCreateInfo& ci) const;