Mirror of https://git.suyu.dev/suyu/suyu.git
Merge pull request #4364 from lioncash/desig5

vulkan: Make use of designated initializers where applicable

Commit 821d295f24: 19 changed files with 758 additions and 659 deletions
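The change is mechanical across all nineteen files: every Vulkan create-info struct that was declared uninitialized and then filled in member by member is rebuilt as a single aggregate (usually const) using C++20 designated initializers. As a minimal sketch of the before/after pattern, using hypothetical OldStyle/NewStyle wrappers that are not part of this commit:

    #include <vulkan/vulkan.h>

    // Before: the struct starts out uninitialized, so a forgotten member
    // is left indeterminate rather than zeroed.
    VkBufferCreateInfo OldStyle(VkDeviceSize size) {
        VkBufferCreateInfo ci;
        ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
        ci.pNext = nullptr;
        ci.flags = 0;
        ci.size = size;
        ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
        ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
        ci.queueFamilyIndexCount = 0;
        ci.pQueueFamilyIndices = nullptr;
        return ci;
    }

    // After: one const aggregate. Designators must follow the members'
    // declaration order, and any member left out is value-initialized
    // (zeroed) instead of left as garbage.
    VkBufferCreateInfo NewStyle(VkDeviceSize size) {
        const VkBufferCreateInfo ci{
            .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
            .pNext = nullptr,
            .flags = 0,
            .size = size,
            .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
            .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
            .queueFamilyIndexCount = 0,
            .pQueueFamilyIndices = nullptr,
        };
        return ci;
    }

The diff below applies this transformation to the buffer cache, compute passes, pipelines, descriptor pools, images, render passes, samplers, and the scheduler.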
@@ -39,16 +39,17 @@ std::unique_ptr<VKStreamBuffer> CreateStreamBuffer(const VKDevice& device, VKScheduler
 
 Buffer::Buffer(const VKDevice& device, VKMemoryManager& memory_manager, VKScheduler& scheduler_,
                VKStagingBufferPool& staging_pool_, VAddr cpu_addr, std::size_t size)
-    : VideoCommon::BufferBlock{cpu_addr, size}, scheduler{scheduler_}, staging_pool{staging_pool_} {
-    VkBufferCreateInfo ci;
-    ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
-    ci.pNext = nullptr;
-    ci.flags = 0;
-    ci.size = static_cast<VkDeviceSize>(size);
-    ci.usage = BUFFER_USAGE | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-    ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
-    ci.queueFamilyIndexCount = 0;
-    ci.pQueueFamilyIndices = nullptr;
+    : BufferBlock{cpu_addr, size}, scheduler{scheduler_}, staging_pool{staging_pool_} {
+    const VkBufferCreateInfo ci{
+        .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .size = static_cast<VkDeviceSize>(size),
+        .usage = BUFFER_USAGE | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
+        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+        .queueFamilyIndexCount = 0,
+        .pQueueFamilyIndices = nullptr,
+    };
 
     buffer.handle = device.GetLogical().CreateBuffer(ci);
     buffer.commit = memory_manager.Commit(buffer.handle, false);
@@ -66,16 +67,17 @@ void Buffer::Upload(std::size_t offset, std::size_t size, const u8* data) {
     scheduler.Record([staging = *staging.handle, handle, offset, size](vk::CommandBuffer cmdbuf) {
         cmdbuf.CopyBuffer(staging, handle, VkBufferCopy{0, offset, size});
 
-        VkBufferMemoryBarrier barrier;
-        barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
-        barrier.pNext = nullptr;
-        barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
-        barrier.dstAccessMask = UPLOAD_ACCESS_BARRIERS;
-        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
-        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
-        barrier.buffer = handle;
-        barrier.offset = offset;
-        barrier.size = size;
+        const VkBufferMemoryBarrier barrier{
+            .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
+            .pNext = nullptr,
+            .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
+            .dstAccessMask = UPLOAD_ACCESS_BARRIERS,
+            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+            .buffer = handle,
+            .offset = offset,
+            .size = size,
+        };
         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, UPLOAD_PIPELINE_STAGE, 0, {},
                                barrier, {});
     });
@@ -87,16 +89,17 @@ void Buffer::Download(std::size_t offset, std::size_t size, u8* data) {
 
     const VkBuffer handle = Handle();
     scheduler.Record([staging = *staging.handle, handle, offset, size](vk::CommandBuffer cmdbuf) {
-        VkBufferMemoryBarrier barrier;
-        barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
-        barrier.pNext = nullptr;
-        barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
-        barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
-        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
-        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
-        barrier.buffer = handle;
-        barrier.offset = offset;
-        barrier.size = size;
+        const VkBufferMemoryBarrier barrier{
+            .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
+            .pNext = nullptr,
+            .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
+            .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
+            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+            .buffer = handle,
+            .offset = offset,
+            .size = size,
+        };
 
         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
                                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
@@ -115,32 +115,32 @@ constexpr u8 quad_array[] = {
     0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00};
 
 VkDescriptorSetLayoutBinding BuildQuadArrayPassDescriptorSetLayoutBinding() {
-    VkDescriptorSetLayoutBinding binding;
-    binding.binding = 0;
-    binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
-    binding.descriptorCount = 1;
-    binding.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
-    binding.pImmutableSamplers = nullptr;
-    return binding;
+    return {
+        .binding = 0,
+        .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+        .descriptorCount = 1,
+        .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+        .pImmutableSamplers = nullptr,
+    };
 }
 
 VkDescriptorUpdateTemplateEntryKHR BuildQuadArrayPassDescriptorUpdateTemplateEntry() {
-    VkDescriptorUpdateTemplateEntryKHR entry;
-    entry.dstBinding = 0;
-    entry.dstArrayElement = 0;
-    entry.descriptorCount = 1;
-    entry.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
-    entry.offset = 0;
-    entry.stride = sizeof(DescriptorUpdateEntry);
-    return entry;
+    return {
+        .dstBinding = 0,
+        .dstArrayElement = 0,
+        .descriptorCount = 1,
+        .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+        .offset = 0,
+        .stride = sizeof(DescriptorUpdateEntry),
+    };
 }
 
 VkPushConstantRange BuildComputePushConstantRange(std::size_t size) {
-    VkPushConstantRange range;
-    range.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
-    range.offset = 0;
-    range.size = static_cast<u32>(size);
-    return range;
+    return {
+        .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+        .offset = 0,
+        .size = static_cast<u32>(size),
+    };
 }
 
 // Uint8 SPIR-V module. Generated from the "shaders/" directory.
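Several helpers in the hunk above go from declare-assign-return to returning a braced list directly. The function's declared return type supplies the aggregate type, so the object is constructed straight into the return value with no named temporary. A standalone sketch of the idiom, with a hypothetical PushRange struct rather than a real Vulkan type:

    #include <cstdint>

    struct PushRange {
        std::uint32_t offset;
        std::uint32_t size;
    };

    PushRange MakePushRange(std::uint32_t size) {
        // The declared return type tells the compiler which aggregate to build.
        return {
            .offset = 0,
            .size = size,
        };
    }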
@@ -344,29 +344,33 @@ constexpr u8 QUAD_INDEXED_SPV[] = {
     0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00};
 
 std::array<VkDescriptorSetLayoutBinding, 2> BuildInputOutputDescriptorSetBindings() {
-    std::array<VkDescriptorSetLayoutBinding, 2> bindings;
-    bindings[0].binding = 0;
-    bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
-    bindings[0].descriptorCount = 1;
-    bindings[0].stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
-    bindings[0].pImmutableSamplers = nullptr;
-    bindings[1].binding = 1;
-    bindings[1].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
-    bindings[1].descriptorCount = 1;
-    bindings[1].stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
-    bindings[1].pImmutableSamplers = nullptr;
-    return bindings;
+    return {{
+        {
+            .binding = 0,
+            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+            .descriptorCount = 1,
+            .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+            .pImmutableSamplers = nullptr,
+        },
+        {
+            .binding = 1,
+            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+            .descriptorCount = 1,
+            .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+            .pImmutableSamplers = nullptr,
+        },
+    }};
 }
 
 VkDescriptorUpdateTemplateEntryKHR BuildInputOutputDescriptorUpdateTemplate() {
-    VkDescriptorUpdateTemplateEntryKHR entry;
-    entry.dstBinding = 0;
-    entry.dstArrayElement = 0;
-    entry.descriptorCount = 2;
-    entry.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
-    entry.offset = 0;
-    entry.stride = sizeof(DescriptorUpdateEntry);
-    return entry;
+    return {
+        .dstBinding = 0,
+        .dstArrayElement = 0,
+        .descriptorCount = 2,
+        .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+        .offset = 0,
+        .stride = sizeof(DescriptorUpdateEntry),
+    };
 }
 
 } // Anonymous namespace
@@ -376,37 +380,37 @@ VKComputePass::VKComputePass(const VKDevice& device, VKDescriptorPool& descripto
                              vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
                              vk::Span<VkPushConstantRange> push_constants, std::size_t code_size,
                              const u8* code) {
-    VkDescriptorSetLayoutCreateInfo descriptor_layout_ci;
-    descriptor_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
-    descriptor_layout_ci.pNext = nullptr;
-    descriptor_layout_ci.flags = 0;
-    descriptor_layout_ci.bindingCount = bindings.size();
-    descriptor_layout_ci.pBindings = bindings.data();
-    descriptor_set_layout = device.GetLogical().CreateDescriptorSetLayout(descriptor_layout_ci);
+    descriptor_set_layout = device.GetLogical().CreateDescriptorSetLayout({
+        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .bindingCount = bindings.size(),
+        .pBindings = bindings.data(),
+    });
 
-    VkPipelineLayoutCreateInfo pipeline_layout_ci;
-    pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
-    pipeline_layout_ci.pNext = nullptr;
-    pipeline_layout_ci.flags = 0;
-    pipeline_layout_ci.setLayoutCount = 1;
-    pipeline_layout_ci.pSetLayouts = descriptor_set_layout.address();
-    pipeline_layout_ci.pushConstantRangeCount = push_constants.size();
-    pipeline_layout_ci.pPushConstantRanges = push_constants.data();
-    layout = device.GetLogical().CreatePipelineLayout(pipeline_layout_ci);
+    layout = device.GetLogical().CreatePipelineLayout({
+        .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .setLayoutCount = 1,
+        .pSetLayouts = descriptor_set_layout.address(),
+        .pushConstantRangeCount = push_constants.size(),
+        .pPushConstantRanges = push_constants.data(),
+    });
 
     if (!templates.empty()) {
-        VkDescriptorUpdateTemplateCreateInfoKHR template_ci;
-        template_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR;
-        template_ci.pNext = nullptr;
-        template_ci.flags = 0;
-        template_ci.descriptorUpdateEntryCount = templates.size();
-        template_ci.pDescriptorUpdateEntries = templates.data();
-        template_ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR;
-        template_ci.descriptorSetLayout = *descriptor_set_layout;
-        template_ci.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
-        template_ci.pipelineLayout = *layout;
-        template_ci.set = 0;
-        descriptor_template = device.GetLogical().CreateDescriptorUpdateTemplateKHR(template_ci);
+        descriptor_template = device.GetLogical().CreateDescriptorUpdateTemplateKHR({
+            .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR,
+            .pNext = nullptr,
+            .flags = 0,
+            .descriptorUpdateEntryCount = templates.size(),
+            .pDescriptorUpdateEntries = templates.data(),
+            .templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR,
+            .descriptorSetLayout = *descriptor_set_layout,
+            .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+            .pipelineLayout = *layout,
+            .set = 0,
+        });
 
         descriptor_allocator.emplace(descriptor_pool, *descriptor_set_layout);
     }
@@ -414,32 +418,32 @@ VKComputePass::VKComputePass(const VKDevice& device, VKDescriptorPool& descripto
     auto code_copy = std::make_unique<u32[]>(code_size / sizeof(u32) + 1);
     std::memcpy(code_copy.get(), code, code_size);
 
-    VkShaderModuleCreateInfo module_ci;
-    module_ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
-    module_ci.pNext = nullptr;
-    module_ci.flags = 0;
-    module_ci.codeSize = code_size;
-    module_ci.pCode = code_copy.get();
-    module = device.GetLogical().CreateShaderModule(module_ci);
+    module = device.GetLogical().CreateShaderModule({
+        .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .codeSize = code_size,
+        .pCode = code_copy.get(),
+    });
 
-    VkComputePipelineCreateInfo pipeline_ci;
-    pipeline_ci.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
-    pipeline_ci.pNext = nullptr;
-    pipeline_ci.flags = 0;
-    pipeline_ci.layout = *layout;
-    pipeline_ci.basePipelineHandle = nullptr;
-    pipeline_ci.basePipelineIndex = 0;
-
-    VkPipelineShaderStageCreateInfo& stage_ci = pipeline_ci.stage;
-    stage_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
-    stage_ci.pNext = nullptr;
-    stage_ci.flags = 0;
-    stage_ci.stage = VK_SHADER_STAGE_COMPUTE_BIT;
-    stage_ci.module = *module;
-    stage_ci.pName = "main";
-    stage_ci.pSpecializationInfo = nullptr;
-
-    pipeline = device.GetLogical().CreateComputePipeline(pipeline_ci);
+    pipeline = device.GetLogical().CreateComputePipeline({
+        .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .stage =
+            {
+                .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+                .pNext = nullptr,
+                .flags = 0,
+                .stage = VK_SHADER_STAGE_COMPUTE_BIT,
+                .module = *module,
+                .pName = "main",
+                .pSpecializationInfo = nullptr,
+            },
+        .layout = *layout,
+        .basePipelineHandle = nullptr,
+        .basePipelineIndex = 0,
+    });
 }
 
 VKComputePass::~VKComputePass() = default;
@@ -43,12 +43,13 @@ vk::DescriptorSetLayout VKComputePipeline::CreateDescriptorSetLayout() const {
     const auto add_bindings = [&](VkDescriptorType descriptor_type, std::size_t num_entries) {
        // TODO(Rodrigo): Maybe make individual bindings here?
        for (u32 bindpoint = 0; bindpoint < static_cast<u32>(num_entries); ++bindpoint) {
-            VkDescriptorSetLayoutBinding& entry = bindings.emplace_back();
-            entry.binding = binding++;
-            entry.descriptorType = descriptor_type;
-            entry.descriptorCount = 1;
-            entry.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
-            entry.pImmutableSamplers = nullptr;
+            bindings.push_back({
+                .binding = binding++,
+                .descriptorType = descriptor_type,
+                .descriptorCount = 1,
+                .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+                .pImmutableSamplers = nullptr,
+            });
        }
     };
     add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, entries.const_buffers.size());
@@ -58,25 +59,25 @@ vk::DescriptorSetLayout VKComputePipeline::CreateDescriptorSetLayout() const {
     add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, entries.storage_texels.size());
     add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, entries.images.size());
 
-    VkDescriptorSetLayoutCreateInfo ci;
-    ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
-    ci.pNext = nullptr;
-    ci.flags = 0;
-    ci.bindingCount = static_cast<u32>(bindings.size());
-    ci.pBindings = bindings.data();
-    return device.GetLogical().CreateDescriptorSetLayout(ci);
+    return device.GetLogical().CreateDescriptorSetLayout({
+        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .bindingCount = static_cast<u32>(bindings.size()),
+        .pBindings = bindings.data(),
+    });
 }
 
 vk::PipelineLayout VKComputePipeline::CreatePipelineLayout() const {
-    VkPipelineLayoutCreateInfo ci;
-    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
-    ci.pNext = nullptr;
-    ci.flags = 0;
-    ci.setLayoutCount = 1;
-    ci.pSetLayouts = descriptor_set_layout.address();
-    ci.pushConstantRangeCount = 0;
-    ci.pPushConstantRanges = nullptr;
-    return device.GetLogical().CreatePipelineLayout(ci);
+    return device.GetLogical().CreatePipelineLayout({
+        .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .setLayoutCount = 1,
+        .pSetLayouts = descriptor_set_layout.address(),
+        .pushConstantRangeCount = 0,
+        .pPushConstantRanges = nullptr,
+    });
 }
 
 vk::DescriptorUpdateTemplateKHR VKComputePipeline::CreateDescriptorUpdateTemplate() const {
@@ -89,59 +90,63 @@ vk::DescriptorUpdateTemplateKHR VKComputePipeline::CreateDescriptorUpdateTemplat
         return {};
     }
 
-    VkDescriptorUpdateTemplateCreateInfoKHR ci;
-    ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR;
-    ci.pNext = nullptr;
-    ci.flags = 0;
-    ci.descriptorUpdateEntryCount = static_cast<u32>(template_entries.size());
-    ci.pDescriptorUpdateEntries = template_entries.data();
-    ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR;
-    ci.descriptorSetLayout = *descriptor_set_layout;
-    ci.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
-    ci.pipelineLayout = *layout;
-    ci.set = DESCRIPTOR_SET;
-    return device.GetLogical().CreateDescriptorUpdateTemplateKHR(ci);
+    return device.GetLogical().CreateDescriptorUpdateTemplateKHR({
+        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR,
+        .pNext = nullptr,
+        .flags = 0,
+        .descriptorUpdateEntryCount = static_cast<u32>(template_entries.size()),
+        .pDescriptorUpdateEntries = template_entries.data(),
+        .templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR,
+        .descriptorSetLayout = *descriptor_set_layout,
+        .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+        .pipelineLayout = *layout,
+        .set = DESCRIPTOR_SET,
+    });
 }
 
 vk::ShaderModule VKComputePipeline::CreateShaderModule(const std::vector<u32>& code) const {
     device.SaveShader(code);
 
-    VkShaderModuleCreateInfo ci;
-    ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
-    ci.pNext = nullptr;
-    ci.flags = 0;
-    ci.codeSize = code.size() * sizeof(u32);
-    ci.pCode = code.data();
-    return device.GetLogical().CreateShaderModule(ci);
+    return device.GetLogical().CreateShaderModule({
+        .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .codeSize = code.size() * sizeof(u32),
+        .pCode = code.data(),
+    });
 }
 
 vk::Pipeline VKComputePipeline::CreatePipeline() const {
-    VkComputePipelineCreateInfo ci;
-    VkPipelineShaderStageCreateInfo& stage_ci = ci.stage;
-    stage_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
-    stage_ci.pNext = nullptr;
-    stage_ci.flags = 0;
-    stage_ci.stage = VK_SHADER_STAGE_COMPUTE_BIT;
-    stage_ci.module = *shader_module;
-    stage_ci.pName = "main";
-    stage_ci.pSpecializationInfo = nullptr;
-
-    VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci;
-    subgroup_size_ci.sType =
-        VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT;
-    subgroup_size_ci.pNext = nullptr;
-    subgroup_size_ci.requiredSubgroupSize = GuestWarpSize;
+    VkComputePipelineCreateInfo ci{
+        .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .stage =
+            {
+                .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+                .pNext = nullptr,
+                .flags = 0,
+                .stage = VK_SHADER_STAGE_COMPUTE_BIT,
+                .module = *shader_module,
+                .pName = "main",
+                .pSpecializationInfo = nullptr,
+            },
+        .layout = *layout,
+        .basePipelineHandle = nullptr,
+        .basePipelineIndex = 0,
+    };
+
+    const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
+        .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
+        .pNext = nullptr,
+        .requiredSubgroupSize = GuestWarpSize,
+    };
 
     if (entries.uses_warps && device.IsGuestWarpSizeSupported(VK_SHADER_STAGE_COMPUTE_BIT)) {
-        stage_ci.pNext = &subgroup_size_ci;
+        ci.stage.pNext = &subgroup_size_ci;
     }
 
-    ci.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
-    ci.pNext = nullptr;
-    ci.flags = 0;
-    ci.layout = *layout;
-    ci.basePipelineHandle = nullptr;
-    ci.basePipelineIndex = 0;
     return device.GetLogical().CreateComputePipeline(ci);
 }
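One detail in CreatePipeline above: unlike most structs in this commit, ci stays non-const, because when the device supports a required subgroup size the extension struct still has to be chained onto the stage's pNext after the aggregate is built. The initializer sets .pNext = nullptr and the branch patches it afterward. A reduced sketch of that conditional pNext-chaining idiom (the condition name is hypothetical):

    VkComputePipelineCreateInfo ci{ /* designated initializers as in the hunk */ };
    VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup{ /* ... */ };
    if (needs_fixed_subgroup_size) {
        // Chain the extension struct onto the stage after construction.
        ci.stage.pNext = &subgroup;
    }

Designated initializers cover the unconditional members; post-construction mutation remains for the conditional one.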
@@ -43,27 +43,30 @@ vk::DescriptorPool* VKDescriptorPool::AllocateNewPool() {
         {VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, num_sets * 64},
         {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, num_sets * 64},
         {VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, num_sets * 64},
-        {VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, num_sets * 40}};
+        {VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, num_sets * 40},
+    };
 
-    VkDescriptorPoolCreateInfo ci;
-    ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
-    ci.pNext = nullptr;
-    ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
-    ci.maxSets = num_sets;
-    ci.poolSizeCount = static_cast<u32>(std::size(pool_sizes));
-    ci.pPoolSizes = std::data(pool_sizes);
+    const VkDescriptorPoolCreateInfo ci{
+        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
+        .maxSets = num_sets,
+        .poolSizeCount = static_cast<u32>(std::size(pool_sizes)),
+        .pPoolSizes = std::data(pool_sizes),
+    };
     return &pools.emplace_back(device.GetLogical().CreateDescriptorPool(ci));
 }
 
 vk::DescriptorSets VKDescriptorPool::AllocateDescriptors(VkDescriptorSetLayout layout,
                                                          std::size_t count) {
     const std::vector layout_copies(count, layout);
-    VkDescriptorSetAllocateInfo ai;
-    ai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
-    ai.pNext = nullptr;
-    ai.descriptorPool = **active_pool;
-    ai.descriptorSetCount = static_cast<u32>(count);
-    ai.pSetLayouts = layout_copies.data();
+    VkDescriptorSetAllocateInfo ai{
+        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
+        .pNext = nullptr,
+        .descriptorPool = **active_pool,
+        .descriptorSetCount = static_cast<u32>(count),
+        .pSetLayouts = layout_copies.data(),
+    };
 
     vk::DescriptorSets sets = active_pool->Allocate(ai);
     if (!sets.IsOutOfPoolMemory()) {
@@ -102,21 +102,29 @@ bool VKImage::HasChanged(u32 base_layer, u32 num_layers, u32 base_level, u32 num
 
 void VKImage::CreatePresentView() {
     // Image type has to be 2D to be presented.
-    VkImageViewCreateInfo image_view_ci;
-    image_view_ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
-    image_view_ci.pNext = nullptr;
-    image_view_ci.flags = 0;
-    image_view_ci.image = *image;
-    image_view_ci.viewType = VK_IMAGE_VIEW_TYPE_2D;
-    image_view_ci.format = format;
-    image_view_ci.components = {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
-                                VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY};
-    image_view_ci.subresourceRange.aspectMask = aspect_mask;
-    image_view_ci.subresourceRange.baseMipLevel = 0;
-    image_view_ci.subresourceRange.levelCount = 1;
-    image_view_ci.subresourceRange.baseArrayLayer = 0;
-    image_view_ci.subresourceRange.layerCount = 1;
-    present_view = device.GetLogical().CreateImageView(image_view_ci);
+    present_view = device.GetLogical().CreateImageView({
+        .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .image = *image,
+        .viewType = VK_IMAGE_VIEW_TYPE_2D,
+        .format = format,
+        .components =
+            {
+                .r = VK_COMPONENT_SWIZZLE_IDENTITY,
+                .g = VK_COMPONENT_SWIZZLE_IDENTITY,
+                .b = VK_COMPONENT_SWIZZLE_IDENTITY,
+                .a = VK_COMPONENT_SWIZZLE_IDENTITY,
+            },
+        .subresourceRange =
+            {
+                .aspectMask = aspect_mask,
+                .baseMipLevel = 0,
+                .levelCount = 1,
+                .baseArrayLayer = 0,
+                .layerCount = 1,
+            },
+    });
 }
 
 VKImage::SubrangeState& VKImage::GetSubrangeState(u32 layer, u32 level) noexcept {
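The image-view hunk also shows that designators nest: the VkComponentMapping and VkImageSubresourceRange sub-structs are initialized in place instead of through chained member access. A minimal standalone sketch with hypothetical Inner/Outer types:

    struct Inner {
        int a;
        int b;
    };

    struct Outer {
        Inner inner;
        int c;
    };

    // A nested aggregate takes its own designated-initializer list.
    constexpr Outer MakeOuter(int b) {
        return {
            .inner = {.a = 0, .b = b},
            .c = 1,
        };
    }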
@@ -178,13 +178,12 @@ bool VKMemoryManager::AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 t
     }();
 
     // Try to allocate found type.
-    VkMemoryAllocateInfo memory_ai;
-    memory_ai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
-    memory_ai.pNext = nullptr;
-    memory_ai.allocationSize = size;
-    memory_ai.memoryTypeIndex = type;
-
-    vk::DeviceMemory memory = device.GetLogical().TryAllocateMemory(memory_ai);
+    vk::DeviceMemory memory = device.GetLogical().TryAllocateMemory({
+        .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+        .pNext = nullptr,
+        .allocationSize = size,
+        .memoryTypeIndex = type,
+    });
     if (!memory) {
         LOG_CRITICAL(Render_Vulkan, "Device allocation failed!");
         return false;
@@ -88,12 +88,13 @@ void AddBindings(std::vector<VkDescriptorSetLayoutBinding>& bindings, u32& bindi
            // Combined image samplers can be arrayed.
            count = container[i].size;
        }
-        VkDescriptorSetLayoutBinding& entry = bindings.emplace_back();
-        entry.binding = binding++;
-        entry.descriptorType = descriptor_type;
-        entry.descriptorCount = count;
-        entry.stageFlags = stage_flags;
-        entry.pImmutableSamplers = nullptr;
+        bindings.push_back({
+            .binding = binding++,
+            .descriptorType = descriptor_type,
+            .descriptorCount = count,
+            .stageFlags = stage_flags,
+            .pImmutableSamplers = nullptr,
+        });
     }
 }
 
@@ -259,10 +260,10 @@ VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCach
         }
     }
 
-    Specialization specialization;
-    specialization.workgroup_size = key.workgroup_size;
-    specialization.shared_memory_size = key.shared_memory_size;
+    const Specialization specialization{
+        .workgroup_size = key.workgroup_size,
+        .shared_memory_size = key.shared_memory_size,
+    };
     const SPIRVShader spirv_shader{Decompile(device, shader->GetIR(), ShaderType::Compute,
                                              shader->GetRegistry(), specialization),
                                    shader->GetEntries()};
@@ -370,13 +371,14 @@ void AddEntry(std::vector<VkDescriptorUpdateTemplateEntry>& template_entries, u3
     if constexpr (descriptor_type == COMBINED_IMAGE_SAMPLER) {
         for (u32 i = 0; i < count; ++i) {
             const u32 num_samplers = container[i].size;
-            VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back();
-            entry.dstBinding = binding;
-            entry.dstArrayElement = 0;
-            entry.descriptorCount = num_samplers;
-            entry.descriptorType = descriptor_type;
-            entry.offset = offset;
-            entry.stride = entry_size;
+            template_entries.push_back({
+                .dstBinding = binding,
+                .dstArrayElement = 0,
+                .descriptorCount = num_samplers,
+                .descriptorType = descriptor_type,
+                .offset = offset,
+                .stride = entry_size,
+            });
 
             ++binding;
             offset += num_samplers * entry_size;
@@ -389,22 +391,24 @@ void AddEntry(std::vector<VkDescriptorUpdateTemplateEntry>& template_entries, u3
         // Nvidia has a bug where updating multiple texels at once causes the driver to crash.
         // Note: Fixed in driver Windows 443.24, Linux 440.66.15
         for (u32 i = 0; i < count; ++i) {
-            VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back();
-            entry.dstBinding = binding + i;
-            entry.dstArrayElement = 0;
-            entry.descriptorCount = 1;
-            entry.descriptorType = descriptor_type;
-            entry.offset = static_cast<std::size_t>(offset + i * entry_size);
-            entry.stride = entry_size;
+            template_entries.push_back({
+                .dstBinding = binding + i,
+                .dstArrayElement = 0,
+                .descriptorCount = 1,
+                .descriptorType = descriptor_type,
+                .offset = static_cast<std::size_t>(offset + i * entry_size),
+                .stride = entry_size,
+            });
         }
     } else if (count > 0) {
-        VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back();
-        entry.dstBinding = binding;
-        entry.dstArrayElement = 0;
-        entry.descriptorCount = count;
-        entry.descriptorType = descriptor_type;
-        entry.offset = offset;
-        entry.stride = entry_size;
+        template_entries.push_back({
+            .dstBinding = binding,
+            .dstArrayElement = 0,
+            .descriptorCount = count,
+            .descriptorType = descriptor_type,
+            .offset = offset,
+            .stride = entry_size,
+        });
     }
     offset += count * entry_size;
     binding += count;
@@ -47,14 +47,14 @@ std::pair<VkQueryPool, u32> QueryPool::Commit(VKFence& fence) {
 void QueryPool::Allocate(std::size_t begin, std::size_t end) {
     usage.resize(end);
 
-    VkQueryPoolCreateInfo query_pool_ci;
-    query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
-    query_pool_ci.pNext = nullptr;
-    query_pool_ci.flags = 0;
-    query_pool_ci.queryType = GetTarget(type);
-    query_pool_ci.queryCount = static_cast<u32>(end - begin);
-    query_pool_ci.pipelineStatistics = 0;
-    pools.push_back(device->GetLogical().CreateQueryPool(query_pool_ci));
+    pools.push_back(device->GetLogical().CreateQueryPool({
+        .sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .queryType = GetTarget(type),
+        .queryCount = static_cast<u32>(end - begin),
+        .pipelineStatistics = 0,
+    }));
 }
 
 void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
@@ -64,20 +64,22 @@ VkViewport GetViewportState(const VKDevice& device, const Maxwell& regs, std::si
     const auto& src = regs.viewport_transform[index];
     const float width = src.scale_x * 2.0f;
     const float height = src.scale_y * 2.0f;
 
-    VkViewport viewport;
-    viewport.x = src.translate_x - src.scale_x;
-    viewport.y = src.translate_y - src.scale_y;
-    viewport.width = width != 0.0f ? width : 1.0f;
-    viewport.height = height != 0.0f ? height : 1.0f;
-
     const float reduce_z = regs.depth_mode == Maxwell::DepthMode::MinusOneToOne ? 1.0f : 0.0f;
-    viewport.minDepth = src.translate_z - src.scale_z * reduce_z;
-    viewport.maxDepth = src.translate_z + src.scale_z;
+
+    VkViewport viewport{
+        .x = src.translate_x - src.scale_x,
+        .y = src.translate_y - src.scale_y,
+        .width = width != 0.0f ? width : 1.0f,
+        .height = height != 0.0f ? height : 1.0f,
+        .minDepth = src.translate_z - src.scale_z * reduce_z,
+        .maxDepth = src.translate_z + src.scale_z,
+    };
 
     if (!device.IsExtDepthRangeUnrestrictedSupported()) {
         viewport.minDepth = std::clamp(viewport.minDepth, 0.0f, 1.0f);
         viewport.maxDepth = std::clamp(viewport.maxDepth, 0.0f, 1.0f);
     }
 
     return viewport;
 }
@@ -508,10 +510,11 @@ void RasterizerVulkan::Clear() {
 
     const u32 color_attachment = regs.clear_buffers.RT;
     scheduler.Record([color_attachment, clear_value, clear_rect](vk::CommandBuffer cmdbuf) {
-        VkClearAttachment attachment;
-        attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
-        attachment.colorAttachment = color_attachment;
-        attachment.clearValue = clear_value;
+        const VkClearAttachment attachment{
+            .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+            .colorAttachment = color_attachment,
+            .clearValue = clear_value,
+        };
         cmdbuf.ClearAttachments(attachment, clear_rect);
     });
 }
@@ -551,13 +554,16 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
     query_cache.UpdateCounters();
 
     const auto& launch_desc = system.GPU().KeplerCompute().launch_description;
-    ComputePipelineCacheKey key;
-    key.shader = code_addr;
-    key.shared_memory_size = launch_desc.shared_alloc;
-    key.workgroup_size = {launch_desc.block_dim_x, launch_desc.block_dim_y,
-                          launch_desc.block_dim_z};
-
-    auto& pipeline = pipeline_cache.GetComputePipeline(key);
+    auto& pipeline = pipeline_cache.GetComputePipeline({
+        .shader = code_addr,
+        .shared_memory_size = launch_desc.shared_alloc,
+        .workgroup_size =
+            {
+                launch_desc.block_dim_x,
+                launch_desc.block_dim_y,
+                launch_desc.block_dim_z,
+            },
+    });
 
     // Compute dispatches can't be executed inside a renderpass
     scheduler.RequestOutsideRenderPassOperationContext();
@@ -841,17 +847,17 @@ std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers(
     const auto [fbentry, is_cache_miss] = framebuffer_cache.try_emplace(key);
     auto& framebuffer = fbentry->second;
     if (is_cache_miss) {
-        VkFramebufferCreateInfo framebuffer_ci;
-        framebuffer_ci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
-        framebuffer_ci.pNext = nullptr;
-        framebuffer_ci.flags = 0;
-        framebuffer_ci.renderPass = key.renderpass;
-        framebuffer_ci.attachmentCount = static_cast<u32>(key.views.size());
-        framebuffer_ci.pAttachments = key.views.data();
-        framebuffer_ci.width = key.width;
-        framebuffer_ci.height = key.height;
-        framebuffer_ci.layers = key.layers;
-        framebuffer = device.GetLogical().CreateFramebuffer(framebuffer_ci);
+        framebuffer = device.GetLogical().CreateFramebuffer({
+            .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
+            .pNext = nullptr,
+            .flags = 0,
+            .renderPass = key.renderpass,
+            .attachmentCount = static_cast<u32>(key.views.size()),
+            .pAttachments = key.views.data(),
+            .width = key.width,
+            .height = key.height,
+            .layers = key.layers,
+        });
     }
 
     return {*framebuffer, VkExtent2D{key.width, key.height}};
@@ -1553,17 +1559,17 @@ VkBuffer RasterizerVulkan::DefaultBuffer() {
         return *default_buffer;
     }
 
-    VkBufferCreateInfo ci;
-    ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
-    ci.pNext = nullptr;
-    ci.flags = 0;
-    ci.size = DEFAULT_BUFFER_SIZE;
-    ci.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
-               VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
-    ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
-    ci.queueFamilyIndexCount = 0;
-    ci.pQueueFamilyIndices = nullptr;
-    default_buffer = device.GetLogical().CreateBuffer(ci);
+    default_buffer = device.GetLogical().CreateBuffer({
+        .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .size = DEFAULT_BUFFER_SIZE,
+        .usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
+                 VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
+        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+        .queueFamilyIndexCount = 0,
+        .pQueueFamilyIndices = nullptr,
+    });
     default_buffer_commit = memory_manager.Commit(default_buffer, false);
 
     scheduler.RequestOutsideRenderPassOperationContext();
@@ -39,10 +39,14 @@ VkRenderPass VKRenderPassCache::GetRenderPass(const RenderPassParams& params) {
 
 vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& params) const {
     using namespace VideoCore::Surface;
-    std::vector<VkAttachmentDescription> descriptors;
-    std::vector<VkAttachmentReference> color_references;
     const std::size_t num_attachments = static_cast<std::size_t>(params.num_color_attachments);
 
+    std::vector<VkAttachmentDescription> descriptors;
+    descriptors.reserve(num_attachments);
+
+    std::vector<VkAttachmentReference> color_references;
+    color_references.reserve(num_attachments);
+
     for (std::size_t rt = 0; rt < num_attachments; ++rt) {
         const auto guest_format = static_cast<Tegra::RenderTargetFormat>(params.color_formats[rt]);
         const PixelFormat pixel_format = PixelFormatFromRenderTargetFormat(guest_format);
@@ -54,20 +58,22 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param
         const VkImageLayout color_layout = ((params.texceptions >> rt) & 1) != 0
                                                ? VK_IMAGE_LAYOUT_GENERAL
                                                : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
-        VkAttachmentDescription& descriptor = descriptors.emplace_back();
-        descriptor.flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT;
-        descriptor.format = format.format;
-        descriptor.samples = VK_SAMPLE_COUNT_1_BIT;
-        descriptor.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
-        descriptor.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
-        descriptor.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
-        descriptor.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
-        descriptor.initialLayout = color_layout;
-        descriptor.finalLayout = color_layout;
+        descriptors.push_back({
+            .flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT,
+            .format = format.format,
+            .samples = VK_SAMPLE_COUNT_1_BIT,
+            .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
+            .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+            .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+            .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
+            .initialLayout = color_layout,
+            .finalLayout = color_layout,
+        });
 
-        VkAttachmentReference& reference = color_references.emplace_back();
-        reference.attachment = static_cast<u32>(rt);
-        reference.layout = color_layout;
+        color_references.push_back({
+            .attachment = static_cast<u32>(rt),
+            .layout = color_layout,
+        });
     }
 
     VkAttachmentReference zeta_attachment_ref;
@@ -82,32 +88,36 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param
         const VkImageLayout zeta_layout = params.zeta_texception != 0
                                               ? VK_IMAGE_LAYOUT_GENERAL
                                               : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
-        VkAttachmentDescription& descriptor = descriptors.emplace_back();
-        descriptor.flags = 0;
-        descriptor.format = format.format;
-        descriptor.samples = VK_SAMPLE_COUNT_1_BIT;
-        descriptor.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
-        descriptor.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
-        descriptor.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
-        descriptor.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
-        descriptor.initialLayout = zeta_layout;
-        descriptor.finalLayout = zeta_layout;
+        descriptors.push_back({
+            .flags = 0,
+            .format = format.format,
+            .samples = VK_SAMPLE_COUNT_1_BIT,
+            .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
+            .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+            .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
+            .stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE,
+            .initialLayout = zeta_layout,
+            .finalLayout = zeta_layout,
+        });
 
-        zeta_attachment_ref.attachment = static_cast<u32>(num_attachments);
-        zeta_attachment_ref.layout = zeta_layout;
+        zeta_attachment_ref = {
+            .attachment = static_cast<u32>(num_attachments),
+            .layout = zeta_layout,
+        };
     }
 
-    VkSubpassDescription subpass_description;
-    subpass_description.flags = 0;
-    subpass_description.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
-    subpass_description.inputAttachmentCount = 0;
-    subpass_description.pInputAttachments = nullptr;
-    subpass_description.colorAttachmentCount = static_cast<u32>(color_references.size());
-    subpass_description.pColorAttachments = color_references.data();
-    subpass_description.pResolveAttachments = nullptr;
-    subpass_description.pDepthStencilAttachment = has_zeta ? &zeta_attachment_ref : nullptr;
-    subpass_description.preserveAttachmentCount = 0;
-    subpass_description.pPreserveAttachments = nullptr;
+    const VkSubpassDescription subpass_description{
+        .flags = 0,
+        .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+        .inputAttachmentCount = 0,
+        .pInputAttachments = nullptr,
+        .colorAttachmentCount = static_cast<u32>(color_references.size()),
+        .pColorAttachments = color_references.data(),
+        .pResolveAttachments = nullptr,
+        .pDepthStencilAttachment = has_zeta ? &zeta_attachment_ref : nullptr,
+        .preserveAttachmentCount = 0,
+        .pPreserveAttachments = nullptr,
+    };
 
     VkAccessFlags access = 0;
     VkPipelineStageFlags stage = 0;
@@ -122,26 +132,27 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param
         stage |= VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
     }
 
-    VkSubpassDependency subpass_dependency;
-    subpass_dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
-    subpass_dependency.dstSubpass = 0;
-    subpass_dependency.srcStageMask = stage;
-    subpass_dependency.dstStageMask = stage;
-    subpass_dependency.srcAccessMask = 0;
-    subpass_dependency.dstAccessMask = access;
-    subpass_dependency.dependencyFlags = 0;
+    const VkSubpassDependency subpass_dependency{
+        .srcSubpass = VK_SUBPASS_EXTERNAL,
+        .dstSubpass = 0,
+        .srcStageMask = stage,
+        .dstStageMask = stage,
+        .srcAccessMask = 0,
+        .dstAccessMask = access,
+        .dependencyFlags = 0,
+    };
 
-    VkRenderPassCreateInfo ci;
-    ci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
-    ci.pNext = nullptr;
-    ci.flags = 0;
-    ci.attachmentCount = static_cast<u32>(descriptors.size());
-    ci.pAttachments = descriptors.data();
-    ci.subpassCount = 1;
-    ci.pSubpasses = &subpass_description;
-    ci.dependencyCount = 1;
-    ci.pDependencies = &subpass_dependency;
-    return device.GetLogical().CreateRenderPass(ci);
+    return device.GetLogical().CreateRenderPass({
+        .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .attachmentCount = static_cast<u32>(descriptors.size()),
+        .pAttachments = descriptors.data(),
+        .subpassCount = 1,
+        .pSubpasses = &subpass_description,
+        .dependencyCount = 1,
+        .pDependencies = &subpass_dependency,
+    });
 }
 
 } // namespace Vulkan
@@ -18,33 +18,32 @@ namespace {
 constexpr std::size_t COMMAND_BUFFER_POOL_SIZE = 0x1000;
 constexpr std::size_t FENCES_GROW_STEP = 0x40;
 
-VkFenceCreateInfo BuildFenceCreateInfo() {
-    VkFenceCreateInfo fence_ci;
-    fence_ci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
-    fence_ci.pNext = nullptr;
-    fence_ci.flags = 0;
-    return fence_ci;
+constexpr VkFenceCreateInfo BuildFenceCreateInfo() {
+    return {
+        .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+    };
 }
 
 } // Anonymous namespace
 
 class CommandBufferPool final : public VKFencedPool {
 public:
-    CommandBufferPool(const VKDevice& device)
+    explicit CommandBufferPool(const VKDevice& device)
         : VKFencedPool(COMMAND_BUFFER_POOL_SIZE), device{device} {}
 
     void Allocate(std::size_t begin, std::size_t end) override {
         // Command buffers are going to be commited, recorded, executed every single usage cycle.
         // They are also going to be reseted when commited.
-        VkCommandPoolCreateInfo command_pool_ci;
-        command_pool_ci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
-        command_pool_ci.pNext = nullptr;
-        command_pool_ci.flags =
-            VK_COMMAND_POOL_CREATE_TRANSIENT_BIT | VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
-        command_pool_ci.queueFamilyIndex = device.GetGraphicsFamily();
-
         Pool& pool = pools.emplace_back();
-        pool.handle = device.GetLogical().CreateCommandPool(command_pool_ci);
+        pool.handle = device.GetLogical().CreateCommandPool({
+            .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
+            .pNext = nullptr,
+            .flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT |
+                     VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
+            .queueFamilyIndex = device.GetGraphicsFamily(),
+        });
         pool.cmdbufs = pool.handle.Allocate(COMMAND_BUFFER_POOL_SIZE);
     }
@@ -44,32 +44,35 @@ vk::Sampler VKSamplerCache::CreateSampler(const Tegra::Texture::TSCEntry& tsc) c
     const bool arbitrary_borders = device.IsExtCustomBorderColorSupported();
     const std::array color = tsc.GetBorderColor();
 
-    VkSamplerCustomBorderColorCreateInfoEXT border;
-    border.sType = VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT;
-    border.pNext = nullptr;
-    border.format = VK_FORMAT_UNDEFINED;
+    VkSamplerCustomBorderColorCreateInfoEXT border{
+        .sType = VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT,
+        .pNext = nullptr,
+        .format = VK_FORMAT_UNDEFINED,
+    };
     std::memcpy(&border.customBorderColor, color.data(), sizeof(color));
 
-    VkSamplerCreateInfo ci;
-    ci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
-    ci.pNext = arbitrary_borders ? &border : nullptr;
-    ci.flags = 0;
-    ci.magFilter = MaxwellToVK::Sampler::Filter(tsc.mag_filter);
-    ci.minFilter = MaxwellToVK::Sampler::Filter(tsc.min_filter);
-    ci.mipmapMode = MaxwellToVK::Sampler::MipmapMode(tsc.mipmap_filter);
-    ci.addressModeU = MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_u, tsc.mag_filter);
-    ci.addressModeV = MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_v, tsc.mag_filter);
-    ci.addressModeW = MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_p, tsc.mag_filter);
-    ci.mipLodBias = tsc.GetLodBias();
-    ci.anisotropyEnable = tsc.GetMaxAnisotropy() > 1.0f ? VK_TRUE : VK_FALSE;
-    ci.maxAnisotropy = tsc.GetMaxAnisotropy();
-    ci.compareEnable = tsc.depth_compare_enabled;
-    ci.compareOp = MaxwellToVK::Sampler::DepthCompareFunction(tsc.depth_compare_func);
-    ci.minLod = tsc.mipmap_filter == TextureMipmapFilter::None ? 0.0f : tsc.GetMinLod();
-    ci.maxLod = tsc.mipmap_filter == TextureMipmapFilter::None ? 0.25f : tsc.GetMaxLod();
-    ci.borderColor = arbitrary_borders ? VK_BORDER_COLOR_INT_CUSTOM_EXT : ConvertBorderColor(color);
-    ci.unnormalizedCoordinates = VK_FALSE;
-    return device.GetLogical().CreateSampler(ci);
+    return device.GetLogical().CreateSampler({
+        .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
+        .pNext = arbitrary_borders ? &border : nullptr,
+        .flags = 0,
+        .magFilter = MaxwellToVK::Sampler::Filter(tsc.mag_filter),
+        .minFilter = MaxwellToVK::Sampler::Filter(tsc.min_filter),
+        .mipmapMode = MaxwellToVK::Sampler::MipmapMode(tsc.mipmap_filter),
+        .addressModeU = MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_u, tsc.mag_filter),
+        .addressModeV = MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_v, tsc.mag_filter),
+        .addressModeW = MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_p, tsc.mag_filter),
+        .mipLodBias = tsc.GetLodBias(),
+        .anisotropyEnable =
+            static_cast<VkBool32>(tsc.GetMaxAnisotropy() > 1.0f ? VK_TRUE : VK_FALSE),
+        .maxAnisotropy = tsc.GetMaxAnisotropy(),
+        .compareEnable = tsc.depth_compare_enabled,
+        .compareOp = MaxwellToVK::Sampler::DepthCompareFunction(tsc.depth_compare_func),
+        .minLod = tsc.mipmap_filter == TextureMipmapFilter::None ? 0.0f : tsc.GetMinLod(),
+        .maxLod = tsc.mipmap_filter == TextureMipmapFilter::None ? 0.25f : tsc.GetMaxLod(),
+        .borderColor =
+            arbitrary_borders ? VK_BORDER_COLOR_INT_CUSTOM_EXT : ConvertBorderColor(color),
+        .unnormalizedCoordinates = VK_FALSE,
+    });
 }
 
 VkSampler VKSamplerCache::ToSamplerType(const vk::Sampler& sampler) const {
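The sampler hunk also wraps the anisotropy condition in static_cast<VkBool32>. A plausible reason, which is an assumption rather than something stated in the commit: braced initialization rejects implicit narrowing conversions, so where the ?: expression's type does not exactly match the VkBool32 member, an explicit cast is needed once the assignment becomes a designated initializer. A reduced illustration with a hypothetical Flags struct:

    #include <cstdint>

    struct Flags {
        std::uint32_t enable; // stands in for a VkBool32-style member
    };

    Flags Make(float anisotropy) {
        // With braced init, a non-constant int-typed conditional would be a
        // narrowing error here; the explicit cast keeps it well-formed.
        return {
            .enable = static_cast<std::uint32_t>(anisotropy > 1.0f ? 1 : 0),
        };
    }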
@@ -100,16 +100,19 @@ void VKScheduler::RequestRenderpass(VkRenderPass renderpass, VkFramebuffer frame
     state.framebuffer = framebuffer;
     state.render_area = render_area;
 
-    VkRenderPassBeginInfo renderpass_bi;
-    renderpass_bi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
-    renderpass_bi.pNext = nullptr;
-    renderpass_bi.renderPass = renderpass;
-    renderpass_bi.framebuffer = framebuffer;
-    renderpass_bi.renderArea.offset.x = 0;
-    renderpass_bi.renderArea.offset.y = 0;
-    renderpass_bi.renderArea.extent = render_area;
-    renderpass_bi.clearValueCount = 0;
-    renderpass_bi.pClearValues = nullptr;
+    const VkRenderPassBeginInfo renderpass_bi{
+        .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
+        .pNext = nullptr,
+        .renderPass = renderpass,
+        .framebuffer = framebuffer,
+        .renderArea =
+            {
+                .offset = {.x = 0, .y = 0},
+                .extent = render_area,
+            },
+        .clearValueCount = 0,
+        .pClearValues = nullptr,
+    };
 
     Record([renderpass_bi, end_renderpass](vk::CommandBuffer cmdbuf) {
         if (end_renderpass) {
@ -157,16 +160,17 @@ void VKScheduler::SubmitExecution(VkSemaphore semaphore) {

current_cmdbuf.End();
current_cmdbuf.End();

VkSubmitInfo submit_info;
const VkSubmitInfo submit_info{
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
submit_info.pNext = nullptr;
.pNext = nullptr,
submit_info.waitSemaphoreCount = 0;
.waitSemaphoreCount = 0,
submit_info.pWaitSemaphores = nullptr;
.pWaitSemaphores = nullptr,
submit_info.pWaitDstStageMask = nullptr;
.pWaitDstStageMask = nullptr,
submit_info.commandBufferCount = 1;
.commandBufferCount = 1,
submit_info.pCommandBuffers = current_cmdbuf.address();
.pCommandBuffers = current_cmdbuf.address(),
submit_info.signalSemaphoreCount = semaphore ? 1 : 0;
.signalSemaphoreCount = semaphore ? 1U : 0U,
submit_info.pSignalSemaphores = &semaphore;
.pSignalSemaphores = &semaphore,
};
switch (const VkResult result = device.GetGraphicsQueue().Submit(submit_info, *current_fence)) {
switch (const VkResult result = device.GetGraphicsQueue().Submit(submit_info, *current_fence)) {
case VK_SUCCESS:
case VK_SUCCESS:
break;
break;
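
The one behavioural nicety in this hunk is `.signalSemaphoreCount = semaphore ? 1U : 0U`: with plain `1 : 0` the conditional expression has type int, and the implicit signed-to-unsigned conversion inside braced initialization trips sign-conversion warnings on warnings-as-errors builds. A small sketch of the idea, using a plain pointer in place of a VkSemaphore handle:

#include <cstdint>

// With unsigned literals, both arms of the conditional are already
// std::uint32_t, so no implicit signed-to-unsigned conversion occurs.
std::uint32_t SignalCount(const void* semaphore) {
    return semaphore != nullptr ? 1U : 0U;
}
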
@ -181,19 +185,18 @@ void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
void VKScheduler::AllocateNewContext() {
void VKScheduler::AllocateNewContext() {
++ticks;
++ticks;

VkCommandBufferBeginInfo cmdbuf_bi;
cmdbuf_bi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
cmdbuf_bi.pNext = nullptr;
cmdbuf_bi.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
cmdbuf_bi.pInheritanceInfo = nullptr;

std::unique_lock lock{mutex};
std::unique_lock lock{mutex};
current_fence = next_fence;
current_fence = next_fence;
next_fence = &resource_manager.CommitFence();
next_fence = &resource_manager.CommitFence();

current_cmdbuf = vk::CommandBuffer(resource_manager.CommitCommandBuffer(*current_fence),
current_cmdbuf = vk::CommandBuffer(resource_manager.CommitCommandBuffer(*current_fence),
device.GetDispatchLoader());
device.GetDispatchLoader());
current_cmdbuf.Begin(cmdbuf_bi);
current_cmdbuf.Begin({
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
.pNext = nullptr,
.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
.pInheritanceInfo = nullptr,
});

// Enable counters once again. These are disabled when a command buffer is finished.
// Enable counters once again. These are disabled when a command buffer is finished.
if (query_cache) {
if (query_cache) {
@ -19,13 +19,13 @@ vk::ShaderModule BuildShader(const VKDevice& device, std::size_t code_size, cons
const auto data = std::make_unique<u32[]>(code_size / sizeof(u32));
const auto data = std::make_unique<u32[]>(code_size / sizeof(u32));
std::memcpy(data.get(), code_data, code_size);
std::memcpy(data.get(), code_data, code_size);

VkShaderModuleCreateInfo ci;
return device.GetLogical().CreateShaderModule({
ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
ci.pNext = nullptr;
.pNext = nullptr,
ci.flags = 0;
.flags = 0,
ci.codeSize = code_size;
.codeSize = code_size,
ci.pCode = data.get();
.pCode = data.get(),
return device.GetLogical().CreateShaderModule(ci);
});
}
}

} // namespace Vulkan
} // namespace Vulkan
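
BuildShader also shows the second idiom of this commit: because the wrapper takes the create info by const reference, the designated-initializer aggregate can be materialized as a temporary directly at the call site, removing the named `ci` local entirely. A self-contained sketch with hypothetical names, not the real wrapper API:

#include <cstdint>
#include <vector>

// Hypothetical miniature of a wrapper that takes a create info by
// const reference, in the style of Device::CreateShaderModule above.
struct ModuleInfo {
    std::size_t codeSize;
    const std::uint32_t* pCode;
};

bool CreateModule(const ModuleInfo& info) {
    return info.codeSize != 0 && info.pCode != nullptr;
}

int main() {
    const std::vector<std::uint32_t> spirv{0x07230203}; // SPIR-V magic word
    // The temporary aggregate binds directly to the const reference parameter.
    const bool ok = CreateModule({
        .codeSize = spirv.size() * sizeof(std::uint32_t),
        .pCode = spirv.data(),
    });
    return ok ? 0 : 1;
}
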
@ -71,20 +71,19 @@ VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_
VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_visible) {
VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_visible) {
const u32 log2 = Common::Log2Ceil64(size);
const u32 log2 = Common::Log2Ceil64(size);

VkBufferCreateInfo ci;
ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
ci.pNext = nullptr;
ci.flags = 0;
ci.size = 1ULL << log2;
ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.queueFamilyIndexCount = 0;
ci.pQueueFamilyIndices = nullptr;

auto buffer = std::make_unique<VKBuffer>();
auto buffer = std::make_unique<VKBuffer>();
buffer->handle = device.GetLogical().CreateBuffer(ci);
buffer->handle = device.GetLogical().CreateBuffer({
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = 1ULL << log2,
.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
});
buffer->commit = memory_manager.Commit(buffer->handle, host_visible);
buffer->commit = memory_manager.Commit(buffer->handle, host_visible);

auto& entries = GetCache(host_visible)[log2].entries;
auto& entries = GetCache(host_visible)[log2].entries;
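
For context on `.size = 1ULL << log2`: the staging pool rounds every request up to a power of two so buffers can be reused across similarly sized requests. A sketch of that rounding, assuming Common::Log2Ceil64 computes the ceiling of log2:

#include <cassert>
#include <cstdint>

// Assumed behaviour of Common::Log2Ceil64: the smallest n with (1 << n) >= value.
std::uint32_t Log2Ceil64(std::uint64_t value) {
    std::uint32_t n = 0;
    while ((std::uint64_t{1} << n) < value) {
        ++n;
    }
    return n;
}

int main() {
    assert((std::uint64_t{1} << Log2Ceil64(1000)) == 1024); // rounds up
    assert((std::uint64_t{1} << Log2Ceil64(1024)) == 1024); // exact size stays
    return 0;
}
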
@ -122,30 +122,27 @@ void VKStreamBuffer::CreateBuffers(VkBufferUsageFlags usage) {
// Substract from the preferred heap size some bytes to avoid getting out of memory.
// Substract from the preferred heap size some bytes to avoid getting out of memory.
const VkDeviceSize heap_size = memory_properties.memoryHeaps[preferred_heap].size;
const VkDeviceSize heap_size = memory_properties.memoryHeaps[preferred_heap].size;
const VkDeviceSize allocable_size = heap_size - 9 * 1024 * 1024;
const VkDeviceSize allocable_size = heap_size - 9 * 1024 * 1024;
buffer = device.GetLogical().CreateBuffer({
VkBufferCreateInfo buffer_ci;
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
buffer_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
.pNext = nullptr,
buffer_ci.pNext = nullptr;
.flags = 0,
buffer_ci.flags = 0;
.size = std::min(PREFERRED_STREAM_BUFFER_SIZE, allocable_size),
buffer_ci.size = std::min(PREFERRED_STREAM_BUFFER_SIZE, allocable_size);
.usage = usage,
buffer_ci.usage = usage;
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
buffer_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
.queueFamilyIndexCount = 0,
buffer_ci.queueFamilyIndexCount = 0;
.pQueueFamilyIndices = nullptr,
buffer_ci.pQueueFamilyIndices = nullptr;
});

buffer = device.GetLogical().CreateBuffer(buffer_ci);

const auto requirements = device.GetLogical().GetBufferMemoryRequirements(*buffer);
const auto requirements = device.GetLogical().GetBufferMemoryRequirements(*buffer);
const u32 required_flags = requirements.memoryTypeBits;
const u32 required_flags = requirements.memoryTypeBits;
stream_buffer_size = static_cast<u64>(requirements.size);
stream_buffer_size = static_cast<u64>(requirements.size);

VkMemoryAllocateInfo memory_ai;
memory = device.GetLogical().AllocateMemory({
memory_ai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
memory_ai.pNext = nullptr;
.pNext = nullptr,
memory_ai.allocationSize = requirements.size;
.allocationSize = requirements.size,
memory_ai.memoryTypeIndex = GetMemoryType(memory_properties, required_flags);
.memoryTypeIndex = GetMemoryType(memory_properties, required_flags),
});
memory = device.GetLogical().AllocateMemory(memory_ai);
buffer.BindMemory(*memory, 0);
buffer.BindMemory(*memory, 0);
}
}
@ -95,15 +95,16 @@ bool VKSwapchain::Present(VkSemaphore render_semaphore, VKFence& fence) {
const auto present_queue{device.GetPresentQueue()};
const auto present_queue{device.GetPresentQueue()};
bool recreated = false;
bool recreated = false;

VkPresentInfoKHR present_info;
const VkPresentInfoKHR present_info{
present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
present_info.pNext = nullptr;
.pNext = nullptr,
present_info.waitSemaphoreCount = render_semaphore ? 2U : 1U;
.waitSemaphoreCount = render_semaphore ? 2U : 1U,
present_info.pWaitSemaphores = semaphores.data();
.pWaitSemaphores = semaphores.data(),
present_info.swapchainCount = 1;
.swapchainCount = 1,
present_info.pSwapchains = swapchain.address();
.pSwapchains = swapchain.address(),
present_info.pImageIndices = &image_index;
.pImageIndices = &image_index,
present_info.pResults = nullptr;
.pResults = nullptr,
};

switch (const VkResult result = present_queue.Present(present_info)) {
switch (const VkResult result = present_queue.Present(present_info)) {
case VK_SUCCESS:
case VK_SUCCESS:
@ -147,24 +148,25 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
requested_image_count = capabilities.maxImageCount;
requested_image_count = capabilities.maxImageCount;
}
}

VkSwapchainCreateInfoKHR swapchain_ci;
VkSwapchainCreateInfoKHR swapchain_ci{
swapchain_ci.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
swapchain_ci.pNext = nullptr;
.pNext = nullptr,
swapchain_ci.flags = 0;
.flags = 0,
swapchain_ci.surface = surface;
.surface = surface,
swapchain_ci.minImageCount = requested_image_count;
.minImageCount = requested_image_count,
swapchain_ci.imageFormat = surface_format.format;
.imageFormat = surface_format.format,
swapchain_ci.imageColorSpace = surface_format.colorSpace;
.imageColorSpace = surface_format.colorSpace,
swapchain_ci.imageArrayLayers = 1;
.imageArrayLayers = 1,
swapchain_ci.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
swapchain_ci.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE,
swapchain_ci.queueFamilyIndexCount = 0;
.queueFamilyIndexCount = 0,
swapchain_ci.pQueueFamilyIndices = nullptr;
.pQueueFamilyIndices = nullptr,
swapchain_ci.preTransform = capabilities.currentTransform;
.preTransform = capabilities.currentTransform,
swapchain_ci.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
swapchain_ci.presentMode = present_mode;
.presentMode = present_mode,
swapchain_ci.clipped = VK_FALSE;
.clipped = VK_FALSE,
swapchain_ci.oldSwapchain = nullptr;
.oldSwapchain = nullptr,
};

const u32 graphics_family{device.GetGraphicsFamily()};
const u32 graphics_family{device.GetGraphicsFamily()};
const u32 present_family{device.GetPresentFamily()};
const u32 present_family{device.GetPresentFamily()};
@ -173,8 +175,6 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
swapchain_ci.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
swapchain_ci.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
swapchain_ci.queueFamilyIndexCount = static_cast<u32>(queue_indices.size());
swapchain_ci.queueFamilyIndexCount = static_cast<u32>(queue_indices.size());
swapchain_ci.pQueueFamilyIndices = queue_indices.data();
swapchain_ci.pQueueFamilyIndices = queue_indices.data();
} else {
swapchain_ci.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
}
}

// Request the size again to reduce the possibility of a TOCTOU race condition.
// Request the size again to reduce the possibility of a TOCTOU race condition.
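
The deleted else branch is redundant under the new form: `.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE` is already set by the initializer, so only the concurrent case needs to mutate the struct afterwards. A stand-in sketch of that default-then-override pattern, with hypothetical types:

#include <cassert>

enum class SharingMode { Exclusive, Concurrent };

// Hypothetical stand-in for the swapchain create info above.
struct SwapchainInfo {
    SharingMode imageSharingMode;
    unsigned queueFamilyIndexCount;
};

SwapchainInfo Build(bool distinct_families) {
    SwapchainInfo ci{
        .imageSharingMode = SharingMode::Exclusive, // common case up front
        .queueFamilyIndexCount = 0,
    };
    if (distinct_families) {
        ci.imageSharingMode = SharingMode::Concurrent;
        ci.queueFamilyIndexCount = 2;
    }
    return ci; // no else branch needed: Exclusive is already set
}

int main() {
    assert(Build(false).imageSharingMode == SharingMode::Exclusive);
    assert(Build(true).queueFamilyIndexCount == 2);
    return 0;
}
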
@ -200,20 +200,28 @@ void VKSwapchain::CreateSemaphores() {
}
}

void VKSwapchain::CreateImageViews() {
void VKSwapchain::CreateImageViews() {
VkImageViewCreateInfo ci;
VkImageViewCreateInfo ci{
ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
ci.pNext = nullptr;
.pNext = nullptr,
ci.flags = 0;
.flags = 0,
// ci.image
.viewType = VK_IMAGE_VIEW_TYPE_2D,
ci.viewType = VK_IMAGE_VIEW_TYPE_2D;
.format = image_format,
ci.format = image_format;
.components =
ci.components = {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
{
VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY};
.r = VK_COMPONENT_SWIZZLE_IDENTITY,
ci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
.g = VK_COMPONENT_SWIZZLE_IDENTITY,
ci.subresourceRange.baseMipLevel = 0;
.b = VK_COMPONENT_SWIZZLE_IDENTITY,
ci.subresourceRange.levelCount = 1;
.a = VK_COMPONENT_SWIZZLE_IDENTITY,
ci.subresourceRange.baseArrayLayer = 0;
},
ci.subresourceRange.layerCount = 1;
.subresourceRange =
{
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
};

image_views.resize(image_count);
image_views.resize(image_count);
for (std::size_t i = 0; i < image_count; i++) {
for (std::size_t i = 0; i < image_count; i++) {
@ -95,17 +95,18 @@ VkImageViewType GetImageViewType(SurfaceTarget target) {
vk::Buffer CreateBuffer(const VKDevice& device, const SurfaceParams& params,
vk::Buffer CreateBuffer(const VKDevice& device, const SurfaceParams& params,
std::size_t host_memory_size) {
std::size_t host_memory_size) {
// TODO(Rodrigo): Move texture buffer creation to the buffer cache
// TODO(Rodrigo): Move texture buffer creation to the buffer cache
VkBufferCreateInfo ci;
return device.GetLogical().CreateBuffer({
ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
ci.pNext = nullptr;
.pNext = nullptr,
ci.flags = 0;
.flags = 0,
ci.size = static_cast<VkDeviceSize>(host_memory_size);
.size = static_cast<VkDeviceSize>(host_memory_size),
ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT |
.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT |
VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VK_BUFFER_USAGE_TRANSFER_DST_BIT,
ci.queueFamilyIndexCount = 0;
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
ci.pQueueFamilyIndices = nullptr;
.queueFamilyIndexCount = 0,
return device.GetLogical().CreateBuffer(ci);
.pQueueFamilyIndices = nullptr,
});
}
}

VkBufferViewCreateInfo GenerateBufferViewCreateInfo(const VKDevice& device,
VkBufferViewCreateInfo GenerateBufferViewCreateInfo(const VKDevice& device,
@ -113,15 +114,16 @@ VkBufferViewCreateInfo GenerateBufferViewCreateInfo(const VKDevice& device,
std::size_t host_memory_size) {
std::size_t host_memory_size) {
ASSERT(params.IsBuffer());
ASSERT(params.IsBuffer());

VkBufferViewCreateInfo ci;
return {
ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
ci.pNext = nullptr;
.pNext = nullptr,
ci.flags = 0;
.flags = 0,
ci.buffer = buffer;
.buffer = buffer,
ci.format = MaxwellToVK::SurfaceFormat(device, FormatType::Buffer, params.pixel_format).format;
.format =
ci.offset = 0;
MaxwellToVK::SurfaceFormat(device, FormatType::Buffer, params.pixel_format).format,
ci.range = static_cast<VkDeviceSize>(host_memory_size);
.offset = 0,
return ci;
.range = static_cast<VkDeviceSize>(host_memory_size),
};
}
}

VkImageCreateInfo GenerateImageCreateInfo(const VKDevice& device, const SurfaceParams& params) {
VkImageCreateInfo GenerateImageCreateInfo(const VKDevice& device, const SurfaceParams& params) {
@ -130,23 +132,23 @@ VkImageCreateInfo GenerateImageCreateInfo(const VKDevice& device, const SurfaceP
const auto [format, attachable, storage] =
const auto [format, attachable, storage] =
MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, params.pixel_format);
MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, params.pixel_format);

VkImageCreateInfo ci;
VkImageCreateInfo ci{
ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
ci.pNext = nullptr;
.pNext = nullptr,
ci.flags = 0;
.flags = 0,
ci.imageType = SurfaceTargetToImage(params.target);
.imageType = SurfaceTargetToImage(params.target),
ci.format = format;
.format = format,
ci.mipLevels = params.num_levels;
.mipLevels = params.num_levels,
ci.arrayLayers = static_cast<u32>(params.GetNumLayers());
.arrayLayers = static_cast<u32>(params.GetNumLayers()),
ci.samples = VK_SAMPLE_COUNT_1_BIT;
.samples = VK_SAMPLE_COUNT_1_BIT,
ci.tiling = VK_IMAGE_TILING_OPTIMAL;
.tiling = VK_IMAGE_TILING_OPTIMAL,
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
ci.queueFamilyIndexCount = 0;
VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
ci.pQueueFamilyIndices = nullptr;
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
ci.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
};
if (attachable) {
if (attachable) {
ci.usage |= params.IsPixelFormatZeta() ? VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT
ci.usage |= params.IsPixelFormatZeta() ? VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT
: VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
: VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
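
The member reordering visible in this hunk (`.usage` now precedes `.sharingMode`, where the old code assigned `ci.usage` last) is forced by the language: C++20 requires designators to appear in declaration order. A minimal illustration with a stand-in struct:

// Designators must follow the order in which members are declared.
struct Info {
    unsigned usage;
    unsigned sharingMode;
};

constexpr Info ok{.usage = 1U, .sharingMode = 2U};
// constexpr Info bad{.sharingMode = 2U, .usage = 1U}; // ill-formed in C++20

int main() {
    return ok.usage == 1U ? 0 : 1;
}
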
@ -321,22 +323,25 @@ void CachedSurface::UploadImage(const std::vector<u8>& staging_buffer) {
}
}

VkBufferImageCopy CachedSurface::GetBufferImageCopy(u32 level) const {
VkBufferImageCopy CachedSurface::GetBufferImageCopy(u32 level) const {
VkBufferImageCopy copy;
return {
copy.bufferOffset = params.GetHostMipmapLevelOffset(level, is_converted);
.bufferOffset = params.GetHostMipmapLevelOffset(level, is_converted),
copy.bufferRowLength = 0;
.bufferRowLength = 0,
copy.bufferImageHeight = 0;
.bufferImageHeight = 0,
copy.imageSubresource.aspectMask = image->GetAspectMask();
.imageSubresource =
copy.imageSubresource.mipLevel = level;
{
copy.imageSubresource.baseArrayLayer = 0;
.aspectMask = image->GetAspectMask(),
copy.imageSubresource.layerCount = static_cast<u32>(params.GetNumLayers());
.mipLevel = level,
copy.imageOffset.x = 0;
.baseArrayLayer = 0,
copy.imageOffset.y = 0;
.layerCount = static_cast<u32>(params.GetNumLayers()),
copy.imageOffset.z = 0;
},
copy.imageExtent.width = params.GetMipWidth(level);
.imageOffset = {.x = 0, .y = 0, .z = 0},
copy.imageExtent.height = params.GetMipHeight(level);
.imageExtent =
copy.imageExtent.depth =
{
params.target == SurfaceTarget::Texture3D ? params.GetMipDepth(level) : 1;
.width = params.GetMipWidth(level),
return copy;
.height = params.GetMipHeight(level),
.depth = params.target == SurfaceTarget::Texture3D ? params.GetMipDepth(level) : 1U,
},
};
}
}

VkImageSubresourceRange CachedSurface::GetImageSubresourceRange() const {
VkImageSubresourceRange CachedSurface::GetImageSubresourceRange() const {
@ -416,20 +421,29 @@ VkImageView CachedSurfaceView::GetImageView(SwizzleSource x_source, SwizzleSourc
ASSERT(num_slices == params.depth);
ASSERT(num_slices == params.depth);
}
}

VkImageViewCreateInfo ci;
image_view = device.GetLogical().CreateImageView({
ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
ci.pNext = nullptr;
.pNext = nullptr,
ci.flags = 0;
.flags = 0,
ci.image = surface.GetImageHandle();
.image = surface.GetImageHandle(),
ci.viewType = image_view_type;
.viewType = image_view_type,
ci.format = surface.GetImage().GetFormat();
.format = surface.GetImage().GetFormat(),
ci.components = {swizzle[0], swizzle[1], swizzle[2], swizzle[3]};
.components =
ci.subresourceRange.aspectMask = aspect;
{
ci.subresourceRange.baseMipLevel = base_level;
.r = swizzle[0],
ci.subresourceRange.levelCount = num_levels;
.g = swizzle[1],
ci.subresourceRange.baseArrayLayer = base_layer;
.b = swizzle[2],
ci.subresourceRange.layerCount = num_layers;
.a = swizzle[3],
image_view = device.GetLogical().CreateImageView(ci);
},
.subresourceRange =
{
.aspectMask = aspect,
.baseMipLevel = base_level,
.levelCount = num_levels,
.baseArrayLayer = base_layer,
.layerCount = num_layers,
},
});

return last_image_view = *image_view;
return last_image_view = *image_view;
}
}
@ -439,17 +453,26 @@ VkImageView CachedSurfaceView::GetAttachment() {
return *render_target;
return *render_target;
}
}

VkImageViewCreateInfo ci;
VkImageViewCreateInfo ci{
ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
ci.pNext = nullptr;
.pNext = nullptr,
ci.flags = 0;
.flags = 0,
ci.image = surface.GetImageHandle();
.image = surface.GetImageHandle(),
ci.format = surface.GetImage().GetFormat();
.format = surface.GetImage().GetFormat(),
ci.components = {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
.components =
VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY};
{
ci.subresourceRange.aspectMask = aspect_mask;
.r = VK_COMPONENT_SWIZZLE_IDENTITY,
ci.subresourceRange.baseMipLevel = base_level;
.g = VK_COMPONENT_SWIZZLE_IDENTITY,
ci.subresourceRange.levelCount = num_levels;
.b = VK_COMPONENT_SWIZZLE_IDENTITY,
.a = VK_COMPONENT_SWIZZLE_IDENTITY,
},
.subresourceRange =
{
.aspectMask = aspect_mask,
.baseMipLevel = base_level,
.levelCount = num_levels,
},
};
if (image_view_type == VK_IMAGE_VIEW_TYPE_3D) {
if (image_view_type == VK_IMAGE_VIEW_TYPE_3D) {
ci.viewType = num_slices > 1 ? VK_IMAGE_VIEW_TYPE_2D_ARRAY : VK_IMAGE_VIEW_TYPE_2D;
ci.viewType = num_slices > 1 ? VK_IMAGE_VIEW_TYPE_2D_ARRAY : VK_IMAGE_VIEW_TYPE_2D;
ci.subresourceRange.baseArrayLayer = base_slice;
ci.subresourceRange.baseArrayLayer = base_slice;
@ -502,24 +525,40 @@ void VKTextureCache::ImageCopy(Surface& src_surface, Surface& dst_surface,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);

VkImageCopy copy;
const VkImageCopy copy{
copy.srcSubresource.aspectMask = src_surface->GetAspectMask();
.srcSubresource =
copy.srcSubresource.mipLevel = copy_params.source_level;
{
copy.srcSubresource.baseArrayLayer = copy_params.source_z;
.aspectMask = src_surface->GetAspectMask(),
copy.srcSubresource.layerCount = num_layers;
.mipLevel = copy_params.source_level,
copy.srcOffset.x = copy_params.source_x;
.baseArrayLayer = copy_params.source_z,
copy.srcOffset.y = copy_params.source_y;
.layerCount = num_layers,
copy.srcOffset.z = 0;
},
copy.dstSubresource.aspectMask = dst_surface->GetAspectMask();
.srcOffset =
copy.dstSubresource.mipLevel = copy_params.dest_level;
{
copy.dstSubresource.baseArrayLayer = dst_base_layer;
.x = static_cast<s32>(copy_params.source_x),
copy.dstSubresource.layerCount = num_layers;
.y = static_cast<s32>(copy_params.source_y),
copy.dstOffset.x = copy_params.dest_x;
.z = 0,
copy.dstOffset.y = copy_params.dest_y;
},
copy.dstOffset.z = dst_offset_z;
.dstSubresource =
copy.extent.width = copy_params.width;
{
copy.extent.height = copy_params.height;
.aspectMask = dst_surface->GetAspectMask(),
copy.extent.depth = extent_z;
.mipLevel = copy_params.dest_level,
.baseArrayLayer = dst_base_layer,
.layerCount = num_layers,
},
.dstOffset =
{
.x = static_cast<s32>(copy_params.dest_x),
.y = static_cast<s32>(copy_params.dest_y),
.z = static_cast<s32>(dst_offset_z),
},
.extent =
{
.width = copy_params.width,
.height = copy_params.height,
.depth = extent_z,
},
};

const VkImage src_image = src_surface->GetImageHandle();
const VkImage src_image = src_surface->GetImageHandle();
const VkImage dst_image = dst_surface->GetImageHandle();
const VkImage dst_image = dst_surface->GetImageHandle();
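
The `static_cast<s32>` calls added in this hunk are also language-driven: VkOffset3D members are signed, the copy parameters are unsigned, and braced initialization rejects the implicit narrowing conversion that plain assignment used to accept silently. A stand-in sketch:

#include <cstdint>

// Stand-in for VkOffset3D, whose members are signed 32-bit integers.
struct Offset3D {
    std::int32_t x, y, z;
};

Offset3D MakeOffset(std::uint32_t src_x, std::uint32_t src_y) {
    // Offset3D bad{.x = src_x, ...};  // ill-formed: narrowing u32 -> s32
    return Offset3D{
        .x = static_cast<std::int32_t>(src_x),
        .y = static_cast<std::int32_t>(src_y),
        .z = 0,
    };
}

int main() {
    return MakeOffset(16U, 32U).x == 16 ? 0 : 1;
}
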
@ -377,24 +377,26 @@ VkResult Free(VkDevice device, VkCommandPool handle, Span<VkCommandBuffer> buffe

Instance Instance::Create(Span<const char*> layers, Span<const char*> extensions,
Instance Instance::Create(Span<const char*> layers, Span<const char*> extensions,
InstanceDispatch& dld) noexcept {
InstanceDispatch& dld) noexcept {
VkApplicationInfo application_info;
static constexpr VkApplicationInfo application_info{
application_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
application_info.pNext = nullptr;
.pNext = nullptr,
application_info.pApplicationName = "yuzu Emulator";
.pApplicationName = "yuzu Emulator",
application_info.applicationVersion = VK_MAKE_VERSION(0, 1, 0);
.applicationVersion = VK_MAKE_VERSION(0, 1, 0),
application_info.pEngineName = "yuzu Emulator";
.pEngineName = "yuzu Emulator",
application_info.engineVersion = VK_MAKE_VERSION(0, 1, 0);
.engineVersion = VK_MAKE_VERSION(0, 1, 0),
application_info.apiVersion = VK_API_VERSION_1_1;
.apiVersion = VK_API_VERSION_1_1,
};

VkInstanceCreateInfo ci;
const VkInstanceCreateInfo ci{
ci.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
ci.pNext = nullptr;
.pNext = nullptr,
ci.flags = 0;
.flags = 0,
ci.pApplicationInfo = &application_info;
.pApplicationInfo = &application_info,
ci.enabledLayerCount = layers.size();
.enabledLayerCount = layers.size(),
ci.ppEnabledLayerNames = layers.data();
.ppEnabledLayerNames = layers.data(),
ci.enabledExtensionCount = extensions.size();
.enabledExtensionCount = extensions.size(),
ci.ppEnabledExtensionNames = extensions.data();
.ppEnabledExtensionNames = extensions.data(),
};

VkInstance instance;
VkInstance instance;
if (dld.vkCreateInstance(&ci, nullptr, &instance) != VK_SUCCESS) {
if (dld.vkCreateInstance(&ci, nullptr, &instance) != VK_SUCCESS) {
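
application_info is the first struct in this commit promoted to `static constexpr`: once every field is a compile-time constant, the object can live in read-only storage and is never rebuilt per call. A sketch of the same promotion on a hypothetical stand-in struct:

#include <cstdint>

// Stand-in application info for illustration; not the real Vulkan type.
struct AppInfo {
    const char* pApplicationName;
    std::uint32_t applicationVersion;
};

const AppInfo& GetAppInfo() {
    // Evaluated at compile time; the object has static storage duration,
    // so every call returns the same read-only instance.
    static constexpr AppInfo info{
        .pApplicationName = "yuzu Emulator",
        .applicationVersion = 1U,
    };
    return info;
}

int main() {
    return GetAppInfo().applicationVersion == 1U ? 0 : 1;
}
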
@ -425,19 +427,20 @@ std::optional<std::vector<VkPhysicalDevice>> Instance::EnumeratePhysicalDevices(

DebugCallback Instance::TryCreateDebugCallback(
DebugCallback Instance::TryCreateDebugCallback(
PFN_vkDebugUtilsMessengerCallbackEXT callback) noexcept {
PFN_vkDebugUtilsMessengerCallbackEXT callback) noexcept {
VkDebugUtilsMessengerCreateInfoEXT ci;
const VkDebugUtilsMessengerCreateInfoEXT ci{
ci.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT,
ci.pNext = nullptr;
.pNext = nullptr,
ci.flags = 0;
.flags = 0,
ci.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT;
VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT,
ci.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT;
VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT,
ci.pfnUserCallback = callback;
.pfnUserCallback = callback,
ci.pUserData = nullptr;
.pUserData = nullptr,
};

VkDebugUtilsMessengerEXT messenger;
VkDebugUtilsMessengerEXT messenger;
if (dld->vkCreateDebugUtilsMessengerEXT(handle, &ci, nullptr, &messenger) != VK_SUCCESS) {
if (dld->vkCreateDebugUtilsMessengerEXT(handle, &ci, nullptr, &messenger) != VK_SUCCESS) {
@ -468,12 +471,13 @@ DescriptorSets DescriptorPool::Allocate(const VkDescriptorSetAllocateInfo& ai) c
}
}

CommandBuffers CommandPool::Allocate(std::size_t num_buffers, VkCommandBufferLevel level) const {
CommandBuffers CommandPool::Allocate(std::size_t num_buffers, VkCommandBufferLevel level) const {
VkCommandBufferAllocateInfo ai;
const VkCommandBufferAllocateInfo ai{
ai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
ai.pNext = nullptr;
.pNext = nullptr,
ai.commandPool = handle;
.commandPool = handle,
ai.level = level;
.level = level,
ai.commandBufferCount = static_cast<u32>(num_buffers);
.commandBufferCount = static_cast<u32>(num_buffers),
};

std::unique_ptr buffers = std::make_unique<VkCommandBuffer[]>(num_buffers);
std::unique_ptr buffers = std::make_unique<VkCommandBuffer[]>(num_buffers);
switch (const VkResult result = dld->vkAllocateCommandBuffers(owner, &ai, buffers.get())) {
switch (const VkResult result = dld->vkAllocateCommandBuffers(owner, &ai, buffers.get())) {
@ -497,17 +501,18 @@ std::vector<VkImage> SwapchainKHR::GetImages() const {
Device Device::Create(VkPhysicalDevice physical_device, Span<VkDeviceQueueCreateInfo> queues_ci,
Device Device::Create(VkPhysicalDevice physical_device, Span<VkDeviceQueueCreateInfo> queues_ci,
Span<const char*> enabled_extensions, const void* next,
Span<const char*> enabled_extensions, const void* next,
DeviceDispatch& dld) noexcept {
DeviceDispatch& dld) noexcept {
VkDeviceCreateInfo ci;
const VkDeviceCreateInfo ci{
ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
ci.pNext = next;
.pNext = next,
ci.flags = 0;
.flags = 0,
ci.queueCreateInfoCount = queues_ci.size();
.queueCreateInfoCount = queues_ci.size(),
ci.pQueueCreateInfos = queues_ci.data();
.pQueueCreateInfos = queues_ci.data(),
ci.enabledLayerCount = 0;
.enabledLayerCount = 0,
ci.ppEnabledLayerNames = nullptr;
.ppEnabledLayerNames = nullptr,
ci.enabledExtensionCount = enabled_extensions.size();
.enabledExtensionCount = enabled_extensions.size(),
ci.ppEnabledExtensionNames = enabled_extensions.data();
.ppEnabledExtensionNames = enabled_extensions.data(),
ci.pEnabledFeatures = nullptr;
.pEnabledFeatures = nullptr,
};

VkDevice device;
VkDevice device;
if (dld.vkCreateDevice(physical_device, &ci, nullptr, &device) != VK_SUCCESS) {
if (dld.vkCreateDevice(physical_device, &ci, nullptr, &device) != VK_SUCCESS) {
@ -548,10 +553,11 @@ ImageView Device::CreateImageView(const VkImageViewCreateInfo& ci) const {
}
}

Semaphore Device::CreateSemaphore() const {
Semaphore Device::CreateSemaphore() const {
VkSemaphoreCreateInfo ci;
static constexpr VkSemaphoreCreateInfo ci{
ci.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
ci.pNext = nullptr;
.pNext = nullptr,
ci.flags = 0;
.flags = 0,
};

VkSemaphore object;
VkSemaphore object;
Check(dld->vkCreateSemaphore(handle, &ci, nullptr, &object));
Check(dld->vkCreateSemaphore(handle, &ci, nullptr, &object));
@ -639,10 +645,12 @@ ShaderModule Device::CreateShaderModule(const VkShaderModuleCreateInfo& ci) cons
}
}

Event Device::CreateEvent() const {
Event Device::CreateEvent() const {
VkEventCreateInfo ci;
static constexpr VkEventCreateInfo ci{
ci.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO,
ci.pNext = nullptr;
.pNext = nullptr,
ci.flags = 0;
.flags = 0,
};

VkEvent object;
VkEvent object;
Check(dld->vkCreateEvent(handle, &ci, nullptr, &object));
Check(dld->vkCreateEvent(handle, &ci, nullptr, &object));
return Event(object, handle, *dld);
return Event(object, handle, *dld);