Merge pull request #9216 from vonchenplus/reimp_inline_index_buffer
video_core: Reimplement inline index buffer binding
Commit 7f1c6def1f
5 changed files with 31 additions and 33 deletions
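In short: rather than each rasterizer rebinding inline index data itself on every draw through a private BindInlineIndexBuffer() helper, the shared buffer cache now handles it. UpdateIndexBuffer() creates a scratch index-buffer binding at CPU address 0 whenever maxwell3d->inline_index_draw_indexes is non-empty, BindHostIndexBuffer() uploads that data either through a staging map or an immediate upload, and the per-backend helpers in the OpenGL and Vulkan rasterizers are deleted.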
@@ -992,7 +992,20 @@ void BufferCache<P>::BindHostIndexBuffer() {
     TouchBuffer(buffer, index_buffer.buffer_id);
     const u32 offset = buffer.Offset(index_buffer.cpu_addr);
     const u32 size = index_buffer.size;
-    SynchronizeBuffer(buffer, index_buffer.cpu_addr, size);
+    if (maxwell3d->inline_index_draw_indexes.size()) {
+        if constexpr (USE_MEMORY_MAPS) {
+            auto upload_staging = runtime.UploadStagingBuffer(size);
+            std::array<BufferCopy, 1> copies{
+                {BufferCopy{.src_offset = upload_staging.offset, .dst_offset = 0, .size = size}}};
+            std::memcpy(upload_staging.mapped_span.data(),
+                        maxwell3d->inline_index_draw_indexes.data(), size);
+            runtime.CopyBuffer(buffer, upload_staging.buffer, copies);
+        } else {
+            buffer.ImmediateUpload(0, maxwell3d->inline_index_draw_indexes);
+        }
+    } else {
+        SynchronizeBuffer(buffer, index_buffer.cpu_addr, size);
+    }
     if constexpr (HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) {
         const u32 new_offset = offset + maxwell3d->regs.index_buffer.first *
                                             maxwell3d->regs.index_buffer.FormatSizeInBytes();
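The new path in BindHostIndexBuffer() above uploads the inline indexes in one of two ways: when the backend's buffer-cache runtime advertises USE_MEMORY_MAPS, it memcpys them into a mapped staging buffer and hands a BufferCopy region to runtime.CopyBuffer(); otherwise it writes them directly with ImmediateUpload(). The toy program below is a minimal, self-contained sketch of that staging idiom only, not yuzu code; every type and helper in it (StagingBuffer, DeviceBuffer, UploadStagingBuffer, CopyBuffer) is an illustrative stand-in.

// Minimal sketch of the staging-upload pattern used above (C++20, not yuzu code).
// The "staging" storage plays the role of host-visible mapped memory and the
// "device" storage the role of the device-local index buffer.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <span>
#include <vector>

struct StagingBuffer {
    std::vector<std::uint8_t> storage;   // stand-in for host-visible memory
    std::span<std::uint8_t> mapped_span; // view the CPU writes into
};

struct DeviceBuffer {
    std::vector<std::uint8_t> storage;   // stand-in for device-local memory
};

StagingBuffer UploadStagingBuffer(std::size_t size) {
    StagingBuffer staging{std::vector<std::uint8_t>(size), {}};
    // The span stays valid across the return: moving a vector keeps its heap allocation.
    staging.mapped_span = staging.storage;
    return staging;
}

void CopyBuffer(DeviceBuffer& dst, const StagingBuffer& src, std::size_t size) {
    // Stand-in for the recorded staging -> device copy.
    dst.storage.assign(src.storage.begin(), src.storage.begin() + size);
}

int main() {
    // Stand-in for maxwell3d->inline_index_draw_indexes: raw index bytes
    // accumulated from an inline-index draw.
    const std::vector<std::uint8_t> inline_index_draw_indexes{0, 0, 1, 0, 2, 0};
    const std::size_t size = inline_index_draw_indexes.size();

    DeviceBuffer index_buffer;
    auto upload_staging = UploadStagingBuffer(size);
    std::memcpy(upload_staging.mapped_span.data(), inline_index_draw_indexes.data(), size);
    CopyBuffer(index_buffer, upload_staging, size); // device buffer now holds the inline indexes
    return index_buffer.storage.size() == size ? 0 : 1;
}

In the real hunk the copy is expressed as a BufferCopy{.src_offset, .dst_offset, .size} region passed to runtime.CopyBuffer(), so the backend performs the transfer; the sketch just collapses that into a byte copy.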
@@ -1275,7 +1288,15 @@ void BufferCache<P>::UpdateIndexBuffer() {
     }
     flags[Dirty::IndexBuffer] = false;
     last_index_count = index_array.count;
+    if (maxwell3d->inline_index_draw_indexes.size()) {
+        auto inline_index_size = static_cast<u32>(maxwell3d->inline_index_draw_indexes.size());
+        index_buffer = Binding{
+            .cpu_addr = 0,
+            .size = inline_index_size,
+            .buffer_id = CreateBuffer(0, inline_index_size),
+        };
+        return;
+    }
     const GPUVAddr gpu_addr_begin = index_array.StartAddress();
     const GPUVAddr gpu_addr_end = index_array.EndAddress();
     const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin);
@@ -1491,6 +1512,14 @@ typename BufferCache<P>::OverlapResult BufferCache<P>::ResolveOverlaps(VAddr cpu
     VAddr end = cpu_addr + wanted_size;
     int stream_score = 0;
     bool has_stream_leap = false;
+    if (begin == 0) {
+        return OverlapResult{
+            .ids = std::move(overlap_ids),
+            .begin = begin,
+            .end = end,
+            .has_stream_leap = has_stream_leap,
+        };
+    }
     for (; cpu_addr >> YUZU_PAGEBITS < Common::DivCeil(end, YUZU_PAGESIZE);
          cpu_addr += YUZU_PAGESIZE) {
         const BufferId overlap_id = page_table[cpu_addr >> YUZU_PAGEBITS];
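These buffer-cache hunks work together: the scratch binding created for inline indexes uses cpu_addr = 0 and CreateBuffer(0, inline_index_size), and the new begin == 0 early return in ResolveOverlaps() appears to exist for exactly that case, bailing out instead of walking the page table so the address-0 scratch buffer never picks up overlaps with buffers backed by real guest memory.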
@@ -222,8 +222,6 @@ void RasterizerOpenGL::Draw(bool is_indexed, u32 instance_count) {
     pipeline->SetEngine(maxwell3d, gpu_memory);
     pipeline->Configure(is_indexed);
 
-    BindInlineIndexBuffer();
-
     SyncState();
 
     const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(maxwell3d->regs.draw.topology);
@@ -1140,16 +1138,6 @@ void RasterizerOpenGL::ReleaseChannel(s32 channel_id) {
     query_cache.EraseChannel(channel_id);
 }
 
-void RasterizerOpenGL::BindInlineIndexBuffer() {
-    if (maxwell3d->inline_index_draw_indexes.empty()) {
-        return;
-    }
-    const auto data_count = static_cast<u32>(maxwell3d->inline_index_draw_indexes.size());
-    auto buffer = Buffer(buffer_cache_runtime, *this, 0, data_count);
-    buffer.ImmediateUpload(0, maxwell3d->inline_index_draw_indexes);
-    buffer_cache_runtime.BindIndexBuffer(buffer, 0, data_count);
-}
-
 AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_) : buffer_cache{buffer_cache_} {}
 
 bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) {
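With the upload handled inside the buffer cache, the OpenGL rasterizer drops its per-draw helper, which had created a temporary Buffer, uploaded maxwell3d->inline_index_draw_indexes into it, and rebound it as the index buffer right after pipeline->Configure(); both the call in Draw() and the definition are gone, and the matching declaration is removed in the header hunk below.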
@@ -199,8 +199,6 @@ private:
     /// End a transform feedback
     void EndTransformFeedback();
 
-    void BindInlineIndexBuffer();
-
     Tegra::GPU& gpu;
 
     const Device& device;
@@ -191,8 +191,6 @@ void RasterizerVulkan::Draw(bool is_indexed, u32 instance_count) {
     pipeline->SetEngine(maxwell3d, gpu_memory);
     pipeline->Configure(is_indexed);
 
-    BindInlineIndexBuffer();
-
     BeginTransformFeedback();
 
     UpdateDynamicStates();
@@ -1029,17 +1027,4 @@ void RasterizerVulkan::ReleaseChannel(s32 channel_id) {
     query_cache.EraseChannel(channel_id);
 }
 
-void RasterizerVulkan::BindInlineIndexBuffer() {
-    if (maxwell3d->inline_index_draw_indexes.empty()) {
-        return;
-    }
-    const auto data_count = static_cast<u32>(maxwell3d->inline_index_draw_indexes.size());
-    auto buffer = buffer_cache_runtime.UploadStagingBuffer(data_count);
-    std::memcpy(buffer.mapped_span.data(), maxwell3d->inline_index_draw_indexes.data(), data_count);
-    buffer_cache_runtime.BindIndexBuffer(
-        maxwell3d->regs.draw.topology, maxwell3d->regs.index_buffer.format,
-        maxwell3d->regs.index_buffer.first, maxwell3d->regs.index_buffer.count, buffer.buffer,
-        static_cast<u32>(buffer.offset), data_count);
-}
-
 } // namespace Vulkan
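The Vulkan rasterizer loses the equivalent helper, which staged the inline indexes and issued its own BindIndexBuffer() right after pipeline->Configure() had already set up the draw; that work now happens once in BufferCache<P>::BindHostIndexBuffer(), as shown in the first hunk. Its declaration is removed in the header hunk below.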
@@ -141,8 +141,6 @@ private:
 
     void UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs);
 
-    void BindInlineIndexBuffer();
-
     Tegra::GPU& gpu;
 
     ScreenInfo& screen_info;