Mirror of https://git.suyu.dev/suyu/suyu.git (synced 2024-12-24 17:30:56 +01:00)
video_core: dma_pusher: Add support for prefetched command lists.
commit c64545d07a (parent 1d4cbb92f2)
2 changed files with 52 additions and 25 deletions
src/video_core/dma_pusher.cpp

@@ -45,32 +45,42 @@ bool DmaPusher::Step() {
         return false;
     }
 
-    const CommandList& command_list{dma_pushbuffer.front()};
-    ASSERT_OR_EXECUTE(!command_list.empty(), {
-        // Somehow the command_list is empty, in order to avoid a crash
-        // We ignore it and assume its size is 0.
-        dma_pushbuffer.pop();
-        dma_pushbuffer_subindex = 0;
-        return true;
-    });
-    const CommandListHeader command_list_header{command_list[dma_pushbuffer_subindex++]};
-    const GPUVAddr dma_get = command_list_header.addr;
-
-    if (dma_pushbuffer_subindex >= command_list.size()) {
-        // We've gone through the current list, remove it from the queue
-        dma_pushbuffer.pop();
-        dma_pushbuffer_subindex = 0;
-    }
-
-    if (command_list_header.size == 0) {
-        return true;
-    }
-
-    // Push buffer non-empty, read a word
-    command_headers.resize(command_list_header.size);
-    gpu.MemoryManager().ReadBlockUnsafe(dma_get, command_headers.data(),
-                                        command_list_header.size * sizeof(u32));
+    CommandList& command_list{dma_pushbuffer.front()};
+
+    ASSERT_OR_EXECUTE(
+        command_list.command_lists.size() || command_list.prefetch_command_list.size(), {
+            // Somehow the command_list is empty, in order to avoid a crash
+            // We ignore it and assume its size is 0.
+            dma_pushbuffer.pop();
+            dma_pushbuffer_subindex = 0;
+            return true;
+        });
+
+    if (command_list.prefetch_command_list.size()) {
+        // Prefetched command list from nvdrv, used for things like synchronization
+        command_headers = std::move(command_list.prefetch_command_list);
+        dma_pushbuffer.pop();
+    } else {
+        const CommandListHeader command_list_header{
+            command_list.command_lists[dma_pushbuffer_subindex++]};
+        const GPUVAddr dma_get = command_list_header.addr;
+
+        if (dma_pushbuffer_subindex >= command_list.command_lists.size()) {
+            // We've gone through the current list, remove it from the queue
+            dma_pushbuffer.pop();
+            dma_pushbuffer_subindex = 0;
+        }
+
+        if (command_list_header.size == 0) {
+            return true;
+        }
+
+        // Push buffer non-empty, read a word
+        command_headers.resize(command_list_header.size);
+        gpu.MemoryManager().ReadBlockUnsafe(dma_get, command_headers.data(),
+                                            command_list_header.size * sizeof(u32));
+    }
 
     for (std::size_t index = 0; index < command_headers.size();) {
         const CommandHeader& command_header = command_headers[index];
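The new Step() distinguishes two kinds of queued work: entries in command_lists are GPFIFO headers whose command words still have to be read out of GPU-visible guest memory with ReadBlockUnsafe, while prefetch_command_list already holds the raw CommandHeader words on the host and is moved straight into command_headers. A minimal sketch of the two shapes; the fill-in values are illustrative only:

    // Regular submission: the header points at a pushbuffer in guest memory;
    // Step() reads command_list_header.size words starting at command_list_header.addr.
    Tegra::CommandList regular(1);
    // ... fill regular.command_lists[0] (addr/size) from the guest GPFIFO entry ...

    // Prefetched submission: the command words were already built host-side
    // (e.g. by nvdrv for synchronization), so no guest-memory read is needed.
    std::vector<Tegra::CommandHeader> words{/* already-built command headers */};
    Tegra::CommandList prefetched(std::move(words));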
src/video_core/dma_pusher.h

@@ -74,9 +74,26 @@ union CommandHeader {
 static_assert(std::is_standard_layout_v<CommandHeader>, "CommandHeader is not standard layout");
 static_assert(sizeof(CommandHeader) == sizeof(u32), "CommandHeader has incorrect size!");
 
+static constexpr CommandHeader BuildCommandHeader(BufferMethods method, u32 arg_count,
+                                                  SubmissionMode mode) {
+    CommandHeader result{};
+    result.method.Assign(static_cast<u32>(method));
+    result.arg_count.Assign(arg_count);
+    result.mode.Assign(mode);
+    return result;
+}
+
 class GPU;
 
-using CommandList = std::vector<Tegra::CommandListHeader>;
+struct CommandList final {
+    CommandList() = default;
+    explicit CommandList(std::size_t size) : command_lists(size) {}
+    explicit CommandList(std::vector<Tegra::CommandHeader>&& prefetch_command_list)
+        : prefetch_command_list{std::move(prefetch_command_list)} {}
+
+    std::vector<Tegra::CommandListHeader> command_lists;
+    std::vector<Tegra::CommandHeader> prefetch_command_list;
+};
 
 /**
  * The DmaPusher class implements DMA submission to FIFOs, providing an area of memory that the
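BuildCommandHeader lets code outside the pusher assemble prefetch_command_list contents without hand-packing the CommandHeader bitfields. A rough sketch of building a small prefetched list with it; the particular BufferMethods enumerators and argument words below are illustrative assumptions, not part of this commit:

    // Each method header is followed by its raw argument word(s).
    std::vector<Tegra::CommandHeader> headers{
        Tegra::BuildCommandHeader(Tegra::BufferMethods::SemaphoreSequence, 1,
                                  Tegra::SubmissionMode::Increasing),
        Tegra::CommandHeader{0x10},  // raw argument word (illustrative sequence value)
        Tegra::BuildCommandHeader(Tegra::BufferMethods::SemaphoreTrigger, 1,
                                  Tegra::SubmissionMode::Increasing),
        Tegra::CommandHeader{0x1},   // raw argument word (illustrative trigger operation)
    };
    Tegra::CommandList sync_list(std::move(headers));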
@@ -85,7 +102,7 @@ using CommandList = std::vector<Tegra::CommandListHeader>;
  * See https://envytools.readthedocs.io/en/latest/hw/fifo/dma-pusher.html#fifo-dma-pusher for
  * details on this implementation.
  */
-class DmaPusher {
+class DmaPusher final {
 public:
     explicit DmaPusher(Core::System& system, GPU& gpu);
     ~DmaPusher();
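Queued this way, a prefetched list travels the same submission path as a regular GPFIFO list. A sketch of the hand-off, assuming the existing GPU::PushGPUEntries entry point and the DmaPusher Push()/DispatchCalls() pair that other submissions already go through:

    // 'gpu' is a Tegra::GPU&. DispatchCalls() repeatedly calls Step() until the
    // pushbuffer queue drains, taking the new prefetch branch for this entry.
    std::vector<Tegra::CommandHeader> sync_words{/* built with BuildCommandHeader */};
    gpu.PushGPUEntries(Tegra::CommandList{std::move(sync_words)});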