Mirror of https://git.suyu.dev/suyu/suyu.git, synced 2024-12-24 01:10:56 +01:00
Merge pull request #5201 from ameerj/bufferq-refactor
vi/buffer_queue: Buffer queue management refactor
commit c3e201a829
3 changed files with 65 additions and 72 deletions
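At a glance, the refactor drops the growable std::vector<Buffer> queue that every operation scanned with std::find_if and instead keeps buffers in a fixed std::array<Buffer, buffer_slots> indexed directly by slot. A minimal sketch of the before and after lookup, using simplified stand-in types rather than the real BufferQueue class:

    // Illustrative only: stripped-down Buffer plus the two lookup styles the diff swaps between.
    #include <algorithm>
    #include <array>
    #include <cstdint>
    #include <vector>

    struct Buffer {
        std::uint32_t slot{};
    };

    constexpr std::uint32_t buffer_slots = 0x40; // matches the new constant in buffer_queue.h

    // Before: linear search of a vector on every RequestBuffer/QueueBuffer/CancelBuffer call.
    const Buffer* FindOld(const std::vector<Buffer>& queue, std::uint32_t slot) {
        const auto itr = std::find_if(queue.begin(), queue.end(),
                                      [slot](const Buffer& b) { return b.slot == slot; });
        return itr != queue.end() ? &*itr : nullptr;
    }

    // After: the slot id is the index; validity is checked up front instead of searched for.
    const Buffer& FindNew(const std::array<Buffer, buffer_slots>& buffers, std::uint32_t slot) {
        return buffers.at(slot); // the real code uses ASSERT(slot < buffers.size())
    }

Because each slot id is unique and bounded by buffer_slots, the per-call linear searches become constant-time lookups guarded by asserts.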
buffer_queue.cpp

@@ -22,10 +22,11 @@ BufferQueue::BufferQueue(Kernel::KernelCore& kernel, u32 id, u64 layer_id)
 BufferQueue::~BufferQueue() = default;
 
 void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer) {
+    ASSERT(slot < buffer_slots);
+
     LOG_WARNING(Service, "Adding graphics buffer {}", slot);
 
     free_buffers.push_back(slot);
-    queue.push_back({
+    buffers[slot] = {
         .slot = slot,
         .status = Buffer::Status::Free,
         .igbp_buffer = igbp_buffer,
@@ -33,7 +34,7 @@ void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer)
         .crop_rect = {},
         .swap_interval = 0,
         .multi_fence = {},
-    });
+    };
 
     buffer_wait_event.writable->Signal();
 }
@@ -44,73 +45,57 @@ std::optional<std::pair<u32, Service::Nvidia::MultiFence*>> BufferQueue::Dequeue
     if (free_buffers.empty()) {
         return std::nullopt;
     }
 
     auto f_itr = free_buffers.begin();
-    auto itr = queue.end();
+    auto slot = buffers.size();
 
     while (f_itr != free_buffers.end()) {
-        auto slot = *f_itr;
-        itr = std::find_if(queue.begin(), queue.end(), [&](const Buffer& buffer) {
-            // Only consider free buffers. Buffers become free once again after they've been
-            // Acquired and Released by the compositor, see the NVFlinger::Compose method.
-            if (buffer.status != Buffer::Status::Free) {
-                return false;
-            }
-
-            if (buffer.slot != slot) {
-                return false;
-            }
-
-            // Make sure that the parameters match.
-            return buffer.igbp_buffer.width == width && buffer.igbp_buffer.height == height;
-        });
-
-        if (itr != queue.end()) {
+        const Buffer& buffer = buffers[*f_itr];
+        if (buffer.status == Buffer::Status::Free && buffer.igbp_buffer.width == width &&
+            buffer.igbp_buffer.height == height) {
+            slot = *f_itr;
             free_buffers.erase(f_itr);
             break;
         }
         ++f_itr;
     }
 
-    if (itr == queue.end()) {
+    if (slot == buffers.size()) {
        return std::nullopt;
     }
 
-    itr->status = Buffer::Status::Dequeued;
-    return {{itr->slot, &itr->multi_fence}};
+    buffers[slot].status = Buffer::Status::Dequeued;
+    return {{buffers[slot].slot, &buffers[slot].multi_fence}};
 }
 
 const IGBPBuffer& BufferQueue::RequestBuffer(u32 slot) const {
-    auto itr = std::find_if(queue.begin(), queue.end(),
-                            [&](const Buffer& buffer) { return buffer.slot == slot; });
-    ASSERT(itr != queue.end());
-    ASSERT(itr->status == Buffer::Status::Dequeued);
-    return itr->igbp_buffer;
+    ASSERT(slot < buffers.size());
+    ASSERT(buffers[slot].status == Buffer::Status::Dequeued);
+    ASSERT(buffers[slot].slot == slot);
+
+    return buffers[slot].igbp_buffer;
 }
 
 void BufferQueue::QueueBuffer(u32 slot, BufferTransformFlags transform,
                               const Common::Rectangle<int>& crop_rect, u32 swap_interval,
                               Service::Nvidia::MultiFence& multi_fence) {
-    auto itr = std::find_if(queue.begin(), queue.end(),
-                            [&](const Buffer& buffer) { return buffer.slot == slot; });
-    ASSERT(itr != queue.end());
-    ASSERT(itr->status == Buffer::Status::Dequeued);
-    itr->status = Buffer::Status::Queued;
-    itr->transform = transform;
-    itr->crop_rect = crop_rect;
-    itr->swap_interval = swap_interval;
-    itr->multi_fence = multi_fence;
+    ASSERT(slot < buffers.size());
+    ASSERT(buffers[slot].status == Buffer::Status::Dequeued);
+    ASSERT(buffers[slot].slot == slot);
+
+    buffers[slot].status = Buffer::Status::Queued;
+    buffers[slot].transform = transform;
+    buffers[slot].crop_rect = crop_rect;
+    buffers[slot].swap_interval = swap_interval;
+    buffers[slot].multi_fence = multi_fence;
     queue_sequence.push_back(slot);
 }
 
 void BufferQueue::CancelBuffer(u32 slot, const Service::Nvidia::MultiFence& multi_fence) {
-    const auto itr = std::find_if(queue.begin(), queue.end(),
-                                  [slot](const Buffer& buffer) { return buffer.slot == slot; });
-    ASSERT(itr != queue.end());
-    ASSERT(itr->status != Buffer::Status::Free);
-    itr->status = Buffer::Status::Free;
-    itr->multi_fence = multi_fence;
-    itr->swap_interval = 0;
+    ASSERT(slot < buffers.size());
+    ASSERT(buffers[slot].status != Buffer::Status::Free);
+    ASSERT(buffers[slot].slot == slot);
+
+    buffers[slot].status = Buffer::Status::Free;
+    buffers[slot].multi_fence = multi_fence;
+    buffers[slot].swap_interval = 0;
 
     free_buffers.push_back(slot);
 
@@ -118,38 +103,39 @@ void BufferQueue::CancelBuffer(u32 slot, const Service::Nvidia::MultiFence& mult
 }
 
 std::optional<std::reference_wrapper<const BufferQueue::Buffer>> BufferQueue::AcquireBuffer() {
-    auto itr = queue.end();
+    std::size_t buffer_slot = buffers.size();
     // Iterate to find a queued buffer matching the requested slot.
-    while (itr == queue.end() && !queue_sequence.empty()) {
-        const u32 slot = queue_sequence.front();
-        itr = std::find_if(queue.begin(), queue.end(), [&slot](const Buffer& buffer) {
-            return buffer.status == Buffer::Status::Queued && buffer.slot == slot;
-        });
+    while (buffer_slot == buffers.size() && !queue_sequence.empty()) {
+        const auto slot = static_cast<std::size_t>(queue_sequence.front());
+        ASSERT(slot < buffers.size());
+        if (buffers[slot].status == Buffer::Status::Queued) {
+            ASSERT(buffers[slot].slot == slot);
+            buffer_slot = slot;
+        }
         queue_sequence.pop_front();
     }
-    if (itr == queue.end()) {
+    if (buffer_slot == buffers.size()) {
         return std::nullopt;
     }
-    itr->status = Buffer::Status::Acquired;
-    return *itr;
+    buffers[buffer_slot].status = Buffer::Status::Acquired;
+    return {{buffers[buffer_slot]}};
 }
 
 void BufferQueue::ReleaseBuffer(u32 slot) {
-    auto itr = std::find_if(queue.begin(), queue.end(),
-                            [&](const Buffer& buffer) { return buffer.slot == slot; });
-    ASSERT(itr != queue.end());
-    ASSERT(itr->status == Buffer::Status::Acquired);
-    itr->status = Buffer::Status::Free;
+    ASSERT(slot < buffers.size());
+    ASSERT(buffers[slot].status == Buffer::Status::Acquired);
+    ASSERT(buffers[slot].slot == slot);
+
+    buffers[slot].status = Buffer::Status::Free;
     free_buffers.push_back(slot);
 
     buffer_wait_event.writable->Signal();
 }
 
 void BufferQueue::Disconnect() {
-    queue.clear();
+    buffers.fill({});
     queue_sequence.clear();
     id = 1;
     layer_id = 1;
     buffer_wait_event.writable->Signal();
 }
 
 u32 BufferQueue::Query(QueryType type) {
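The comment that the old DequeueBuffer carried still describes the lifecycle the new code relies on: a free slot is handed to the application as Dequeued, comes back as Queued, is picked up by the compositor as Acquired, and becomes Free again once NVFlinger releases it (CancelBuffer short-circuits a Dequeued slot straight back to Free). A compact sketch of those transitions, inferred from the statuses used in the diff rather than copied from the real NVFlinger code:

    // Hypothetical state-transition helper; the real transitions live across
    // DequeueBuffer/QueueBuffer/AcquireBuffer/ReleaseBuffer in buffer_queue.cpp.
    enum class Status { Free, Dequeued, Queued, Acquired };

    constexpr Status Next(Status s) {
        switch (s) {
        case Status::Free:     return Status::Dequeued; // DequeueBuffer: app takes the slot
        case Status::Dequeued: return Status::Queued;   // QueueBuffer: app submits the frame
        case Status::Queued:   return Status::Acquired; // AcquireBuffer: compositor picks it up
        case Status::Acquired: return Status::Free;     // ReleaseBuffer: slot can be reused
        }
        return Status::Free;
    }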
buffer_queue.h

@@ -21,6 +21,7 @@ class KernelCore;
 
 namespace Service::NVFlinger {
 
+constexpr u32 buffer_slots = 0x40;
 struct IGBPBuffer {
     u32_le magic;
     u32_le width;
@@ -114,7 +115,7 @@ private:
     u64 layer_id;
 
     std::list<u32> free_buffers;
-    std::vector<Buffer> queue;
+    std::array<Buffer, buffer_slots> buffers;
     std::list<u32> queue_sequence;
     Kernel::EventPair buffer_wait_event;
 };
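The header change is what makes the slot-indexed code above safe: buffer_slots = 0x40 fixes the capacity at 64 slots, and the surrounding lists keep holding slot ids, so free_buffers and queue_sequence entries double as direct indices into buffers. A simplified view of the member change (field sets trimmed, not the full class):

    // Sketch of the member swap with a stand-in Buffer; everything else is unchanged.
    #include <array>
    #include <cstdint>
    #include <list>
    #include <vector>

    struct Buffer {
        std::uint32_t slot{};
        // ... status, IGBP buffer description, fences ...
    };

    constexpr std::uint32_t buffer_slots = 0x40; // 64 slots

    struct OldMembers {
        std::list<std::uint32_t> free_buffers;   // slot ids
        std::vector<Buffer> queue;               // grew as slots were preallocated
        std::list<std::uint32_t> queue_sequence; // submission order
    };

    struct NewMembers {
        std::list<std::uint32_t> free_buffers;    // unchanged: slot ids
        std::array<Buffer, buffer_slots> buffers; // fixed storage, indexed by slot id
        std::list<std::uint32_t> queue_sequence;  // unchanged
    };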
vi.cpp

@@ -282,18 +282,24 @@ public:
     void DeserializeData() override {
         [[maybe_unused]] const std::u16string token = ReadInterfaceToken();
         data = Read<Data>();
-        buffer = Read<NVFlinger::IGBPBuffer>();
+        if (data.contains_object != 0) {
+            buffer_container = Read<BufferContainer>();
+        }
     }
 
     struct Data {
         u32_le slot;
         INSERT_PADDING_WORDS(1);
         u32_le graphic_buffer_length;
         INSERT_PADDING_WORDS(1);
         u32_le contains_object;
     };
 
-    Data data;
-    NVFlinger::IGBPBuffer buffer;
+    struct BufferContainer {
+        u32_le graphic_buffer_length;
+        INSERT_PADDING_WORDS(1);
+        NVFlinger::IGBPBuffer buffer{};
+    };
+
+    Data data{};
+    BufferContainer buffer_container{};
 };
 
 class IGBPSetPreallocatedBufferResponseParcel : public Parcel {
@@ -547,7 +553,7 @@ private:
     case TransactionId::SetPreallocatedBuffer: {
         IGBPSetPreallocatedBufferRequestParcel request{ctx.ReadBuffer()};
 
-        buffer_queue.SetPreallocatedBuffer(request.data.slot, request.buffer);
+        buffer_queue.SetPreallocatedBuffer(request.data.slot, request.buffer_container.buffer);
 
         IGBPSetPreallocatedBufferResponseParcel response{};
         ctx.WriteBuffer(response.Serialize());
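On the vi.cpp side, the SetPreallocatedBuffer request parcel now only carries an IGBPBuffer when data.contains_object is non-zero, and the buffer arrives wrapped in a container that repeats the graphic buffer length. A rough sketch of that conditional read, using a hypothetical byte-stream reader in place of the real Parcel::Read<T>() (interface token and bounds checks omitted for brevity):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <optional>
    #include <vector>

    struct Data {
        std::uint32_t slot;
        std::uint32_t pad0;
        std::uint32_t graphic_buffer_length;
        std::uint32_t pad1;
        std::uint32_t contains_object;
    };

    struct IGBPBuffer {
        std::uint32_t magic;
        std::uint32_t width;
        std::uint32_t height;
        // ... remaining graphic buffer description ...
    };

    struct BufferContainer {
        std::uint32_t graphic_buffer_length;
        std::uint32_t pad;
        IGBPBuffer buffer;
    };

    // Hypothetical POD reader standing in for Parcel::Read<T>().
    template <typename T>
    T ReadPod(const std::vector<std::uint8_t>& bytes, std::size_t& offset) {
        T value{};
        std::memcpy(&value, bytes.data() + offset, sizeof(T));
        offset += sizeof(T);
        return value;
    }

    // Mirrors the new DeserializeData(): the buffer is only read when the flag says it is there.
    std::optional<BufferContainer> ReadRequest(const std::vector<std::uint8_t>& bytes) {
        std::size_t offset = 0;
        const auto data = ReadPod<Data>(bytes, offset);
        if (data.contains_object != 0) {
            return ReadPod<BufferContainer>(bytes, offset);
        }
        return std::nullopt;
    }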