Ryujinx/Ryujinx.Graphics.OpenGL/Queries/CounterQueue.cs
riperiperi ec3e848d79
Add a Multithreading layer for the GAL, multi-thread shader compilation at runtime (#2501)
* Initial Implementation

About as fast as NVIDIA's GL multithreading; can be improved with faster command queuing.

* Struct based command list

Speeds up a bit. Still a lot of time lost to resource copy.

* Do shader init while the render thread is active.

* Introduce circular span pool V1

Ideally should be able to use structs instead of references for storing these spans on commands. Will try that next.

* Refactor SpanRef some more

Use a struct to represent SpanRef, rather than a reference.

* Flush buffers on background thread

* Use a span for UpdateRenderScale.

Much faster than copying the array.

* Calculate command size using reflection

* WIP parallel shaders

* Some minor optimisation

* Only 2 max refs per command now.

The command with 3 refs is gone. 😌

* Don't cast on the GPU side

* Remove redundant casts, force sync on window present

* Fix Shader Cache

* Fix host shader save.

* Fixup to work with new renderer stuff

* Make command Run static, use array of delegates as lookup

Profile says this takes less time than the previous way.

* Bring up to date

* Add settings toggle. Fix Multithreading Off mode.

* Fix warning.

* Release tracking lock for flushes

* Fix Conditional Render fast path with threaded gal

* Make handle iteration safe when releasing the lock

This is mostly temporary.

* Attempt to set backend threading on driver

Only really works on NVIDIA, and only before launching a game.

* Fix race condition with BufferModifiedRangeList, exceptions in tracking actions

* Update buffer set commands

* Some cleanup

* Only use stutter workaround when using opengl renderer non-threaded

* Add host-conditional reservation of counter events

There has always been the possibility that conditional rendering could use a query object just as it is disposed by the counter queue. This change makes it so that when the host decides to use host conditional rendering, the query object is reserved so that it cannot be deleted. Counter events can optionally start reserved, as the threaded implementation can reserve them before the backend creates them, and there would otherwise be a short amount of time where the counter queue could dispose the event before a call to reserve it could be made.
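
A minimal sketch of the reservation idea, assuming a simple reservation counter inside the event. Only the ReserveForHostAccess call in QueueReport below comes from the actual code; the class, field and method bodies here are illustrative, not the real CounterQueueEvent implementation.

using System.Threading;

class ReservableEvent // illustrative stand-in, not the actual CounterQueueEvent
{
    private int _hostAccessReserved; // > 0 while host conditional rendering may still read the query
    private bool _disposePending;

    public bool Disposed { get; private set; }

    public void ReserveForHostAccess()
    {
        Interlocked.Increment(ref _hostAccessReserved);
    }

    public void ReleaseHostAccess()
    {
        // If this was the last reservation and a dispose was deferred, perform it now.
        if (Interlocked.Decrement(ref _hostAccessReserved) == 0 && _disposePending)
        {
            Dispose();
        }
    }

    public void Dispose()
    {
        if (Interlocked.CompareExchange(ref _hostAccessReserved, 0, 0) != 0)
        {
            _disposePending = true; // The host still holds a reservation; defer the dispose.
            return;
        }

        Disposed = true; // At this point the query object could safely be returned to the pool.
    }
}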

* Address Feedback

* Make counter flush tracked again.

Hopefully does not cause any issues this time.

* Wait for FlushTo on the main queue thread.

Currently assumes only one thread will want to FlushTo (in this case, the GPU thread)

* Add SDL2 headless integration

* Add HLE macro commands.

Co-authored-by: Mary <mary@mary.zone>
2021-08-27 00:31:29 +02:00


using OpenTK.Graphics.OpenGL;
using Ryujinx.Graphics.GAL;
using System;
using System.Collections.Generic;
using System.Threading;
namespace Ryujinx.Graphics.OpenGL.Queries
{
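// Manages the queue of counter events for a single counter type. A dedicated consumer
// thread accumulates query results, and query objects are pooled and reused because
// creating/disposing them on a shared context causes issues (see GetQueryObject).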
class CounterQueue : IDisposable
{
private const int QueryPoolInitialSize = 100;
public CounterType Type { get; }
public bool Disposed { get; private set; }
private Queue<CounterQueueEvent> _events = new Queue<CounterQueueEvent>();
private CounterQueueEvent _current;
private ulong _accumulatedCounter;
private int _waiterCount;
private object _lock = new object();
private Queue<BufferedQuery> _queryPool;
private AutoResetEvent _queuedEvent = new AutoResetEvent(false);
private AutoResetEvent _wakeSignal = new AutoResetEvent(false);
private AutoResetEvent _eventConsumed = new AutoResetEvent(false);
private Thread _consumerThread;
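
// Pre-allocates the query object pool and starts the consumer thread that processes completed events.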
internal CounterQueue(CounterType type)
{
Type = type;
QueryTarget glType = GetTarget(Type);
_queryPool = new Queue<BufferedQuery>(QueryPoolInitialSize);
for (int i = 0; i < QueryPoolInitialSize; i++)
{
_queryPool.Enqueue(new BufferedQuery(glType));
}
_current = new CounterQueueEvent(this, glType, 0);
_consumerThread = new Thread(EventConsumer);
_consumerThread.Start();
}
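
// Consumer loop: dequeues queued counter events and accumulates their results into
// _accumulatedCounter, sleeping on _queuedEvent when the queue is empty. After each
// iteration, waiters blocked in FlushTo are signalled via _eventConsumed.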
private void EventConsumer()
{
while (!Disposed)
{
CounterQueueEvent evt = null;
lock (_lock)
{
if (_events.Count > 0)
{
evt = _events.Dequeue();
}
}
if (evt == null)
{
_queuedEvent.WaitOne(); // No more events to go through, wait for more.
}
else
{
// Spin-wait rather than sleeping if there are any waiters, by passing null instead of the wake signal.
evt.TryConsume(ref _accumulatedCounter, true, _waiterCount == 0 ? _wakeSignal : null);
}
if (_waiterCount > 0)
{
_eventConsumed.Set();
}
}
}
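
// Rents a query object from the pool, creating a new one only if the pool is empty.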
internal BufferedQuery GetQueryObject()
{
// Creating/disposing query objects on a context we're sharing with will cause issues.
// So instead, make a lot of query objects on the main thread and reuse them.
lock (_lock)
{
if (_queryPool.Count > 0)
{
BufferedQuery result = _queryPool.Dequeue();
return result;
}
else
{
return new BufferedQuery(GetTarget(Type));
}
}
}
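
// Returns a query object to the pool so it can be reused.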
internal void ReturnQueryObject(BufferedQuery query)
{
lock (_lock)
{
_queryPool.Enqueue(query);
}
}
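
// Completes the current counter event and queues it for the consumer thread. If no draws
// happened since the last report, the event is dummied out and resultHandler is called with 0
// immediately. When hostReserved is true, the event is reserved up front so host conditional
// rendering can safely use its query object.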
public CounterQueueEvent QueueReport(EventHandler<ulong> resultHandler, ulong lastDrawIndex, bool hostReserved)
{
CounterQueueEvent result;
ulong draws = lastDrawIndex - _current.DrawIndex;
lock (_lock)
{
// A query's result only matters if at least one draw was performed during it.
// Otherwise, dummy it out and return 0 immediately.
if (hostReserved)
{
// This counter event is guaranteed to be available for host conditional rendering.
_current.ReserveForHostAccess();
}
if (draws > 0)
{
_current.Complete(true);
_events.Enqueue(_current);
_current.OnResult += resultHandler;
}
else
{
_current.Complete(false);
_current.Dispose();
resultHandler(_current, 0);
}
result = _current;
_current = new CounterQueueEvent(this, GetTarget(Type), lastDrawIndex);
}
_queuedEvent.Set();
return result;
}
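
// Resets the counter for the event currently being recorded.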
public void QueueReset()
{
lock (_lock)
{
_current.Clear();
}
}
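
// Maps a GAL counter type to the corresponding GL query target (defaulting to SamplesPassed).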
private static QueryTarget GetTarget(CounterType type)
{
switch (type)
{
case CounterType.SamplesPassed: return QueryTarget.SamplesPassed;
case CounterType.PrimitivesGenerated: return QueryTarget.PrimitivesGenerated;
case CounterType.TransformFeedbackPrimitivesWritten: return QueryTarget.TransformFeedbackPrimitivesWritten;
}
return QueryTarget.SamplesPassed;
}
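
// Flushes pending events. Non-blocking: just wakes the consumer thread.
// Blocking: consumes queued events on the calling thread until the queue is empty.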
public void Flush(bool blocking)
{
if (!blocking)
{
// Just wake the consumer thread - it will update the queries.
_wakeSignal.Set();
return;
}
lock (_lock)
{
// Tell the queue to process all events.
while (_events.Count > 0)
{
CounterQueueEvent flush = _events.Peek();
if (!flush.TryConsume(ref _accumulatedCounter, true))
{
return; // Stop if we encounter an event whose result is not ready yet.
}
_events.Dequeue();
}
}
}
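
// Blocks until the given event has been consumed by the queue thread.
// Currently assumes only one thread (the GPU thread) will want to FlushTo at a time.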
public void FlushTo(CounterQueueEvent evt)
{
// Flush the counter queue on the main thread.
Interlocked.Increment(ref _waiterCount);
_wakeSignal.Set();
while (!evt.Disposed)
{
_eventConsumed.WaitOne(1);
}
Interlocked.Decrement(ref _waiterCount);
}
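
// Disposes all pending events, stops the consumer thread and releases the pooled query objects.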
public void Dispose()
{
lock (_lock)
{
while (_events.Count > 0)
{
CounterQueueEvent evt = _events.Dequeue();
evt.Dispose();
}
Disposed = true;
}
_queuedEvent.Set();
_consumerThread.Join();
foreach (BufferedQuery query in _queryPool)
{
query.Dispose();
}
_queuedEvent.Dispose();
_wakeSignal.Dispose();
_eventConsumed.Dispose();
}
}
}