// Ryujinx/Ryujinx.Graphics.OpenGL/Pipeline.cs
using OpenTK.Graphics.OpenGL;
2019-12-29 00:45:33 +01:00
using Ryujinx.Common.Logging;
2019-10-13 08:02:07 +02:00
using Ryujinx.Graphics.GAL;
using Ryujinx.Graphics.OpenGL.Image;
using Ryujinx.Graphics.OpenGL.Queries;
2019-10-13 08:02:07 +02:00
using Ryujinx.Graphics.Shader;
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
2019-10-13 08:02:07 +02:00
namespace Ryujinx.Graphics.OpenGL
{
2019-12-31 23:09:49 +01:00
class Pipeline : IPipeline, IDisposable
2019-10-13 08:02:07 +02:00
{
// Emulates GL_NV_draw_texture when the extension is unavailable (see DrawTexture).
private readonly DrawTextureEmulation _drawTexture;
// Count of draws issued so far; incremented elsewhere in this class (PostDraw, not shown here).
internal ulong DrawCount { get; private set; }
2019-10-13 08:02:07 +02:00
// Currently bound shader program.
private Program _program;
private bool _rasterizerDiscard;
2019-10-13 08:02:07 +02:00
private VertexArray _vertexArray;
private Framebuffer _framebuffer;
// Base byte offset into the currently bound index buffer.
private IntPtr _indexBaseOffset;
private DrawElementsType _elementsType;
private PrimitiveType _primitiveType;
// Front-face stencil write mask, cached so clears can temporarily override and restore it.
private int _stencilFrontMask;
2019-10-13 08:02:07 +02:00
// Cached GL state, tracked so emulated draws (DrawTexture fallback) can save/restore it.
private bool _depthMask;
private bool _depthTestEnable;
private bool _stencilTestEnable;
private bool _cullEnable;
// Flattened viewport (x, y, w, h per viewport) and depth range (near, far) arrays,
// kept so the viewport can be re-applied after emulated draws.
private float[] _viewportArray = Array.Empty<float>();
private double[] _depthRangeArray = Array.Empty<double>();
2019-10-13 08:02:07 +02:00
private int _boundDrawFramebuffer;
private int _boundReadFramebuffer;
// Note: counter queue events used for host conditional rendering are reserved while
// in use, so the counter queue cannot dispose a query object mid-render.
// Counter event currently driving host conditional rendering, if any.
private CounterQueueEvent _activeConditionalRender;
// Minimal 4-component vector used for the support buffer layout below.
private struct Vector4<T>
{
public T X;
public T Y;
public T Z;
public T W;
}
// Per-render-target flag (X component): non-zero when the target format is BGR,
// uploaded to the support buffer for shaders (see SetRenderTargets).
private Vector4<int>[] _fpIsBgra = new Vector4<int>[SupportBuffer.FragmentIsBgraCount];
// Render scale values uploaded to the support buffer; element 0 is the render target scale.
private Vector4<float>[] _renderScale = new Vector4<float>[65];
// Texture/sampler assigned to unit 0, tracked so they can be rebound
// after operations that clobber unit 0 (e.g. the DrawTexture fallback).
private TextureBase _unit0Texture;
private Sampler _unit0Sampler;
2019-10-13 08:02:07 +02:00
private FrontFaceDirection _frontFace;
private ClipOrigin _clipOrigin;
private ClipDepthMode _clipDepthMode;
2019-10-13 08:02:07 +02:00
// Cached color write masks, one RGBA bitfield per render target (bit 0 = red ... bit 3 = alpha).
private readonly uint[] _componentMasks;
2019-10-13 08:02:07 +02:00
2021-01-26 22:44:07 +01:00
// Bit i set = scissor test currently enabled for viewport index i.
private uint _scissorEnables;
// Transform feedback state: whether feedback is active and with which topology.
private bool _tfEnabled;
private TransformFeedbackPrimitiveType _tfTopology;
// Uniform buffer holding auxiliary data for shaders (BGRA flags, render scales).
private BufferHandle _supportBuffer;
// Private copies of transform feedback buffers and the user-supplied ranges they mirror.
private readonly BufferHandle[] _tfbs;
private readonly BufferRange[] _tfbTargets;
// Last blend constant applied, to skip redundant GL.BlendColor calls.
private ColorF _blendConstant;
internal Pipeline()
{
    _drawTexture = new DrawTextureEmulation();
    _rasterizerDiscard = false;
    _clipOrigin = ClipOrigin.LowerLeft;
    _clipDepthMode = ClipDepthMode.NegativeOneToOne;

    // Every render target starts with all four color components writable (mask 0xf).
    _componentMasks = new uint[Constants.MaxRenderTargets];

    for (int i = 0; i < _componentMasks.Length; i++)
    {
        _componentMasks[i] = 0xf;
    }

    // Render scale defaults to 1x (only the X component is meaningful).
    var identityScale = new Vector4<float> { X = 1f, Y = 0f, Z = 0f, W = 0f };
    _renderScale.AsSpan().Fill(identityScale);

    _tfbs = new BufferHandle[Constants.MaxTransformFeedbackBuffers];
    _tfbTargets = new BufferRange[Constants.MaxTransformFeedbackBuffers];
}
// Creates the support (uniform) buffer, binds it to uniform binding point 0 and
// seeds it with the initial BGRA flags and render scale values.
public void Initialize()
{
_supportBuffer = Buffer.Create(SupportBuffer.RequiredSize);
GL.BindBufferBase(BufferRangeTarget.UniformBuffer, 0, Unsafe.As<BufferHandle, int>(ref _supportBuffer));
SetSupportBufferData<Vector4<int>>(SupportBuffer.FragmentIsBgraOffset, _fpIsBgra, SupportBuffer.FragmentIsBgraCount);
SetSupportBufferData<Vector4<float>>(SupportBuffer.FragmentRenderScaleOffset, _renderScale, SupportBuffer.RenderScaleMaxCount);
}
// Full memory barrier: orders all prior incoherent writes before subsequent GL operations.
public void Barrier()
{
GL.MemoryBarrier(MemoryBarrierFlags.AllBarrierBits);
}
2019-10-13 08:02:07 +02:00
public void BeginTransformFeedback(PrimitiveTopology topology)
{
    // Remember the topology so feedback can be paused and resumed by other state changes.
    _tfTopology = topology.ConvertToTfType();

    GL.BeginTransformFeedback(_tfTopology);

    _tfEnabled = true;
}
// Fills a region of the destination buffer with the given 32-bit value.
public void ClearBuffer(BufferHandle destination, int offset, int size, uint value)
{
Buffer.Clear(destination, offset, size, value);
}
2019-10-13 08:02:07 +02:00
public void ClearRenderTargetColor(int index, uint componentMask, ColorF color)
{
    // Apply the requested component mask just for this clear.
    bool red = (componentMask & 1u) != 0;
    bool green = (componentMask & 2u) != 0;
    bool blue = (componentMask & 4u) != 0;
    bool alpha = (componentMask & 8u) != 0;

    GL.ColorMask(index, red, green, blue, alpha);

    float[] clearColor = { color.Red, color.Green, color.Blue, color.Alpha };
    GL.ClearBuffer(OpenTK.Graphics.OpenGL.ClearBuffer.Color, index, clearColor);

    // Put the previously configured mask back.
    RestoreComponentMask(index);
}
2019-12-29 18:41:50 +01:00
public void ClearRenderTargetDepthStencil(float depthValue, bool depthMask, int stencilValue, int stencilMask)
{
    // Temporarily override the write masks when the requested ones differ from current state.
    bool overrodeStencilMask = stencilMask != 0 && stencilMask != _stencilFrontMask;
    bool overrodeDepthMask = depthMask && depthMask != _depthMask;

    if (overrodeStencilMask)
    {
        GL.StencilMaskSeparate(StencilFace.Front, stencilMask);
    }

    if (overrodeDepthMask)
    {
        GL.DepthMask(depthMask);
    }

    bool clearDepth = depthMask;
    bool clearStencil = stencilMask != 0;

    if (clearDepth && clearStencil)
    {
        GL.ClearBuffer(ClearBufferCombined.DepthStencil, 0, depthValue, stencilValue);
    }
    else if (clearDepth)
    {
        GL.ClearBuffer(OpenTK.Graphics.OpenGL.ClearBuffer.Depth, 0, ref depthValue);
    }
    else if (clearStencil)
    {
        GL.ClearBuffer(OpenTK.Graphics.OpenGL.ClearBuffer.Stencil, 0, ref stencilValue);
    }

    // Restore the masks tracked by the pipeline state.
    if (overrodeStencilMask)
    {
        GL.StencilMaskSeparate(StencilFace.Front, _stencilFrontMask);
    }

    if (overrodeDepthMask)
    {
        GL.DepthMask(_depthMask);
    }
}
// Barrier for indirect draw/dispatch command data written by shaders.
public void CommandBufferBarrier()
{
GL.MemoryBarrier(MemoryBarrierFlags.CommandBarrierBit);
}
// Copies a byte range from one buffer to another.
public void CopyBuffer(BufferHandle source, BufferHandle destination, int srcOffset, int dstOffset, int size)
{
Buffer.Copy(source, destination, srcOffset, dstOffset, size);
}
2019-12-29 18:41:50 +01:00
public void DispatchCompute(int groupsX, int groupsY, int groupsZ)
{
    if (_program.IsLinked)
    {
        PrepareForDispatch();

        GL.DispatchCompute(groupsX, groupsY, groupsZ);
    }
    else
    {
        // Dispatching with a failed link would raise a GL error; skip instead.
        Logger.Debug?.Print(LogClass.Gpu, "Dispatch error, shader not linked.");
    }
}
2019-10-13 08:02:07 +02:00
public void Draw(int vertexCount, int instanceCount, int firstVertex, int firstInstance)
{
    if (!_program.IsLinked)
    {
        Logger.Debug?.Print(LogClass.Gpu, "Draw error, shader not linked.");
        return;
    }

    PreDraw();

    // Quad topologies need emulation when the host GL does not support them.
    bool emulateQuads = !HwCapabilities.SupportsQuads;

    if (emulateQuads && _primitiveType == PrimitiveType.Quads)
    {
        DrawQuadsImpl(vertexCount, instanceCount, firstVertex, firstInstance);
    }
    else if (emulateQuads && _primitiveType == PrimitiveType.QuadStrip)
    {
        DrawQuadStripImpl(vertexCount, instanceCount, firstVertex, firstInstance);
    }
    else
    {
        DrawImpl(vertexCount, instanceCount, firstVertex, firstInstance);
    }

    PostDraw();
}
2019-10-13 08:02:07 +02:00
// Emulates GL_QUADS by drawing each quad as a 4-vertex triangle fan.
// Previously instanceCount/firstInstance were ignored (TODO: Instanced rendering);
// now instanced draws take a per-quad instanced path, mirroring DrawQuadStripImpl.
private void DrawQuadsImpl(
    int vertexCount,
    int instanceCount,
    int firstVertex,
    int firstInstance)
{
    int quadsCount = vertexCount / 4;

    if (firstInstance != 0 || instanceCount != 1)
    {
        // No instanced multi-draw variant exists, so draw one quad at a time.
        for (int quadIndex = 0; quadIndex < quadsCount; quadIndex++)
        {
            GL.DrawArraysInstancedBaseInstance(PrimitiveType.TriangleFan, firstVertex + quadIndex * 4, 4, instanceCount, firstInstance);
        }
    }
    else
    {
        // Non-instanced: batch all quads into a single multi-draw call.
        int[] firsts = new int[quadsCount];
        int[] counts = new int[quadsCount];

        for (int quadIndex = 0; quadIndex < quadsCount; quadIndex++)
        {
            firsts[quadIndex] = firstVertex + quadIndex * 4;
            counts[quadIndex] = 4;
        }

        GL.MultiDrawArrays(
            PrimitiveType.TriangleFan,
            firsts,
            counts,
            quadsCount);
    }
}
// Emulates GL_QUAD_STRIP with triangle fans; each quad shares two vertices with the previous one.
private void DrawQuadStripImpl(
    int vertexCount,
    int instanceCount,
    int firstVertex,
    int firstInstance)
{
    int quadsCount = (vertexCount - 2) / 2;

    if (instanceCount == 1 && firstInstance == 0)
    {
        // Non-instanced: batch every quad into one multi-draw call.
        int[] firsts = new int[quadsCount];
        int[] counts = new int[quadsCount];

        for (int i = 0; i < quadsCount; i++)
        {
            firsts[i] = firstVertex + i * 2;
            counts[i] = 4;
        }

        GL.MultiDrawArrays(
            PrimitiveType.TriangleFan,
            firsts,
            counts,
            quadsCount);
    }
    else
    {
        // Instanced: no multi-draw variant exists, so draw one quad at a time.
        for (int i = 0; i < quadsCount; i++)
        {
            GL.DrawArraysInstancedBaseInstance(PrimitiveType.TriangleFan, firstVertex + i * 2, 4, instanceCount, firstInstance);
        }
    }
}
2019-10-13 08:02:07 +02:00
// Dispatches to the cheapest GL draw entry point that expresses the requested parameters.
private void DrawImpl(
    int vertexCount,
    int instanceCount,
    int firstVertex,
    int firstInstance)
{
    if (firstInstance != 0)
    {
        GL.DrawArraysInstancedBaseInstance(
            _primitiveType,
            firstVertex,
            vertexCount,
            instanceCount,
            firstInstance);
    }
    else if (instanceCount != 1)
    {
        GL.DrawArraysInstanced(_primitiveType, firstVertex, vertexCount, instanceCount);
    }
    else
    {
        GL.DrawArrays(_primitiveType, firstVertex, vertexCount);
    }
}
public void DrawIndexed(
    int indexCount,
    int instanceCount,
    int firstIndex,
    int firstVertex,
    int firstInstance)
{
    if (!_program.IsLinked)
    {
        Logger.Debug?.Print(LogClass.Gpu, "Draw error, shader not linked.");
        return;
    }

    PreDraw();

    // Index size in bytes; defaults to 1 (unsigned byte).
    int indexElemSize = 1;

    if (_elementsType == DrawElementsType.UnsignedShort)
    {
        indexElemSize = 2;
    }
    else if (_elementsType == DrawElementsType.UnsignedInt)
    {
        indexElemSize = 4;
    }

    IntPtr indexBaseOffset = _indexBaseOffset + firstIndex * indexElemSize;

    // Quad topologies need emulation when the host GL does not support them.
    bool emulateQuads = !HwCapabilities.SupportsQuads;

    if (emulateQuads && _primitiveType == PrimitiveType.Quads)
    {
        DrawQuadsIndexedImpl(
            indexCount,
            instanceCount,
            indexBaseOffset,
            indexElemSize,
            firstVertex,
            firstInstance);
    }
    else if (emulateQuads && _primitiveType == PrimitiveType.QuadStrip)
    {
        DrawQuadStripIndexedImpl(
            indexCount,
            instanceCount,
            indexBaseOffset,
            indexElemSize,
            firstVertex,
            firstInstance);
    }
    else
    {
        DrawIndexedImpl(
            indexCount,
            instanceCount,
            indexBaseOffset,
            firstVertex,
            firstInstance);
    }

    PostDraw();
}
// Emulates indexed GL_QUADS draws: each quad becomes a 4-index triangle fan.
// Instanced draws fall back to one GL call per quad; non-instanced draws
// are batched into a single multi-draw call.
private void DrawQuadsIndexedImpl(
int indexCount,
int instanceCount,
IntPtr indexBaseOffset,
int indexElemSize,
int firstVertex,
int firstInstance)
{
int quadsCount = indexCount / 4;
if (firstInstance != 0 || instanceCount != 1)
{
// Instanced path: pick the narrowest GL entry point that covers the
// non-zero parameters, then issue one draw per quad.
if (firstVertex != 0 && firstInstance != 0)
{
for (int quadIndex = 0; quadIndex < quadsCount; quadIndex++)
{
GL.DrawElementsInstancedBaseVertexBaseInstance(
PrimitiveType.TriangleFan,
4,
_elementsType,
indexBaseOffset + quadIndex * 4 * indexElemSize,
instanceCount,
firstVertex,
firstInstance);
}
}
else if (firstInstance != 0)
{
for (int quadIndex = 0; quadIndex < quadsCount; quadIndex++)
{
GL.DrawElementsInstancedBaseInstance(
PrimitiveType.TriangleFan,
4,
_elementsType,
indexBaseOffset + quadIndex * 4 * indexElemSize,
instanceCount,
firstInstance);
}
}
else
{
for (int quadIndex = 0; quadIndex < quadsCount; quadIndex++)
{
GL.DrawElementsInstanced(
PrimitiveType.TriangleFan,
4,
_elementsType,
indexBaseOffset + quadIndex * 4 * indexElemSize,
instanceCount);
}
}
}
else
{
// Non-instanced path: build per-quad offset/count/base-vertex arrays and
// submit everything in one multi-draw call.
IntPtr[] indices = new IntPtr[quadsCount];
int[] counts = new int[quadsCount];
int[] baseVertices = new int[quadsCount];
for (int quadIndex = 0; quadIndex < quadsCount; quadIndex++)
{
indices[quadIndex] = indexBaseOffset + quadIndex * 4 * indexElemSize;
counts[quadIndex] = 4;
baseVertices[quadIndex] = firstVertex;
}
GL.MultiDrawElementsBaseVertex(
PrimitiveType.TriangleFan,
counts,
_elementsType,
indices,
quadsCount,
baseVertices);
}
}
// Emulates indexed GL_QUAD_STRIP draws with triangle fans; each quad of the
// strip shares two indices with the previous one.
private void DrawQuadStripIndexedImpl(
    int indexCount,
    int instanceCount,
    IntPtr indexBaseOffset,
    int indexElemSize,
    int firstVertex,
    int firstInstance)
{
    // TODO: Instanced rendering.
    int quadsCount = (indexCount - 2) / 2;

    IntPtr[] indices = new IntPtr[quadsCount];
    int[] counts = new int[quadsCount];
    int[] baseVertices = new int[quadsCount];

    for (int i = 0; i < quadsCount; i++)
    {
        indices[i] = indexBaseOffset + i * 2 * indexElemSize;
        counts[i] = 4;
        baseVertices[i] = firstVertex;
    }

    GL.MultiDrawElementsBaseVertex(
        PrimitiveType.TriangleFan,
        counts,
        _elementsType,
        indices,
        quadsCount,
        baseVertices);
}
2019-10-13 08:02:07 +02:00
// Dispatches to the cheapest GL indexed draw entry point for the given parameters.
private void DrawIndexedImpl(
    int indexCount,
    int instanceCount,
    IntPtr indexBaseOffset,
    int firstVertex,
    int firstInstance)
{
    bool instanced = firstInstance != 0 || instanceCount != 1;

    if (!instanced)
    {
        if (firstVertex == 0)
        {
            GL.DrawElements(_primitiveType, indexCount, _elementsType, indexBaseOffset);
        }
        else
        {
            GL.DrawElementsBaseVertex(
                _primitiveType,
                indexCount,
                _elementsType,
                indexBaseOffset,
                firstVertex);
        }
    }
    else if (firstInstance == 0)
    {
        if (firstVertex == 0)
        {
            GL.DrawElementsInstanced(
                _primitiveType,
                indexCount,
                _elementsType,
                indexBaseOffset,
                instanceCount);
        }
        else
        {
            GL.DrawElementsInstancedBaseVertex(
                _primitiveType,
                indexCount,
                _elementsType,
                indexBaseOffset,
                instanceCount,
                firstVertex);
        }
    }
    else if (firstVertex == 0)
    {
        GL.DrawElementsInstancedBaseInstance(
            _primitiveType,
            indexCount,
            _elementsType,
            indexBaseOffset,
            instanceCount,
            firstInstance);
    }
    else
    {
        GL.DrawElementsInstancedBaseVertexBaseInstance(
            _primitiveType,
            indexCount,
            _elementsType,
            indexBaseOffset,
            instanceCount,
            firstVertex,
            firstInstance);
    }
}
// Draws a textured screen-space rectangle. Uses GL_NV_draw_texture when available;
// otherwise falls back to a shader-based blit, saving and restoring any pipeline
// state the fallback clobbers. The order of the disable/restore calls is significant.
public void DrawTexture(ITexture texture, ISampler sampler, Extents2DF srcRegion, Extents2DF dstRegion)
{
if (texture is TextureView view && sampler is Sampler samp)
{
if (HwCapabilities.SupportsDrawTexture)
{
// Source coordinates are normalized to [0, 1] by dividing by the view size.
GL.NV.DrawTexture(
view.Handle,
samp.Handle,
dstRegion.X1,
dstRegion.Y1,
dstRegion.X2,
dstRegion.Y2,
0,
srcRegion.X1 / view.Width,
srcRegion.Y1 / view.Height,
srcRegion.X2 / view.Width,
srcRegion.Y2 / view.Height);
}
else
{
// Only toggles the cap if it is currently enabled (we restore it below).
static void Disable(EnableCap cap, bool enabled)
{
if (enabled)
{
GL.Disable(cap);
}
}
static void Enable(EnableCap cap, bool enabled)
{
if (enabled)
{
GL.Enable(cap);
}
}
// Suspend state that would interfere with the emulated full-quad draw.
Disable(EnableCap.CullFace, _cullEnable);
Disable(EnableCap.StencilTest, _stencilTestEnable);
Disable(EnableCap.DepthTest, _depthTestEnable);
if (_depthMask)
{
GL.DepthMask(false);
}
if (_tfEnabled)
{
GL.EndTransformFeedback();
}
_drawTexture.Draw(
view,
samp,
dstRegion.X1,
dstRegion.Y1,
dstRegion.X2,
dstRegion.Y2,
srcRegion.X1 / view.Width,
srcRegion.Y1 / view.Height,
srcRegion.X2 / view.Width,
srcRegion.Y2 / view.Height);
// Restore everything the emulated draw changed: program, unit 0 sampler,
// viewport, and the caps/masks suspended above.
_program?.Bind();
_unit0Sampler?.Bind(0);
GL.ViewportArray(0, 1, _viewportArray);
Enable(EnableCap.CullFace, _cullEnable);
Enable(EnableCap.StencilTest, _stencilTestEnable);
Enable(EnableCap.DepthTest, _depthTestEnable);
if (_depthMask)
{
GL.DepthMask(true);
}
if (_tfEnabled)
{
GL.BeginTransformFeedback(_tfTopology);
}
}
}
}
// Stops transform feedback and clears the tracked flag so other state changes
// no longer pause/resume it.
public void EndTransformFeedback()
{
GL.EndTransformFeedback();
_tfEnabled = false;
}
// Issues a GPU-driven multi-draw: draw parameters come from indirectBuffer and the
// actual draw count (capped at maxDrawCount) from parameterBuffer.
public void MultiDrawIndirectCount(BufferRange indirectBuffer, BufferRange parameterBuffer, int maxDrawCount, int stride)
{
if (!_program.IsLinked)
{
Logger.Debug?.Print(LogClass.Gpu, "Draw error, shader not linked.");
return;
}
PreDraw();
GL.BindBuffer((BufferTarget)All.DrawIndirectBuffer, indirectBuffer.Handle.ToInt32());
GL.BindBuffer((BufferTarget)All.ParameterBuffer, parameterBuffer.Handle.ToInt32());
GL.MultiDrawArraysIndirectCount(
_primitiveType,
(IntPtr)indirectBuffer.Offset,
(IntPtr)parameterBuffer.Offset,
maxDrawCount,
stride);
PostDraw();
}
// Indexed variant of MultiDrawIndirectCount. The index buffer range is applied
// before the draw and restored afterwards.
public void MultiDrawIndexedIndirectCount(BufferRange indirectBuffer, BufferRange parameterBuffer, int maxDrawCount, int stride)
{
if (!_program.IsLinked)
{
Logger.Debug?.Print(LogClass.Gpu, "Draw error, shader not linked.");
return;
}
PreDraw();
_vertexArray.SetRangeOfIndexBuffer();
GL.BindBuffer((BufferTarget)All.DrawIndirectBuffer, indirectBuffer.Handle.ToInt32());
GL.BindBuffer((BufferTarget)All.ParameterBuffer, parameterBuffer.Handle.ToInt32());
GL.MultiDrawElementsIndirectCount(
_primitiveType,
(Version46)_elementsType,
(IntPtr)indirectBuffer.Offset,
(IntPtr)parameterBuffer.Offset,
maxDrawCount,
stride);
_vertexArray.RestoreIndexBuffer();
PostDraw();
}
public void SetAlphaTest(bool enable, float reference, CompareOp op)
{
    if (enable)
    {
        // Legacy fixed-function alpha test (compatibility profile).
        GL.AlphaFunc((AlphaFunction)op.Convert(), reference);
        GL.Enable(EnableCap.AlphaTest);
    }
    else
    {
        GL.Disable(EnableCap.AlphaTest);
    }
}
2019-12-29 18:41:50 +01:00
public void SetBlendState(int index, BlendDescriptor blend)
{
    if (!blend.Enable)
    {
        GL.Disable(IndexedEnableCap.Blend, index);
        return;
    }

    GL.BlendEquationSeparate(
        index,
        blend.ColorOp.Convert(),
        blend.AlphaOp.Convert());

    GL.BlendFuncSeparate(
        index,
        (BlendingFactorSrc)blend.ColorSrcFactor.Convert(),
        (BlendingFactorDest)blend.ColorDstFactor.Convert(),
        (BlendingFactorSrc)blend.AlphaSrcFactor.Convert(),
        (BlendingFactorDest)blend.AlphaDstFactor.Convert());

    // True for any factor that reads the second fragment shader output (SRC1).
    static bool IsDualSource(BlendFactor factor)
    {
        return factor == BlendFactor.Src1Color ||
               factor == BlendFactor.Src1ColorGl ||
               factor == BlendFactor.Src1Alpha ||
               factor == BlendFactor.Src1AlphaGl ||
               factor == BlendFactor.OneMinusSrc1Color ||
               factor == BlendFactor.OneMinusSrc1ColorGl ||
               factor == BlendFactor.OneMinusSrc1Alpha ||
               factor == BlendFactor.OneMinusSrc1AlphaGl;
    }

    EnsureFramebuffer();

    _framebuffer.SetDualSourceBlend(
        IsDualSource(blend.ColorSrcFactor) ||
        IsDualSource(blend.ColorDstFactor) ||
        IsDualSource(blend.AlphaSrcFactor) ||
        IsDualSource(blend.AlphaDstFactor));

    // Skip the GL call when the blend constant did not change.
    if (_blendConstant != blend.BlendConstant)
    {
        _blendConstant = blend.BlendConstant;

        GL.BlendColor(
            blend.BlendConstant.Red,
            blend.BlendConstant.Green,
            blend.BlendConstant.Blue,
            blend.BlendConstant.Alpha);
    }

    GL.Enable(IndexedEnableCap.Blend, index);
}
public void SetDepthBias(PolygonModeMask enables, float factor, float units, float clamp)
{
    // Toggle a cap to match the corresponding bit of the enable mask.
    static void SetCap(EnableCap cap, bool enabled)
    {
        if (enabled)
        {
            GL.Enable(cap);
        }
        else
        {
            GL.Disable(cap);
        }
    }

    SetCap(EnableCap.PolygonOffsetPoint, (enables & PolygonModeMask.Point) != 0);
    SetCap(EnableCap.PolygonOffsetLine, (enables & PolygonModeMask.Line) != 0);
    SetCap(EnableCap.PolygonOffsetFill, (enables & PolygonModeMask.Fill) != 0);

    // No mode enabled: nothing to offset.
    if (enables == 0)
    {
        return;
    }

    if (HwCapabilities.SupportsPolygonOffsetClamp)
    {
        GL.PolygonOffsetClamp(factor, units, clamp);
    }
    else
    {
        GL.PolygonOffset(factor, units);
    }
}
public void SetDepthClamp(bool clamp)
{
    if (clamp)
    {
        GL.Enable(EnableCap.DepthClamp);
    }
    else
    {
        GL.Disable(EnableCap.DepthClamp);
    }
}
public void SetDepthMode(DepthMode mode)
{
    ClipDepthMode newMode = mode.Convert();

    // Only touch clip control when the mode actually changes.
    if (_clipDepthMode != newMode)
    {
        _clipDepthMode = newMode;

        GL.ClipControl(_clipOrigin, newMode);
    }
}
2019-10-13 08:02:07 +02:00
public void SetDepthTest(DepthTestDescriptor depthTest)
{
    bool test = depthTest.TestEnable;
    bool write = depthTest.WriteEnable;

    if (test)
    {
        GL.Enable(EnableCap.DepthTest);
        GL.DepthFunc((DepthFunction)depthTest.Func.Convert());
    }
    else
    {
        GL.Disable(EnableCap.DepthTest);
    }

    GL.DepthMask(write);

    // Cached so emulated draws can save and restore depth state.
    _depthMask = write;
    _depthTestEnable = test;
}
public void SetFaceCulling(bool enable, Face face)
{
    // Cached so emulated draws can save and restore culling state.
    _cullEnable = enable;

    if (enable)
    {
        GL.CullFace(face.Convert());
        GL.Enable(EnableCap.CullFace);
    }
    else
    {
        GL.Disable(EnableCap.CullFace);
    }
}
public void SetFrontFace(FrontFace frontFace)
{
    // Cache the converted winding, then apply it through the clip-origin-aware path.
    _frontFace = frontFace.Convert();

    SetFrontFace(_frontFace);
}
public void SetImage(int binding, ITexture texture, Format imageFormat)
{
    if (texture == null)
    {
        return;
    }

    TextureBase texBase = (TextureBase)texture;

    SizedInternalFormat format = FormatTable.GetImageFormat(imageFormat);

    // A format of zero means there is no image-compatible equivalent; skip the bind.
    if (format != 0)
    {
        GL.BindImageTexture(binding, texBase.Handle, 0, true, 0, TextureAccess.ReadWrite, format);
    }
}
public void SetIndexBuffer(BufferRange buffer, IndexType type)
{
    // Cache the element type and base offset for later indexed draws.
    _elementsType = type.Convert();
    _indexBaseOffset = (IntPtr)buffer.Offset;

    EnsureVertexArray();

    _vertexArray.SetIndexBuffer(buffer);
}
public void SetLogicOpState(bool enable, LogicalOp op)
{
    if (!enable)
    {
        GL.Disable(EnableCap.ColorLogicOp);
        return;
    }

    GL.Enable(EnableCap.ColorLogicOp);
    GL.LogicOp((LogicOp)op.Convert());
}
// Sets line width and toggles line antialiasing.
public void SetLineParameters(float width, bool smooth)
{
if (smooth)
{
GL.Enable(EnableCap.LineSmooth);
}
else
{
GL.Disable(EnableCap.LineSmooth);
}
GL.LineWidth(width);
}
// Sets the tessellation patch vertex count and the default outer/inner levels
// used when no tessellation control shader is bound.
public unsafe void SetPatchParameters(int vertices, ReadOnlySpan<float> defaultOuterLevel, ReadOnlySpan<float> defaultInnerLevel)
{
GL.PatchParameter(PatchParameterInt.PatchVertices, vertices);
fixed (float* pOuterLevel = defaultOuterLevel)
{
GL.PatchParameter(PatchParameterFloat.PatchDefaultOuterLevel, pOuterLevel);
}
fixed (float* pInnerLevel = defaultInnerLevel)
{
GL.PatchParameter(PatchParameterFloat.PatchDefaultInnerLevel, pInnerLevel);
}
}
public void SetPointParameters(float size, bool isProgramPointSize, bool enablePointSprite, Origin origin)
{
    // GL_POINT_SPRITE was deprecated in core profile 3.2+ and causes GL_INVALID_ENUM when set.
    // As we don't know if the current context is core or compat, it's safer to keep this code.
    if (enablePointSprite)
    {
        GL.Enable(EnableCap.PointSprite);
    }
    else
    {
        GL.Disable(EnableCap.PointSprite);
    }

    if (isProgramPointSize)
    {
        GL.Enable(EnableCap.ProgramPointSize);
    }
    else
    {
        GL.Disable(EnableCap.ProgramPointSize);
    }

    PointSpriteCoordOriginParameter coordOrigin = origin == Origin.LowerLeft
        ? PointSpriteCoordOriginParameter.LowerLeft
        : PointSpriteCoordOriginParameter.UpperLeft;

    GL.PointParameter(coordOrigin);

    // Games seem to set point size to 0, which generates GL_INVALID_VALUE
    // (the spec requires a size greater than 0), so clamp to the smallest positive value.
    GL.PointSize(Math.Max(float.Epsilon, size));
}
public void SetPolygonMode(GAL.PolygonMode frontMode, GAL.PolygonMode backMode)
{
    if (frontMode != backMode)
    {
        GL.PolygonMode(MaterialFace.Front, frontMode.Convert());
        GL.PolygonMode(MaterialFace.Back, backMode.Convert());
    }
    else
    {
        // Single call when both faces share the same mode.
        GL.PolygonMode(MaterialFace.FrontAndBack, frontMode.Convert());
    }
}
2019-10-13 08:02:07 +02:00
public void SetPrimitiveRestart(bool enable, int index)
{
    if (enable)
    {
        // The restart index must be set before (or while) the feature is enabled.
        GL.PrimitiveRestartIndex(index);
        GL.Enable(EnableCap.PrimitiveRestart);
    }
    else
    {
        GL.Disable(EnableCap.PrimitiveRestart);
    }
}
// Caches the primitive type; it is consumed by the draw entry points.
public void SetPrimitiveTopology(PrimitiveTopology topology)
{
_primitiveType = topology.Convert();
}
2019-12-29 18:41:50 +01:00
public void SetProgram(IProgram program)
{
    Program prog = (Program)program;

    _program = prog;

    if (_tfEnabled)
    {
        // The program cannot change while transform feedback is active,
        // so pause it around the bind.
        GL.EndTransformFeedback();
        prog.Bind();
        GL.BeginTransformFeedback(_tfTopology);
    }
    else
    {
        prog.Bind();
    }
}
public void SetRasterizerDiscard(bool discard)
{
    // Cached so conditional render paths can consult the current value.
    _rasterizerDiscard = discard;

    if (discard)
    {
        GL.Enable(EnableCap.RasterizerDiscard);
    }
    else
    {
        GL.Disable(EnableCap.RasterizerDiscard);
    }
}
// Updates the render target scale (element 0 of the render scale array) and
// uploads only that element to the support buffer.
public void SetRenderTargetScale(float scale)
{
_renderScale[0].X = scale;
SetSupportBufferData<Vector4<float>>(SupportBuffer.FragmentRenderScaleOffset, _renderScale, 1); // Just the first element.
}
public void SetRenderTargetColorMasks(ReadOnlySpan<uint> componentMasks)
{
    // Cache each mask, then apply it through the common restore path.
    for (int i = 0; i < componentMasks.Length; i++)
    {
        _componentMasks[i] = componentMasks[i];

        RestoreComponentMask(i);
    }
}
public void SetRenderTargets(ITexture[] colors, ITexture depthStencil)
{
    EnsureFramebuffer();

    bool bgraDirty = false;

    for (int i = 0; i < colors.Length; i++)
    {
        TextureView color = (TextureView)colors[i];

        _framebuffer.AttachColor(i, color);

        // Shaders need to know which targets use a BGR format (flag goes to the support buffer).
        int isBgra = color != null && color.Format.IsBgr() ? 1 : 0;

        if (_fpIsBgra[i].X != isBgra)
        {
            _fpIsBgra[i].X = isBgra;
            bgraDirty = true;

            RestoreComponentMask(i);
        }
    }

    if (bgraDirty)
    {
        SetSupportBufferData<Vector4<int>>(SupportBuffer.FragmentIsBgraOffset, _fpIsBgra, SupportBuffer.FragmentIsBgraCount);
    }

    _framebuffer.AttachDepthStencil((TextureView)depthStencil);
    _framebuffer.SetDrawBuffers(colors.Length);
}
public void SetSampler(int binding, ISampler sampler)
{
    if (sampler == null)
    {
        return;
    }

    Sampler samp = (Sampler)sampler;

    // Unit 0 is remembered so emulated draws can rebind it afterwards.
    if (binding == 0)
    {
        _unit0Sampler = samp;
    }

    samp.Bind(binding);
}
2021-01-26 22:44:07 +01:00
public void SetScissor(int index, bool enable, int x, int y, int width, int height)
{
    uint bit = 1u << index;

    if (enable)
    {
        // Only issue the GL enable on a 0 -> 1 transition of the tracked bit.
        if ((_scissorEnables & bit) == 0)
        {
            _scissorEnables |= bit;
            GL.Enable(IndexedEnableCap.ScissorTest, index);
        }

        GL.ScissorIndexed(index, x, y, width, height);
    }
    else if ((_scissorEnables & bit) != 0)
    {
        _scissorEnables &= ~bit;
        GL.Disable(IndexedEnableCap.ScissorTest, index);
    }
}
2019-10-13 08:02:07 +02:00
// Applies the full stencil state (ops, funcs and write masks for both faces).
// The front write mask is cached so depth/stencil clears can restore it.
public void SetStencilTest(StencilTestDescriptor stencilTest)
{
// Cached so emulated draws can save and restore stencil test state.
_stencilTestEnable = stencilTest.TestEnable;
2019-10-13 08:02:07 +02:00
if (!stencilTest.TestEnable)
{
GL.Disable(EnableCap.StencilTest);
return;
}
GL.StencilOpSeparate(
StencilFace.Front,
stencilTest.FrontSFail.Convert(),
stencilTest.FrontDpFail.Convert(),
stencilTest.FrontDpPass.Convert());
GL.StencilFuncSeparate(
StencilFace.Front,
(StencilFunction)stencilTest.FrontFunc.Convert(),
stencilTest.FrontFuncRef,
stencilTest.FrontFuncMask);
GL.StencilMaskSeparate(StencilFace.Front, stencilTest.FrontMask);
GL.StencilOpSeparate(
StencilFace.Back,
stencilTest.BackSFail.Convert(),
stencilTest.BackDpFail.Convert(),
stencilTest.BackDpPass.Convert());
GL.StencilFuncSeparate(
StencilFace.Back,
(StencilFunction)stencilTest.BackFunc.Convert(),
stencilTest.BackFuncRef,
stencilTest.BackFuncMask);
GL.StencilMaskSeparate(StencilFace.Back, stencilTest.BackMask);
GL.Enable(EnableCap.StencilTest);
// Remember the front write mask so clears can temporarily override it.
_stencilFrontMask = stencilTest.FrontMask;
}
public void SetStorageBuffers(int first, ReadOnlySpan<BufferRange> buffers)
{
    // Storage buffers share the generic binding path with uniform buffers.
    SetBuffers(first, buffers, isStorage: true);
}
public void SetTexture(int binding, ITexture texture)
{
    if (texture == null)
    {
        return;
    }

    TextureBase texBase = (TextureBase)texture;

    if (binding == 0)
    {
        // Unit 0 is only tracked here, not bound immediately — presumably
        // bound later by the draw path (not shown in this file); confirm in PreDraw.
        _unit0Texture = texBase;
    }
    else
    {
        texBase.Bind(binding);
    }
}
// Binds the transform feedback buffers. The user's ranges are mirrored into
// private buffers (_tfbs) which are what GL actually writes to; feedback is
// paused around the rebinding if currently active.
public void SetTransformFeedbackBuffers(ReadOnlySpan<BufferRange> buffers)
{
if (_tfEnabled)
{
GL.EndTransformFeedback();
}
int count = Math.Min(buffers.Length, Constants.MaxTransformFeedbackBuffers);
for (int i = 0; i < count; i++)
{
BufferRange buffer = buffers[i];
// Remember the target range so results can be copied back later.
_tfbTargets[i] = buffer;
if (buffer.Handle == BufferHandle.Null)
{
// Unbind the slot when no buffer is assigned.
GL.BindBufferBase(BufferRangeTarget.TransformFeedbackBuffer, i, 0);
continue;
}
// Lazily create the private buffer, size it to the range and seed it
// with the current contents of the user buffer.
if (_tfbs[i] == BufferHandle.Null)
{
_tfbs[i] = Buffer.Create();
}
Buffer.Resize(_tfbs[i], buffer.Size);
Buffer.Copy(buffer.Handle, _tfbs[i], buffer.Offset, 0, buffer.Size);
GL.BindBufferBase(BufferRangeTarget.TransformFeedbackBuffer, i, _tfbs[i].ToInt32());
}
if (_tfEnabled)
{
GL.BeginTransformFeedback(_tfTopology);
}
}
public void SetUniformBuffers(int first, ReadOnlySpan<BufferRange> buffers)
{
    // Uniform buffers share the generic binding path with storage buffers.
    SetBuffers(first, buffers, isStorage: false);
}
public void SetUserClipDistance(int index, bool enableClip)
{
    EnableCap cap = EnableCap.ClipDistance0 + index;

    if (enableClip)
    {
        GL.Enable(cap);
    }
    else
    {
        GL.Disable(cap);
    }
}
public void SetVertexAttribs(ReadOnlySpan<VertexAttribDescriptor> vertexAttribs)
{
    // The VAO is created on demand the first time attributes or buffers are set.
    EnsureVertexArray();

    _vertexArray.SetVertexAttributes(vertexAttribs);
}
public void SetVertexBuffers(ReadOnlySpan<VertexBufferDescriptor> vertexBuffers)
{
    // The VAO is created on demand the first time attributes or buffers are set.
    EnsureVertexArray();

    _vertexArray.SetVertexBuffers(vertexBuffers);
}
public void SetViewports(int first, ReadOnlySpan<Viewport> viewports)
{
    Array.Resize(ref _viewportArray, viewports.Length * 4);
    Array.Resize(ref _depthRangeArray, viewports.Length * 2);

    float[] viewportArray = _viewportArray;
    double[] depthRangeArray = _depthRangeArray;

    for (int i = 0; i < viewports.Length; i++)
    {
        Viewport viewport = viewports[i];

        int vp = i * 4;
        int dr = i * 2;

        // A negative height marks a vertically flipped viewport; GL expects a
        // bottom-left origin and a positive height.
        viewportArray[vp + 0] = viewport.Region.X;
        viewportArray[vp + 1] = viewport.Region.Y + (viewport.Region.Height < 0 ? viewport.Region.Height : 0);
        viewportArray[vp + 2] = viewport.Region.Width;
        viewportArray[vp + 3] = MathF.Abs(viewport.Region.Height);

        if (HwCapabilities.SupportsViewportSwizzle)
        {
            GL.NV.ViewportSwizzle(
                i,
                viewport.SwizzleX.Convert(),
                viewport.SwizzleY.Convert(),
                viewport.SwizzleZ.Convert(),
                viewport.SwizzleW.Convert());
        }

        depthRangeArray[dr + 0] = viewport.DepthNear;
        depthRangeArray[dr + 1] = viewport.DepthFar;
    }

    // Flip the clip origin when the first viewport is vertically flipped.
    bool flipY = viewports.Length != 0 && viewports[0].Region.Height < 0;

    SetOrigin(flipY ? ClipOrigin.UpperLeft : ClipOrigin.LowerLeft);

    GL.ViewportArray(first, viewports.Length, viewportArray);
    GL.DepthRangeArray(first, viewports.Length, depthRangeArray);
}
// Barrier for texture fetches after rendering into the same texture.
public void TextureBarrier()
{
GL.MemoryBarrier(MemoryBarrierFlags.TextureFetchBarrierBit);
}
// Tiled variant; implemented identically to TextureBarrier on this backend.
public void TextureBarrierTiled()
{
GL.MemoryBarrier(MemoryBarrierFlags.TextureFetchBarrierBit);
}
/// <summary>
/// Binds a contiguous run of uniform or storage buffer ranges starting at the
/// given binding point. Null handles clear their binding point.
/// </summary>
private void SetBuffers(int first, ReadOnlySpan<BufferRange> buffers, bool isStorage)
{
    // Uniform and storage buffers share the same binding logic; only the
    // indexed bind target differs.
    BufferRangeTarget target = isStorage
        ? BufferRangeTarget.ShaderStorageBuffer
        : BufferRangeTarget.UniformBuffer;

    for (int i = 0; i < buffers.Length; i++)
    {
        BufferRange range = buffers[i];
        int bindingPoint = first + i;

        if (range.Handle == BufferHandle.Null)
        {
            // Unbind: buffer name 0 with an empty range.
            GL.BindBufferRange(target, bindingPoint, 0, IntPtr.Zero, 0);
        }
        else
        {
            GL.BindBufferRange(target, bindingPoint, range.Handle.ToInt32(), (IntPtr)range.Offset, range.Size);
        }
    }
}
2019-10-13 08:02:07 +02:00
// Updates the clip origin via glClipControl, skipping redundant state changes.
private void SetOrigin(ClipOrigin origin)
{
    if (_clipOrigin == origin)
    {
        return;
    }

    _clipOrigin = origin;

    GL.ClipControl(origin, _clipDepthMode);

    // Changing the clip origin flips the effective winding; reapply the
    // front face so the guest-selected winding is preserved (see SetFrontFace).
    SetFrontFace(_frontFace);
}
private void SetFrontFace(FrontFaceDirection frontFace)
{
    // Changing clip origin will also change the front face to compensate
    // for the flipped viewport, we flip it again here to compensate as
    // this effect is undesirable for us.
    if (_clipOrigin == ClipOrigin.UpperLeft)
    {
        frontFace = frontFace == FrontFaceDirection.Ccw
            ? FrontFaceDirection.Cw
            : FrontFaceDirection.Ccw;
    }

    GL.FrontFace(frontFace);
}
2019-10-13 08:02:07 +02:00
// Creates and binds the VAO on first use; no-op afterwards.
private void EnsureVertexArray()
{
    if (_vertexArray != null)
    {
        return;
    }

    _vertexArray = new VertexArray();
    _vertexArray.Bind();
}
// Creates and binds the framebuffer on first use; no-op afterwards.
private void EnsureFramebuffer()
{
    if (_framebuffer != null)
    {
        return;
    }

    _framebuffer = new Framebuffer();

    // Bind returns the handle now bound for both draw and read.
    int boundHandle = _framebuffer.Bind();
    _boundDrawFramebuffer = boundHandle;
    _boundReadFramebuffer = boundHandle;

    // sRGB conversion on framebuffer writes is enabled once, together with
    // the initial framebuffer creation.
    GL.Enable(EnableCap.FramebufferSrgb);
}
// Returns the currently bound draw/read framebuffer handles, or (0, 0)
// when running on a background context (which has its own FBO state).
internal (int drawHandle, int readHandle) GetBoundFramebuffers()
{
    return BackgroundContextWorker.InBackground
        ? (0, 0)
        : (_boundDrawFramebuffer, _boundReadFramebuffer);
}
Add a Multithreading layer for the GAL, multi-thread shader compilation at runtime (#2501) * Initial Implementation About as fast as nvidia GL multithreading, can be improved with faster command queuing. * Struct based command list Speeds up a bit. Still a lot of time lost to resource copy. * Do shader init while the render thread is active. * Introduce circular span pool V1 Ideally should be able to use structs instead of references for storing these spans on commands. Will try that next. * Refactor SpanRef some more Use a struct to represent SpanRef, rather than a reference. * Flush buffers on background thread * Use a span for UpdateRenderScale. Much faster than copying the array. * Calculate command size using reflection * WIP parallel shaders * Some minor optimisation * Only 2 max refs per command now. The command with 3 refs is gone. :relieved: * Don't cast on the GPU side * Remove redundant casts, force sync on window present * Fix Shader Cache * Fix host shader save. * Fixup to work with new renderer stuff * Make command Run static, use array of delegates as lookup Profile says this takes less time than the previous way. * Bring up to date * Add settings toggle. Fix Muiltithreading Off mode. * Fix warning. * Release tracking lock for flushes * Fix Conditional Render fast path with threaded gal * Make handle iteration safe when releasing the lock This is mostly temporary. * Attempt to set backend threading on driver Only really works on nvidia before launching a game. * Fix race condition with BufferModifiedRangeList, exceptions in tracking actions * Update buffer set commands * Some cleanup * Only use stutter workaround when using opengl renderer non-threaded * Add host-conditional reservation of counter events There has always been the possibility that conditional rendering could use a query object just as it is disposed by the counter queue. 
This change makes it so that when the host decides to use host conditional rendering, the query object is reserved so that it cannot be deleted. Counter events can optionally start reserved, as the threaded implementation can reserve them before the backend creates them, and there would otherwise be a short amount of time where the counter queue could dispose the event before a call to reserve it could be made. * Address Feedback * Make counter flush tracked again. Hopefully does not cause any issues this time. * Wait for FlushTo on the main queue thread. Currently assumes only one thread will want to FlushTo (in this case, the GPU thread) * Add SDL2 headless integration * Add HLE macro commands. Co-authored-by: Mary <mary@mary.zone>
2021-08-27 00:31:29 +02:00
/// <summary>
/// Updates per-texture/image render scale factors in the support buffer.
/// Only fragment and compute stages consume render scales; other stages
/// are ignored. The buffer is only re-uploaded when a value changed.
/// </summary>
public void UpdateRenderScale(ShaderStage stage, ReadOnlySpan<float> scales, int textureCount, int imageCount)
{
    if (stage != ShaderStage.Compute && stage != ShaderStage.Fragment)
    {
        return;
    }

    int total = textureCount + imageCount;
    bool dirty = false;

    // Scales are stored starting at element 1 (element 0 is reserved).
    for (int i = 0; i < total; i++)
    {
        if (_renderScale[1 + i].X != scales[i])
        {
            _renderScale[1 + i].X = scales[i];
            dirty = true;
        }
    }

    if (dirty)
    {
        SetSupportBufferData<Vector4<float>>(SupportBuffer.FragmentRenderScaleOffset, _renderScale, 1 + total);
    }
}
// Uploads the first 'count' elements of 'data' into the support buffer at
// 'offset', reinterpreted as raw bytes.
private void SetSupportBufferData<T>(int offset, ReadOnlySpan<T> data, int count) where T : unmanaged
{
    ReadOnlySpan<byte> bytes = MemoryMarshal.Cast<T, byte>(data.Slice(0, count));

    Buffer.SetData(_supportBuffer, offset, bytes);
}
// Re-binds the lazily tracked unit 0 texture before a compute dispatch.
private void PrepareForDispatch()
{
    if (_unit0Texture != null)
    {
        _unit0Texture.Bind(0);
    }
}
// Common work done before every draw call.
private void PreDraw()
{
    DrawCount++;

    // Make sure vertex attribute/buffer state is valid before drawing.
    _vertexArray.Validate();

    if (_unit0Texture != null)
    {
        _unit0Texture.Bind(0);
    }
}
// Common work done after every draw call: when transform feedback is active,
// copy the internal TF buffers back into the bound target buffer ranges.
private void PostDraw()
{
    if (!_tfEnabled)
    {
        return;
    }

    for (int i = 0; i < Constants.MaxTransformFeedbackBuffers; i++)
    {
        var targetRange = _tfbTargets[i];

        if (targetRange.Handle != BufferHandle.Null)
        {
            Buffer.Copy(_tfbs[i], targetRange.Handle, 0, targetRange.Offset, targetRange.Size);
        }
    }
}
/// <summary>
/// Reapplies the cached color write mask for the given render target index.
/// If the bound render target is BGRA, the red and blue mask bits swap.
/// </summary>
public void RestoreComponentMask(int index)
{
    bool isBgra = _fpIsBgra[index].X != 0;

    uint redMask = isBgra ? 4u : 1u;
    uint blueMask = isBgra ? 1u : 4u;

    var mask = _componentMasks[index];

    GL.ColorMask(
        index,
        (mask & redMask) != 0,
        (mask & 2u) != 0,
        (mask & blueMask) != 0,
        (mask & 8u) != 0);
}
2019-12-31 23:09:49 +01:00
// Re-enables the scissor test on viewport 0 when the cached state says
// the guest had it enabled (bit 0 of the enable mask).
public void RestoreScissor0Enable()
{
    if ((_scissorEnables & 1u) != 0)
    {
        GL.Enable(IndexedEnableCap.ScissorTest, 0);
    }
}
// Re-enables rasterizer discard if the cached guest state had it enabled.
public void RestoreRasterizerDiscard()
{
    if (_rasterizerDiscard)
    {
        GL.Enable(EnableCap.RasterizerDiscard);
    }
}
/// <summary>
/// Attempts to start host-side (GL) conditional rendering for a counter
/// comparison against a constant. Returns true when GL conditional render
/// was started (EndHostConditionalRendering must later be called); false
/// when the condition must instead be evaluated on the CPU by flushing
/// the counter queue.
/// </summary>
public bool TryHostConditionalRendering(ICounterEvent value, ulong compare, bool isEqual)
{
    if (value is CounterQueueEvent)
    {
        // Compare an event and a constant value.
        CounterQueueEvent evt = (CounterQueueEvent)value;

        // Easy host conditional rendering when the check matches what GL can do:
        //  - Event is of type samples passed.
        //  - Result is not a combination of multiple queries.
        //  - Comparing against 0.
        //  - Event has not already been flushed.
        if (compare == 0 && evt.Type == QueryTarget.SamplesPassed && evt.ClearCounter)
        {
            // Reserve the query object so the counter queue cannot dispose or
            // repurpose it while GL conditional rendering references it.
            if (!value.ReserveForHostAccess())
            {
                // If the event has been flushed, then just use the values on the CPU.
                // The query object may already be repurposed for another draw (eg. begin + end).
                return false;
            }

            // "compare == 0, isEqual" maps to the inverted query condition.
            GL.BeginConditionalRender(evt.Query, isEqual ? ConditionalRenderType.QueryNoWaitInverted : ConditionalRenderType.QueryNoWait);

            _activeConditionalRender = evt;

            return true;
        }
    }

    // The GPU will flush the queries to CPU and evaluate the condition there instead.

    GL.Flush(); // The thread will be stalled manually flushing the counter, so flush GL commands now.

    return false;
}
// Comparing two counter events has no host (GL) fast path; flush GL
// commands now because the GPU thread will stall flushing the counters,
// then let the caller evaluate the condition on the CPU.
public bool TryHostConditionalRendering(ICounterEvent value, ICounterEvent compare, bool isEqual)
{
    GL.Flush();

    return false;
}
// Ends GL conditional rendering and releases the query event reserved by
// TryHostConditionalRendering so the counter queue can reclaim it.
public void EndHostConditionalRendering()
{
    GL.EndConditionalRender();

    _activeConditionalRender?.ReleaseHostAccess();
    _activeConditionalRender = null;
}
2019-12-31 23:09:49 +01:00
/// <summary>
/// Releases all GL resources owned by the pipeline: the support buffer,
/// transform feedback buffers, any reserved conditional render query,
/// the framebuffer, the vertex array and the draw texture emulation helper.
/// </summary>
public void Dispose()
{
    if (_supportBuffer != BufferHandle.Null)
    {
        Buffer.Delete(_supportBuffer);
        _supportBuffer = BufferHandle.Null;
    }

    for (int index = 0; index < Constants.MaxTransformFeedbackBuffers; index++)
    {
        var tfb = _tfbs[index];

        if (tfb != BufferHandle.Null)
        {
            Buffer.Delete(tfb);
            _tfbs[index] = BufferHandle.Null;
        }
    }

    // Release any query still reserved for host conditional rendering so
    // the counter queue is free to dispose it.
    _activeConditionalRender?.ReleaseHostAccess();

    _framebuffer?.Dispose();
    _vertexArray?.Dispose();
    _drawTexture.Dispose();
}
2019-10-13 08:02:07 +02:00
}
}