using Ryujinx.Common.Memory;
using Ryujinx.Cpu.Tracking;
using Ryujinx.Graphics.GAL;
using Ryujinx.Graphics.Gpu.Memory;
using Ryujinx.Graphics.Texture;
using Ryujinx.Memory;
using Ryujinx.Memory.Range;
using System;
using System.Collections.Generic;
using System.Runtime.CompilerServices;

namespace Ryujinx.Graphics.Gpu.Image
{
    /// <summary>
    /// An overlapping texture group with a given view compatibility.
    /// </summary>
    readonly struct TextureIncompatibleOverlap
    {
        public readonly TextureGroup Group;
        public readonly TextureViewCompatibility Compatibility;

        /// <summary>
        /// Create a new texture incompatible overlap.
        /// </summary>
        /// <param name="group">The group that is incompatible</param>
        /// <param name="compatibility">The view compatibility for the group</param>
        public TextureIncompatibleOverlap(TextureGroup group, TextureViewCompatibility compatibility)
        {
            Group = group;
            Compatibility = compatibility;
        }
    }

    /// <summary>
    /// A texture group represents a group of textures that belong to the same storage.
    /// When views are created, this class will track memory accesses for them separately.
    /// The group iteratively adds more granular tracking as views of different kinds are added.
    /// Note that a texture group can be absorbed into another when it becomes a view parent.
    /// </summary>
    class TextureGroup : IDisposable
    {
        /// <summary>
        /// Threshold of layers to force granular handles (and thus partial loading) on array/3D textures.
        /// </summary>
        private const int GranularLayerThreshold = 8;

        private delegate void HandlesCallbackDelegate(int baseHandle, int regionCount, bool split = false);

        /// <summary>
        /// The storage texture associated with this group.
        /// </summary>
        public Texture Storage { get; }

        /// <summary>
        /// Indicates if the texture has copy dependencies. If true, then all modifications
        /// must be signalled to the group, rather than skipping ones still to be flushed.
        /// </summary>
        public bool HasCopyDependencies { get; set; }

        /// <summary>
        /// Indicates if this texture has any incompatible overlaps alive.
        /// </summary>
        public bool HasIncompatibleOverlaps => _incompatibleOverlaps.Count > 0;

        private readonly GpuContext _context;
        private readonly PhysicalMemory _physicalMemory;

        private int[] _allOffsets;
        private int[] _sliceSizes;
        private bool _is3D;
        private bool _hasMipViews;
        private bool _hasLayerViews;
        private int _layers;
        private int _levels;

        private MultiRange TextureRange => Storage.Range;

        /// <summary>
        /// The views list from the storage texture.
        /// </summary>
        private List<Texture> _views;
        private TextureGroupHandle[] _handles;
        private bool[] _loadNeeded;

        /// <summary>
        /// Other texture groups that have incompatible overlaps with this one.
        /// </summary>
        private List<TextureIncompatibleOverlap> _incompatibleOverlaps;
        private bool _incompatibleOverlapsDirty = true;
        private bool _flushIncompatibleOverlaps;

        /// <summary>
        /// Create a new texture group.
        /// </summary>
        /// <param name="context">GPU context that the texture group belongs to</param>
        /// <param name="physicalMemory">Physical memory where the texture is mapped</param>
        /// <param name="storage">The storage texture for this group</param>
        /// <param name="incompatibleOverlaps">Groups that overlap with this one but are incompatible</param>
        public TextureGroup(GpuContext context, PhysicalMemory physicalMemory, Texture storage, List<TextureIncompatibleOverlap> incompatibleOverlaps)
        {
            Storage = storage;
            _context = context;
            _physicalMemory = physicalMemory;

            _is3D = storage.Info.Target == Target.Texture3D;
            _layers = storage.Info.GetSlices();
            _levels = storage.Info.Levels;

            _incompatibleOverlaps = incompatibleOverlaps;
            _flushIncompatibleOverlaps = TextureCompatibility.IsFormatHostIncompatible(storage.Info, context.Capabilities);
        }

        /// <summary>
        /// Initialize a new texture group's dirty regions and offsets.
        /// </summary>
        /// <param name="size">Size info for the storage texture</param>
        /// <param name="hasLayerViews">True if the storage will have layer views</param>
        /// <param name="hasMipViews">True if the storage will have mip views</param>
        public void Initialize(ref SizeInfo size, bool hasLayerViews, bool hasMipViews)
        {
            _allOffsets = size.AllOffsets;
            _sliceSizes = size.SliceSizes;

            if (Storage.Target.HasDepthOrLayers() && Storage.Info.GetSlices() > GranularLayerThreshold)
            {
                _hasLayerViews = true;
                _hasMipViews = true;
            }
            else
            {
                (_hasLayerViews, _hasMipViews) = PropagateGranularity(hasLayerViews, hasMipViews);

                // If the texture is partially mapped, fully subdivide handles immediately.

                MultiRange range = Storage.Range;
                for (int i = 0; i < range.Count; i++)
                {
                    if (range.GetSubRange(i).Address == MemoryManager.PteUnmapped)
                    {
                        _hasLayerViews = true;
                        _hasMipViews = true;

                        break;
                    }
                }
            }

            RecalculateHandleRegions();
        }

        /// <summary>
        /// Initialize all incompatible overlaps in the list, registering them with the other texture groups
        /// and creating copy dependencies when partially compatible.
        /// </summary>
        public void InitializeOverlaps()
        {
            foreach (TextureIncompatibleOverlap overlap in _incompatibleOverlaps)
            {
                if (overlap.Compatibility == TextureViewCompatibility.LayoutIncompatible)
                {
                    CreateCopyDependency(overlap.Group, false);
                }

                overlap.Group._incompatibleOverlaps.Add(new TextureIncompatibleOverlap(this, overlap.Compatibility));
                overlap.Group._incompatibleOverlapsDirty = true;
            }

            if (_incompatibleOverlaps.Count > 0)
            {
                SignalIncompatibleOverlapModified();
            }
        }

        /// <summary>
        /// Signal that the group is dirty to all views and the storage.
        /// </summary>
        private void SignalAllDirty()
        {
            Storage.SignalGroupDirty();
            if (_views != null)
            {
                foreach (Texture texture in _views)
                {
                    texture.SignalGroupDirty();
                }
            }
        }

        /// <summary>
        /// Signal that an incompatible overlap has been modified.
        /// If this group must flush incompatible overlaps, the group is signalled as dirty too.
        /// </summary>
        private void SignalIncompatibleOverlapModified()
        {
            _incompatibleOverlapsDirty = true;

            if (_flushIncompatibleOverlaps)
            {
                SignalAllDirty();
            }
        }

        /// <summary>
        /// Flushes incompatible overlaps if the storage format requires it, and they have been modified.
        /// This allows unsupported host formats to accept data written to format aliased textures.
        /// </summary>
        /// <returns>True if data was flushed, false otherwise</returns>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public bool FlushIncompatibleOverlapsIfNeeded()
        {
            if (_flushIncompatibleOverlaps && _incompatibleOverlapsDirty)
            {
                bool flushed = false;

                foreach (var overlap in _incompatibleOverlaps)
                {
                    flushed |= overlap.Group.Storage.FlushModified(true);
                }

                _incompatibleOverlapsDirty = false;

                return flushed;
            }
            else
            {
                return false;
            }
        }

        /// <summary>
        /// Check and optionally consume the dirty flags for a given texture.
        /// The state is shared between views of the same layers and levels.
        /// </summary>
        /// <param name="texture">The texture being used</param>
        /// <param name="consume">True to consume the dirty flags and reprotect, false to leave them as is</param>
        /// <returns>True if a flag was dirty, false otherwise</returns>
        public bool CheckDirty(Texture texture, bool consume)
        {
            bool dirty = false;

            EvaluateRelevantHandles(texture, (baseHandle, regionCount, split) =>
            {
                for (int i = 0; i < regionCount; i++)
                {
                    TextureGroupHandle group = _handles[baseHandle + i];

                    foreach (CpuRegionHandle handle in group.Handles)
                    {
                        if (handle.Dirty)
                        {
                            if (consume)
                            {
                                handle.Reprotect();
                            }

                            dirty = true;
                        }
                    }
                }
            });

            return dirty;
        }
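
        // For example, a 2D array storage with 4 layers and 3 levels that has both layer and mip
        // views is tracked by 4 * 3 = 12 handles, one per (layer, level) cell; CheckDirty above and
        // the synchronization methods below only visit the handles selected by EvaluateRelevantHandles
        // for the view being used.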
        /// <summary>
        /// Synchronize memory for a given texture.
        /// If overlapping tracking handles are dirty, fully or partially synchronize the texture data.
        /// </summary>
        /// <param name="texture">The texture being used</param>
        public void SynchronizeMemory(Texture texture)
        {
            FlushIncompatibleOverlapsIfNeeded();

            EvaluateRelevantHandles(texture, (baseHandle, regionCount, split) =>
            {
                bool dirty = false;
                bool anyModified = false;
                bool anyNotDirty = false;

                for (int i = 0; i < regionCount; i++)
                {
                    TextureGroupHandle group = _handles[baseHandle + i];

                    bool modified = group.Modified;
                    bool handleDirty = false;
                    bool handleUnmapped = false;

                    foreach (CpuRegionHandle handle in group.Handles)
                    {
                        if (handle.Dirty)
                        {
                            handle.Reprotect();
                            handleDirty = true;
                        }
                        else
                        {
                            handleUnmapped |= handle.Unmapped;
                        }
                    }

                    // If the modified flag is still present, prefer the data written from gpu.
                    // A write from CPU will do a flush before writing its data, which should unset this.
                    if (modified)
                    {
                        handleDirty = false;
                    }

                    // Evaluate if any copy dependencies need to be fulfilled. A few rules:
                    // If the copy handle needs to be synchronized, prefer our own state.
                    // If we need to be synchronized and there is a copy present, prefer the copy.

                    if (group.NeedsCopy && group.Copy(_context))
                    {
                        anyModified |= true; // The copy target has been modified.
                        handleDirty = false;
                    }
                    else
                    {
                        anyModified |= modified;
                        dirty |= handleDirty;
                    }

                    if (group.NeedsCopy)
                    {
                        // The texture we copied from is still being written to. Copy from it again the next time this texture is used.
                        texture.SignalGroupDirty();
                    }

                    bool loadNeeded = handleDirty && !handleUnmapped;

                    anyNotDirty |= !loadNeeded;
                    _loadNeeded[baseHandle + i] = loadNeeded;
                }

                if (dirty)
                {
                    if (anyNotDirty || (_handles.Length > 1 && (anyModified || split)))
                    {
                        // Partial texture invalidation. Only update the layers/levels with dirty flags of the storage.

                        SynchronizePartial(baseHandle, regionCount);
                    }
                    else
                    {
                        // Full texture invalidation.

                        texture.SynchronizeFull();
                    }
                }
            });
        }

        /// <summary>
        /// Synchronize part of the storage texture, represented by a given range of handles.
        /// Only handles marked by the _loadNeeded array will be synchronized.
        /// </summary>
        /// <param name="baseHandle">The base index of the range of handles</param>
        /// <param name="regionCount">The number of handles to synchronize</param>
        private void SynchronizePartial(int baseHandle, int regionCount)
        {
            int spanEndIndex = -1;
            int spanBase = 0;
            ReadOnlySpan<byte> dataSpan = ReadOnlySpan<byte>.Empty;

            for (int i = 0; i < regionCount; i++)
            {
                if (_loadNeeded[baseHandle + i])
                {
                    var info = GetHandleInformation(baseHandle + i);

                    // Ensure the data for this handle is loaded in the span.
                    if (spanEndIndex <= i - 1)
                    {
                        spanEndIndex = i;

                        if (_is3D)
                        {
                            // Look ahead to see how many handles need to be loaded.
                            for (int j = i + 1; j < regionCount; j++)
                            {
                                if (_loadNeeded[baseHandle + j])
                                {
                                    spanEndIndex = j;
                                }
                                else
                                {
                                    break;
                                }
                            }
                        }

                        var endInfo = spanEndIndex == i ? info : GetHandleInformation(baseHandle + spanEndIndex);

                        spanBase = _allOffsets[info.Index];
                        int spanLast = _allOffsets[endInfo.Index + endInfo.Layers * endInfo.Levels - 1];
                        int endOffset = Math.Min(spanLast + _sliceSizes[endInfo.BaseLevel + endInfo.Levels - 1], (int)Storage.Size);
                        int size = endOffset - spanBase;

                        dataSpan = _physicalMemory.GetSpan(Storage.Range.Slice((ulong)spanBase, (ulong)size));
                    }

                    // Only one of these will be greater than 1, as partial sync is only called when there are sub-image views.
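                    // The loops below walk every (layer, level) subresource covered by this handle,
                    // slice its data out of the guest span fetched above (indexed by offset - spanBase),
                    // convert it to a host compatible format if required, and upload it to the storage.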
                    for (int layer = 0; layer < info.Layers; layer++)
                    {
                        for (int level = 0; level < info.Levels; level++)
                        {
                            int offsetIndex = GetOffsetIndex(info.BaseLayer + layer, info.BaseLevel + level);

                            int offset = _allOffsets[offsetIndex];

                            ReadOnlySpan<byte> data = dataSpan.Slice(offset - spanBase);

                            SpanOrArray<byte> result = Storage.ConvertToHostCompatibleFormat(data, info.BaseLevel + level, true);

                            Storage.SetData(result, info.BaseLayer + layer, info.BaseLevel + level);
                        }
                    }
                }
            }
        }

        /// <summary>
        /// Synchronize dependent textures, if any of them have deferred a copy from the given texture.
        /// </summary>
        /// <param name="texture">The texture to synchronize dependents of</param>
        public void SynchronizeDependents(Texture texture)
        {
            EvaluateRelevantHandles(texture, (baseHandle, regionCount, split) =>
            {
                for (int i = 0; i < regionCount; i++)
                {
                    TextureGroupHandle group = _handles[baseHandle + i];

                    group.SynchronizeDependents();
                }
            });
        }

        /// <summary>
        /// Determines whether flushes in this texture group should be tracked.
        /// Incompatible overlaps may need data from this texture to flush tracked for it to be visible to them.
        /// </summary>
        /// <returns>True if flushes should be tracked, false otherwise</returns>
        private bool ShouldFlushTriggerTracking()
        {
            foreach (var overlap in _incompatibleOverlaps)
            {
                if (overlap.Group._flushIncompatibleOverlaps)
                {
                    return true;
                }
            }

            return false;
        }

        /// <summary>
        /// Gets data from the host GPU, and flushes a slice to guest memory.
        /// </summary>
        /// <remarks>
        /// This method should be used to retrieve data that was modified by the host GPU.
        /// This is not cheap, avoid doing that unless strictly needed.
        /// When possible, the data is written directly into guest memory, rather than copied.
        /// </remarks>
        /// <param name="tracked">True if writing the texture data is tracked, false otherwise</param>
        /// <param name="sliceIndex">The index of the slice to flush</param>
        /// <param name="texture">The specific host texture to flush. Defaults to the storage texture</param>
        private void FlushTextureDataSliceToGuest(bool tracked, int sliceIndex, ITexture texture = null)
        {
            (int layer, int level) = GetLayerLevelForView(sliceIndex);

            int offset = _allOffsets[sliceIndex];
            int endOffset = Math.Min(offset + _sliceSizes[level], (int)Storage.Size);
            int size = endOffset - offset;

            using WritableRegion region = _physicalMemory.GetWritableRegion(Storage.Range.Slice((ulong)offset, (ulong)size), tracked);

            Storage.GetTextureDataSliceFromGpu(region.Memory.Span, layer, level, tracked, texture);
        }

        /// <summary>
        /// Gets and flushes a number of slices of the storage texture to guest memory.
        /// </summary>
        /// <param name="tracked">True if writing the texture data is tracked, false otherwise</param>
        /// <param name="sliceStart">The first slice to flush</param>
        /// <param name="sliceEnd">The slice to finish flushing on (exclusive)</param>
        /// <param name="texture">The specific host texture to flush. Defaults to the storage texture</param>
        private void FlushSliceRange(bool tracked, int sliceStart, int sliceEnd, ITexture texture = null)
        {
            for (int i = sliceStart; i < sliceEnd; i++)
            {
                FlushTextureDataSliceToGuest(tracked, i, texture);
            }
        }
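
        // For example, with a 2D array storage and _levels = 3, slice index 5 maps to layer 1,
        // level 2, and the guest range written back by FlushTextureDataSliceToGuest above is
        // [_allOffsets[5], _allOffsets[5] + _sliceSizes[2]), clamped to the storage size.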
        /// <summary>
        /// Flush modified ranges for a given texture.
        /// </summary>
        /// <param name="texture">The texture being used</param>
        /// <param name="tracked">True if the flush writes should be tracked, false otherwise</param>
        /// <returns>True if data was flushed, false otherwise</returns>
        public bool FlushModified(Texture texture, bool tracked)
        {
            tracked = tracked || ShouldFlushTriggerTracking();
            bool flushed = false;

            EvaluateRelevantHandles(texture, (baseHandle, regionCount, split) =>
            {
                int startSlice = 0;
                int endSlice = 0;
                bool allModified = true;

                for (int i = 0; i < regionCount; i++)
                {
                    TextureGroupHandle group = _handles[baseHandle + i];

                    if (group.Modified)
                    {
                        if (endSlice < group.BaseSlice)
                        {
                            if (endSlice > startSlice)
                            {
                                FlushSliceRange(tracked, startSlice, endSlice);
                                flushed = true;
                            }

                            startSlice = group.BaseSlice;
                        }

                        endSlice = group.BaseSlice + group.SliceCount;

                        if (tracked)
                        {
                            group.Modified = false;

                            foreach (Texture overlap in group.Overlaps)
                            {
                                overlap.SignalModifiedDirty();
                            }
                        }
                    }
                    else
                    {
                        allModified = false;
                    }
                }

                if (endSlice > startSlice)
                {
                    if (allModified && !split)
                    {
                        texture.Flush(tracked);
                    }
                    else
                    {
                        FlushSliceRange(tracked, startSlice, endSlice);
                    }

                    flushed = true;
                }
            });

            Storage.SignalModifiedDirty();

            return flushed;
        }

        /// <summary>
        /// Clears competing modified flags for all incompatible ranges, if they have possibly been modified.
        /// </summary>
        /// <param name="texture">The texture that has been modified</param>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private void ClearIncompatibleOverlaps(Texture texture)
        {
            if (_incompatibleOverlapsDirty)
            {
                foreach (TextureIncompatibleOverlap incompatible in _incompatibleOverlaps)
                {
                    incompatible.Group.ClearModified(texture.Range, this);

                    incompatible.Group.SignalIncompatibleOverlapModified();
                }

                _incompatibleOverlapsDirty = false;
            }
        }

        /// <summary>
        /// Signal that a texture in the group has been modified by the GPU.
        /// </summary>
        /// <param name="texture">The texture that has been modified</param>
        public void SignalModified(Texture texture)
        {
            ClearIncompatibleOverlaps(texture);

            EvaluateRelevantHandles(texture, (baseHandle, regionCount, split) =>
            {
                for (int i = 0; i < regionCount; i++)
                {
                    TextureGroupHandle group = _handles[baseHandle + i];

                    group.SignalModified(_context);
                }
            });
        }

        /// <summary>
        /// Signal that a texture in the group is actively bound, or has been unbound by the GPU.
        /// </summary>
        /// <param name="texture">The texture that has been modified</param>
        /// <param name="bound">True if this texture is being bound, false if unbound</param>
        public void SignalModifying(Texture texture, bool bound)
        {
            ClearIncompatibleOverlaps(texture);

            EvaluateRelevantHandles(texture, (baseHandle, regionCount, split) =>
            {
                for (int i = 0; i < regionCount; i++)
                {
                    TextureGroupHandle group = _handles[baseHandle + i];

                    group.SignalModifying(bound, _context);
                }
            });
        }

        /// <summary>
        /// Register a read/write action to flush for a texture group.
        /// </summary>
        /// <param name="group">The group to register an action for</param>
        public void RegisterAction(TextureGroupHandle group)
        {
            foreach (CpuRegionHandle handle in group.Handles)
            {
                handle.RegisterAction((address, size) => FlushAction(group, address, size));
            }
        }

        /// <summary>
        /// Propagates the mip/layer view flags depending on the texture type.
        /// When the most granular type of subresource has views, the other type of subresource must be segmented granularly too.
        /// </summary>
        /// <param name="hasLayerViews">True if the storage has layer views</param>
        /// <param name="hasMipViews">True if the storage has mip views</param>
        /// <returns>The input values after propagation</returns>
        private (bool HasLayerViews, bool HasMipViews) PropagateGranularity(bool hasLayerViews, bool hasMipViews)
        {
            if (_is3D)
            {
                hasMipViews |= hasLayerViews;
            }
            else
            {
                hasLayerViews |= hasMipViews;
            }

            return (hasLayerViews, hasMipViews);
        }
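
        // For example, on a fully subdivided 2D array storage with _levels = 4, a view covering
        // layers 2..3 at level 1 (slices = 2, levels = 1) takes the "mip slice" path below, so the
        // callback runs twice with disjoint ranges: (baseHandle 9, regionCount 1) and
        // (baseHandle 13, regionCount 1), both with split = true.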
        /// <summary>
        /// Evaluate the range of tracking handles which a view texture overlaps with.
        /// </summary>
        /// <param name="texture">The texture to get handles for</param>
        /// <param name="callback">
        /// A function to be called with the base index of the range of handles for the given texture, and the number of handles it covers.
        /// This can be called for multiple disjoint ranges, if required.
        /// </param>
        private void EvaluateRelevantHandles(Texture texture, HandlesCallbackDelegate callback)
        {
            if (texture == Storage || !(_hasMipViews || _hasLayerViews))
            {
                callback(0, _handles.Length);

                return;
            }

            EvaluateRelevantHandles(texture.FirstLayer, texture.FirstLevel, texture.Info.GetSlices(), texture.Info.Levels, callback);
        }

        /// <summary>
        /// Evaluate the range of tracking handles which a view texture overlaps with,
        /// using the view's position and slice/level counts.
        /// </summary>
        /// <param name="firstLayer">The first layer of the texture</param>
        /// <param name="firstLevel">The first level of the texture</param>
        /// <param name="slices">The slice count of the texture</param>
        /// <param name="levels">The level count of the texture</param>
        /// <param name="callback">
        /// A function to be called with the base index of the range of handles for the given texture, and the number of handles it covers.
        /// This can be called for multiple disjoint ranges, if required.
        /// </param>
        private void EvaluateRelevantHandles(int firstLayer, int firstLevel, int slices, int levels, HandlesCallbackDelegate callback)
        {
            int targetLayerHandles = _hasLayerViews ? slices : 1;
            int targetLevelHandles = _hasMipViews ? levels : 1;

            if (_is3D)
            {
                // Future mip levels come after all layers of the last mip level. Each mipmap has less layers (depth) than the last.

                if (!_hasLayerViews)
                {
                    // When there are no layer views, the mips are at a consistent offset.

                    callback(firstLevel, targetLevelHandles);
                }
                else
                {
                    (int levelIndex, int layerCount) = Get3DLevelRange(firstLevel);

                    if (levels > 1 && slices < _layers)
                    {
                        // The given texture only covers some of the depth of multiple mips. (a "depth slice")
                        // Callback with each mip's range separately.
                        // Can assume that the group is fully subdivided (both slices and levels > 1 for storage)

                        while (levels-- > 1)
                        {
                            callback(firstLayer + levelIndex, slices);

                            levelIndex += layerCount;
                            layerCount = Math.Max(layerCount >> 1, 1);
                            slices = Math.Max(layerCount >> 1, 1);
                        }
                    }
                    else
                    {
                        int totalSize = Math.Min(layerCount, slices);

                        while (levels-- > 1)
                        {
                            layerCount = Math.Max(layerCount >> 1, 1);

                            totalSize += layerCount;
                        }

                        callback(firstLayer + levelIndex, totalSize);
                    }
                }
            }
            else
            {
                // Future layers come after all mipmaps of the last.
                int levelHandles = _hasMipViews ? _levels : 1;

                if (slices > 1 && levels < _levels)
                {
                    // The given texture only covers some of the mipmaps of multiple slices. (a "mip slice")
                    // Callback with each layer's range separately.
                    // Can assume that the group is fully subdivided (both slices and levels > 1 for storage)

                    for (int i = 0; i < slices; i++)
                    {
                        callback(firstLevel + (firstLayer + i) * levelHandles, targetLevelHandles, true);
                    }
                }
                else
                {
                    callback(firstLevel + firstLayer * levelHandles, targetLevelHandles + (targetLayerHandles - 1) * levelHandles);
                }
            }
        }

        /// <summary>
        /// Get the range of offsets for a given mip level of a 3D texture.
        /// </summary>
        /// <param name="level">The level to return</param>
        /// <returns>Start index and count of offsets for the given level</returns>
        private (int Index, int Count) Get3DLevelRange(int level)
        {
            int index = 0;
            int count = _layers; // Depth. Halves with each mip level.

            while (level-- > 0)
            {
                index += count;
                count = Math.Max(count >> 1, 1);
            }

            return (index, count);
        }
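
        // For example, with _layers = 8 depth slices at level 0, Get3DLevelRange returns (0, 8) for
        // level 0, (8, 4) for level 1, (12, 2) for level 2 and (14, 1) for level 3, mirroring how
        // the offsets array stores each mip's depth slices one after another.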
        /// <summary>
        /// Get view information for a single tracking handle.
        /// </summary>
        /// <param name="handleIndex">The index of the handle</param>
        /// <returns>The layers and levels that the handle covers, and its index in the offsets array</returns>
        private (int BaseLayer, int BaseLevel, int Levels, int Layers, int Index) GetHandleInformation(int handleIndex)
        {
            int baseLayer;
            int baseLevel;
            int levels = _hasMipViews ? 1 : _levels;
            int layers = _hasLayerViews ? 1 : _layers;
            int index;

            if (_is3D)
            {
                if (_hasLayerViews)
                {
                    // NOTE: Will also have mip views, or only one level in storage.

                    index = handleIndex;
                    baseLevel = 0;

                    int levelLayers = _layers;

                    while (handleIndex >= levelLayers)
                    {
                        handleIndex -= levelLayers;
                        baseLevel++;
                        levelLayers = Math.Max(levelLayers >> 1, 1);
                    }

                    baseLayer = handleIndex;
                }
                else
                {
                    baseLayer = 0;
                    baseLevel = handleIndex;

                    (index, _) = Get3DLevelRange(baseLevel);
                }
            }
            else
            {
                baseLevel = _hasMipViews ? handleIndex % _levels : 0;
                baseLayer = _hasMipViews ? handleIndex / _levels : handleIndex;
                index = baseLevel + baseLayer * _levels;
            }

            return (baseLayer, baseLevel, levels, layers, index);
        }

        /// <summary>
        /// Gets the layer and level for a given view.
        /// </summary>
        /// <param name="index">The index of the view</param>
        /// <returns>The layer and level of the specified view</returns>
        private (int BaseLayer, int BaseLevel) GetLayerLevelForView(int index)
        {
            if (_is3D)
            {
                int baseLevel = 0;

                int levelLayers = _layers;

                while (index >= levelLayers)
                {
                    index -= levelLayers;
                    baseLevel++;
                    levelLayers = Math.Max(levelLayers >> 1, 1);
                }

                return (index, baseLevel);
            }
            else
            {
                return (index / _levels, index % _levels);
            }
        }

        /// <summary>
        /// Find the byte offset of a given texture relative to the storage.
        /// </summary>
        /// <param name="texture">The texture to locate</param>
        /// <returns>The offset of the texture in bytes</returns>
        public int FindOffset(Texture texture)
        {
            return _allOffsets[GetOffsetIndex(texture.FirstLayer, texture.FirstLevel)];
        }

        /// <summary>
        /// Find the offset index of a given layer and level.
        /// </summary>
        /// <param name="layer">The view layer</param>
        /// <param name="level">The view level</param>
        /// <returns>The offset index of the given layer and level</returns>
        public int GetOffsetIndex(int layer, int level)
        {
            if (_is3D)
            {
                return layer + Get3DLevelRange(level).Index;
            }
            else
            {
                return level + layer * _levels;
            }
        }

        /// <summary>
        /// The action to perform when a memory tracking handle is flipped to dirty.
        /// This notifies overlapping textures that the memory needs to be synchronized.
        /// </summary>
        /// <param name="groupHandle">The handle that a dirty flag was set on</param>
        private void DirtyAction(TextureGroupHandle groupHandle)
        {
            // Notify all textures that belong to this handle.

            Storage.SignalGroupDirty();

            lock (groupHandle.Overlaps)
            {
                foreach (Texture overlap in groupHandle.Overlaps)
                {
                    overlap.SignalGroupDirty();
                }
            }
        }

        /// <summary>
        /// Generate a CpuRegionHandle for a given address and size range in CPU VA.
        /// </summary>
        /// <param name="address">The start address of the tracked region</param>
        /// <param name="size">The size of the tracked region</param>
        /// <returns>A CpuRegionHandle covering the given range</returns>
        private CpuRegionHandle GenerateHandle(ulong address, ulong size)
        {
            return _physicalMemory.BeginTracking(address, size, ResourceKind.Texture);
        }
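
        // For example, on a 2D array storage with _levels = 3, GetOffsetIndex(layer: 2, level: 1)
        // returns 1 + 2 * 3 = 7, and GetLayerLevelForView(7) returns (7 / 3, 7 % 3) = (2, 1), so the
        // two mappings round-trip; the handle generation below relies on this _allOffsets layout.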
        /// <summary>
        /// Generate a TextureGroupHandle covering a specified range of views.
        /// </summary>
        /// <param name="viewStart">The start view of the handle</param>
        /// <param name="views">The number of views to cover</param>
        /// <returns>A TextureGroupHandle covering the given views</returns>
        private TextureGroupHandle GenerateHandles(int viewStart, int views)
        {
            int viewEnd = viewStart + views - 1;
            (_, int lastLevel) = GetLayerLevelForView(viewEnd);

            int offset = _allOffsets[viewStart];
            int endOffset = _allOffsets[viewEnd] + _sliceSizes[lastLevel];
            int size = endOffset - offset;

            var result = new List<CpuRegionHandle>();

            for (int i = 0; i < TextureRange.Count; i++)
            {
                MemoryRange item = TextureRange.GetSubRange(i);
                int subRangeSize = (int)item.Size;

                int sliceStart = Math.Clamp(offset, 0, subRangeSize);
                int sliceEnd = Math.Clamp(endOffset, 0, subRangeSize);

                if (sliceStart != sliceEnd && item.Address != MemoryManager.PteUnmapped)
                {
                    result.Add(GenerateHandle(item.Address + (ulong)sliceStart, (ulong)(sliceEnd - sliceStart)));
                }

                offset -= subRangeSize;
                endOffset -= subRangeSize;

                if (endOffset <= 0)
                {
                    break;
                }
            }

            (int firstLayer, int firstLevel) = GetLayerLevelForView(viewStart);

            if (_hasLayerViews && _hasMipViews)
            {
                size = _sliceSizes[firstLevel];
            }

            offset = _allOffsets[viewStart];
            ulong maxSize = Storage.Size - (ulong)offset;

            var groupHandle = new TextureGroupHandle(
                this,
                offset,
                Math.Min(maxSize, (ulong)size),
                _views,
                firstLayer,
                firstLevel,
                viewStart,
                views,
                result.ToArray());

            foreach (CpuRegionHandle handle in result)
            {
                handle.RegisterDirtyEvent(() => DirtyAction(groupHandle));
            }

            return groupHandle;
        }

        /// <summary>
        /// Update the views in this texture group, rebuilding the memory tracking if required.
        /// </summary>
        /// <param name="views">The views list of the storage texture</param>
        /// <param name="texture">The texture that has been added, if that is the only change, otherwise null</param>
        public void UpdateViews(List<Texture> views, Texture texture)
        {
            // This is saved to calculate overlapping views for each handle.
            _views = views;

            bool layerViews = _hasLayerViews;
            bool mipViews = _hasMipViews;
            bool regionsRebuilt = false;

            if (!(layerViews && mipViews))
            {
                foreach (Texture view in views)
                {
                    if (view.Info.GetSlices() < _layers)
                    {
                        layerViews = true;
                    }

                    if (view.Info.Levels < _levels)
                    {
                        mipViews = true;
                    }
                }

                (layerViews, mipViews) = PropagateGranularity(layerViews, mipViews);

                if (layerViews != _hasLayerViews || mipViews != _hasMipViews)
                {
                    _hasLayerViews = layerViews;
                    _hasMipViews = mipViews;

                    RecalculateHandleRegions();
                    regionsRebuilt = true;
                }
            }

            if (!regionsRebuilt)
            {
                if (texture != null)
                {
                    int offset = FindOffset(texture);

                    foreach (TextureGroupHandle handle in _handles)
                    {
                        handle.AddOverlap(offset, texture);
                    }
                }
                else
                {
                    // Must update the overlapping views on all handles, but only if they were not just recreated.

                    foreach (TextureGroupHandle handle in _handles)
                    {
                        handle.RecalculateOverlaps(this, views);
                    }
                }
            }

            SignalAllDirty();
        }

        /// <summary>
        /// Removes a view from the group, removing it from all overlap lists.
        /// </summary>
        /// <param name="view">View to remove from the group</param>
        public void RemoveView(Texture view)
        {
            int offset = FindOffset(view);

            foreach (TextureGroupHandle handle in _handles)
            {
                handle.RemoveOverlap(offset, view);
            }
        }

        /// <summary>
        /// Inherit handle state from an old set of handles, such as modified and dirty flags.
        /// </summary>
        /// <param name="oldHandles">The set of handles to inherit state from</param>
        /// <param name="handles">The set of handles inheriting the state</param>
        /// <param name="relativeOffset">The offset of the old handles in relation to the new ones</param>
        private void InheritHandles(TextureGroupHandle[] oldHandles, TextureGroupHandle[] handles, int relativeOffset)
        {
            foreach (var group in handles)
            {
                foreach (var handle in group.Handles)
                {
                    bool dirty = false;

                    foreach (var oldGroup in oldHandles)
                    {
                        if (group.OverlapsWith(oldGroup.Offset + relativeOffset, oldGroup.Size))
                        {
                            foreach (var oldHandle in oldGroup.Handles)
                            {
                                if (handle.OverlapsWith(oldHandle.Address, oldHandle.Size))
                                {
                                    dirty |= oldHandle.Dirty;
                                }
                            }

                            group.Inherit(oldGroup, group.Offset == oldGroup.Offset + relativeOffset);
                        }
                    }

                    if (dirty && !handle.Dirty)
                    {
                        handle.Reprotect(true);
                    }

                    if (group.Modified)
                    {
                        handle.RegisterAction((address, size) => FlushAction(group, address, size));
                    }
                }
            }

            foreach (var oldGroup in oldHandles)
            {
                oldGroup.Modified = false;
            }
        }

        /// <summary>
        /// Inherit state from another texture group.
        /// </summary>
        /// <param name="other">The texture group to inherit from</param>
        public void Inherit(TextureGroup other)
        {
            bool layerViews = _hasLayerViews || other._hasLayerViews;
            bool mipViews = _hasMipViews || other._hasMipViews;

            if (layerViews != _hasLayerViews || mipViews != _hasMipViews)
            {
                _hasLayerViews = layerViews;
                _hasMipViews = mipViews;

                RecalculateHandleRegions();
            }

            foreach (TextureIncompatibleOverlap incompatible in other._incompatibleOverlaps)
            {
                RegisterIncompatibleOverlap(incompatible, false);

                incompatible.Group._incompatibleOverlaps.RemoveAll(overlap => overlap.Group == other);
            }

            int relativeOffset = Storage.Range.FindOffset(other.Storage.Range);

            InheritHandles(other._handles, _handles, relativeOffset);
        }

        /// <summary>
        /// Replace the current handles with the new handles. It is assumed that the new handles start dirty.
        /// The dirty flags from the previous handles will be kept.
        /// </summary>
        /// <param name="handles">The handles to replace the current handles with</param>
        /// <param name="rangeChanged">True if the storage memory range changed since the last region handle generation</param>
        private void ReplaceHandles(TextureGroupHandle[] handles, bool rangeChanged)
        {
            if (_handles != null)
            {
                // When replacing handles, they should start as non-dirty.

                foreach (TextureGroupHandle groupHandle in handles)
                {
                    if (rangeChanged)
                    {
                        // When the storage range changes, this becomes a little different.
                        // If a range does not match one in the original, treat it as modified.
                        // It has been newly mapped and its data must be synchronized.

                        if (groupHandle.Handles.Length == 0)
                        {
                            continue;
                        }

                        foreach (var oldGroup in _handles)
                        {
                            if (!groupHandle.OverlapsWith(oldGroup.Offset, oldGroup.Size))
                            {
                                continue;
                            }

                            foreach (CpuRegionHandle handle in groupHandle.Handles)
                            {
                                bool hasMatch = false;

                                foreach (var oldHandle in oldGroup.Handles)
                                {
                                    if (oldHandle.RangeEquals(handle))
                                    {
                                        hasMatch = true;
                                        break;
                                    }
                                }

                                if (hasMatch)
                                {
                                    handle.Reprotect();
                                }
                            }
                        }
                    }
                    else
                    {
                        foreach (CpuRegionHandle handle in groupHandle.Handles)
                        {
                            handle.Reprotect();
                        }
                    }
                }

                InheritHandles(_handles, handles, 0);

                foreach (var oldGroup in _handles)
                {
                    foreach (var oldHandle in oldGroup.Handles)
                    {
                        oldHandle.Dispose();
                    }
                }
            }

            _handles = handles;
            _loadNeeded = new bool[_handles.Length];
        }

        /// <summary>
        /// Recalculate handle regions for this texture group, and inherit existing state into the new handles.
        /// </summary>
        /// <param name="rangeChanged">True if the storage memory range changed since the last region handle generation</param>
        private void RecalculateHandleRegions(bool rangeChanged = false)
        {
            TextureGroupHandle[] handles;

            if (!(_hasMipViews || _hasLayerViews))
            {
                // Single dirty region.
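                // The whole storage is guarded by a single TextureGroupHandle, backed by one
                // CpuRegionHandle per mapped sub-range of the storage's MultiRange; unmapped
                // sub-ranges are skipped.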
                var cpuRegionHandles = new CpuRegionHandle[TextureRange.Count];
                int count = 0;

                for (int i = 0; i < TextureRange.Count; i++)
                {
                    var currentRange = TextureRange.GetSubRange(i);
                    if (currentRange.Address != MemoryManager.PteUnmapped)
                    {
                        cpuRegionHandles[count++] = GenerateHandle(currentRange.Address, currentRange.Size);
                    }
                }

                if (count != TextureRange.Count)
                {
                    Array.Resize(ref cpuRegionHandles, count);
                }

                var groupHandle = new TextureGroupHandle(this, 0, Storage.Size, _views, 0, 0, 0, _allOffsets.Length, cpuRegionHandles);

                foreach (CpuRegionHandle handle in cpuRegionHandles)
                {
                    handle.RegisterDirtyEvent(() => DirtyAction(groupHandle));
                }

                handles = new TextureGroupHandle[] { groupHandle };
            }
            else
            {
                // Get views for the host texture.
                // It's worth noting that either the texture has layer views or mip views when getting to this point, which simplifies the logic a little.
                // Depending on if the texture is 3d, either the mip views imply that layer views are present (2d) or the other way around (3d).
                // This is enforced by the way the texture matched as a view, so we don't need to check.

                int layerHandles = _hasLayerViews ? _layers : 1;
                int levelHandles = _hasMipViews ? _levels : 1;

                int handleIndex = 0;

                if (_is3D)
                {
                    var handlesList = new List<TextureGroupHandle>();

                    for (int i = 0; i < levelHandles; i++)
                    {
                        for (int j = 0; j < layerHandles; j++)
                        {
                            (int viewStart, int views) = Get3DLevelRange(i);
                            viewStart += j;
                            views = _hasLayerViews ? 1 : views; // A layer view is also a mip view.

                            handlesList.Add(GenerateHandles(viewStart, views));
                        }

                        layerHandles = Math.Max(1, layerHandles >> 1);
                    }

                    handles = handlesList.ToArray();
                }
                else
                {
                    handles = new TextureGroupHandle[layerHandles * levelHandles];

                    for (int i = 0; i < layerHandles; i++)
                    {
                        for (int j = 0; j < levelHandles; j++)
                        {
                            int viewStart = j + i * _levels;
                            int views = _hasMipViews ? 1 : _levels; // A mip view is also a layer view.

                            handles[handleIndex++] = GenerateHandles(viewStart, views);
                        }
                    }
                }
            }

            ReplaceHandles(handles, rangeChanged);
        }

        /// <summary>
        /// Regenerates handles when the storage range has been remapped.
        /// This forces the regions to be fully subdivided.
        /// </summary>
        public void RangeChanged()
        {
            _hasLayerViews = true;
            _hasMipViews = true;

            RecalculateHandleRegions(true);

            SignalAllDirty();
        }

        /// <summary>
        /// Ensure that there is a handle for each potential texture view. Required for copy dependencies to work.
        /// </summary>
        private void EnsureFullSubdivision()
        {
            if (!(_hasLayerViews && _hasMipViews))
            {
                _hasLayerViews = true;
                _hasMipViews = true;

                RecalculateHandleRegions();
            }
        }

        /// <summary>
        /// Create a copy dependency between this texture group, and a texture at a given layer/level offset.
        /// </summary>
        /// <param name="other">The view compatible texture to create a dependency to</param>
        /// <param name="firstLayer">The base layer of the given texture relative to the storage</param>
        /// <param name="firstLevel">The base level of the given texture relative to the storage</param>
        /// <param name="copyTo">True if this texture is first copied to the given one, false for the opposite direction</param>
        public void CreateCopyDependency(Texture other, int firstLayer, int firstLevel, bool copyTo)
        {
            TextureGroup otherGroup = other.Group;

            EnsureFullSubdivision();
            otherGroup.EnsureFullSubdivision();

            // Get the location of each texture within its storage, so we can find the handles to apply the dependency to.
            // This can consist of multiple disjoint regions, for example if this is a mip slice of an array texture.
            var targetRange = new List<(int BaseHandle, int RegionCount)>();
            var otherRange = new List<(int BaseHandle, int RegionCount)>();

            EvaluateRelevantHandles(firstLayer, firstLevel, other.Info.GetSlices(), other.Info.Levels, (baseHandle, regionCount, split) => targetRange.Add((baseHandle, regionCount)));
            otherGroup.EvaluateRelevantHandles(other, (baseHandle, regionCount, split) => otherRange.Add((baseHandle, regionCount)));

            int targetIndex = 0;
            int otherIndex = 0;
            (int Handle, int RegionCount) targetRegion = (0, 0);
            (int Handle, int RegionCount) otherRegion = (0, 0);

            while (true)
            {
                if (targetRegion.RegionCount == 0)
                {
                    if (targetIndex >= targetRange.Count)
                    {
                        break;
                    }

                    targetRegion = targetRange[targetIndex++];
                }

                if (otherRegion.RegionCount == 0)
                {
                    if (otherIndex >= otherRange.Count)
                    {
                        break;
                    }

                    otherRegion = otherRange[otherIndex++];
                }

                TextureGroupHandle handle = _handles[targetRegion.Handle++];
                TextureGroupHandle otherHandle = other.Group._handles[otherRegion.Handle++];

                targetRegion.RegionCount--;
                otherRegion.RegionCount--;

                handle.CreateCopyDependency(otherHandle, copyTo);

                // If "copyTo" is true, this texture must copy to the other.
                // Otherwise, it must copy to this texture.

                if (copyTo)
                {
                    otherHandle.Copy(_context, handle);
                }
                else
                {
                    handle.Copy(_context, otherHandle);
                }
            }
        }

        /// <summary>
        /// Creates a copy dependency to another texture group, where handles overlap.
        /// Scans through all handles to find compatible patches in the other group.
        /// </summary>
        /// <param name="other">The texture group that overlaps this one</param>
        /// <param name="copyTo">True if this texture is first copied to the given one, false for the opposite direction</param>
        public void CreateCopyDependency(TextureGroup other, bool copyTo)
        {
            for (int i = 0; i < _allOffsets.Length; i++)
            {
                (int layer, int level) = GetLayerLevelForView(i);
                MultiRange handleRange = Storage.Range.Slice((ulong)_allOffsets[i], 1);
                ulong handleBase = handleRange.GetSubRange(0).Address;

                for (int j = 0; j < other._handles.Length; j++)
                {
                    (int otherLayer, int otherLevel) = other.GetLayerLevelForView(j);
                    MultiRange otherHandleRange = other.Storage.Range.Slice((ulong)other._allOffsets[j], 1);
                    ulong otherHandleBase = otherHandleRange.GetSubRange(0).Address;

                    if (handleBase == otherHandleBase)
                    {
                        // Check if the two sizes are compatible.
                        TextureInfo info = Storage.Info;
                        TextureInfo otherInfo = other.Storage.Info;

                        if (TextureCompatibility.ViewLayoutCompatible(info, otherInfo, level, otherLevel) &&
                            TextureCompatibility.CopySizeMatches(info, otherInfo, level, otherLevel))
                        {
                            // These textures are copy compatible. Create the dependency.

                            EnsureFullSubdivision();
                            other.EnsureFullSubdivision();

                            TextureGroupHandle handle = _handles[i];
                            TextureGroupHandle otherHandle = other._handles[j];

                            handle.CreateCopyDependency(otherHandle, copyTo);

                            // If "copyTo" is true, this texture must copy to the other.
                            // Otherwise, it must copy to this texture.

                            if (copyTo)
                            {
                                otherHandle.Copy(_context, handle);
                            }
                            else
                            {
                                handle.Copy(_context, otherHandle);
                            }
                        }
                    }
                }
            }
        }

        /// <summary>
        /// Registers another texture group as an incompatible overlap, if not already registered.
        /// </summary>
        /// <param name="other">The texture group to add to the incompatible overlaps list</param>
        /// <param name="copy">True if the overlap should register copy dependencies</param>
        public void RegisterIncompatibleOverlap(TextureIncompatibleOverlap other, bool copy)
        {
            if (!_incompatibleOverlaps.Exists(overlap => overlap.Group == other.Group))
            {
                if (copy && other.Compatibility == TextureViewCompatibility.LayoutIncompatible)
                {
                    // Any of the group's views may share compatibility, even if the parents do not fully.
                    CreateCopyDependency(other.Group, false);
                }

                _incompatibleOverlaps.Add(other);
                other.Group._incompatibleOverlaps.Add(new TextureIncompatibleOverlap(this, other.Compatibility));
            }

            other.Group.SignalIncompatibleOverlapModified();
            SignalIncompatibleOverlapModified();
        }

        /// <summary>
        /// Clear modified flags in the given range.
        /// This will stop any GPU written data from flushing or copying to dependent textures.
        /// </summary>
        /// <param name="range">The range to clear modified flags in</param>
        /// <param name="ignore">Ignore handles that have a copy dependency to the specified group</param>
        public void ClearModified(MultiRange range, TextureGroup ignore = null)
        {
            TextureGroupHandle[] handles = _handles;

            foreach (TextureGroupHandle handle in handles)
            {
                // Handles list is not modified by another thread, only replaced, so this is thread safe.
                // Remove modified flags from all overlapping handles, so that the textures don't flush to unmapped/remapped GPU memory.

                MultiRange subRange = Storage.Range.Slice((ulong)handle.Offset, (ulong)handle.Size);

                if (range.OverlapsWith(subRange))
                {
                    if ((ignore == null || !handle.HasDependencyTo(ignore)) && handle.Modified)
                    {
                        handle.Modified = false;
                        Storage.SignalModifiedDirty();

                        lock (handle.Overlaps)
                        {
                            foreach (Texture texture in handle.Overlaps)
                            {
                                texture.SignalModifiedDirty();
                            }
                        }
                    }
                }
            }

            Storage.SignalModifiedDirty();

            if (_views != null)
            {
                foreach (Texture texture in _views)
                {
                    texture.SignalModifiedDirty();
                }
            }
        }

        /// <summary>
        /// A flush has been requested on a tracked region. Flush texture data for the given handle.
        /// </summary>
        /// <param name="handle">The handle this flush action is for</param>
        /// <param name="address">The address of the flushing memory access</param>
        /// <param name="size">The size of the flushing memory access</param>
        public void FlushAction(TextureGroupHandle handle, ulong address, ulong size)
        {
            // If the page size is larger than 4KB, we will have a lot of false positives for flushing.
            // Let's avoid flushing textures that are unlikely to be read from CPU to improve performance
            // on those platforms.
            if (!_physicalMemory.Supports4KBPages && !Storage.Info.IsLinear && !_context.IsGpuThread())
            {
                return;
            }

            // There is a small gap here where the action is removed but _actionRegistered is still 1.
            // In this case it will skip registering the action, but here we are already handling it,
            // so there shouldn't be any issue as it's the same handler for all actions.

            handle.ClearActionRegistered();

            if (!handle.Modified)
            {
                return;
            }

            bool isGpuThread = _context.IsGpuThread();

            if (isGpuThread)
            {
                // No need to wait if we're on the GPU thread, we can just clear the modified flag immediately.
                handle.Modified = false;
            }

            _context.Renderer.BackgroundContextAction(() =>
            {
                if (!isGpuThread)
                {
                    handle.Sync(_context);
                }

                Storage.SignalModifiedDirty();

                lock (handle.Overlaps)
                {
                    foreach (Texture texture in handle.Overlaps)
                    {
                        texture.SignalModifiedDirty();
                    }
                }

                if (TextureCompatibility.CanTextureFlush(Storage.Info, _context.Capabilities))
                {
                    FlushSliceRange(false, handle.BaseSlice, handle.BaseSlice + handle.SliceCount, Storage.GetFlushTexture());
                }
            });
        }

        /// <summary>
        /// Dispose this texture group, disposing all related memory tracking handles.
        /// </summary>
        public void Dispose()
        {
            foreach (TextureGroupHandle group in _handles)
            {
                group.Dispose();
            }

            foreach (TextureIncompatibleOverlap incompatible in _incompatibleOverlaps)
            {
                incompatible.Group._incompatibleOverlaps.RemoveAll(overlap => overlap.Group == this);
            }
        }
    }
}