// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project & 2024 suyu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <algorithm>
#include <array>
#include <cstring>
#include <functional>
#include <iterator>
#include <list>
#include <memory>
#include <mutex>
#include <optional>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "common/assert.h"
#include "common/settings.h"
#include "common/slot_vector.h"
#include "video_core/control/channel_state_cache.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/host1x/gpu_device_memory_manager.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"

namespace VideoCore {

enum class QueryType {
    SamplesPassed,
    PrimitivesGenerated,
    TfbPrimitivesWritten,
    Count,
};
constexpr std::size_t NumQueryTypes = static_cast<size_t>(QueryType::Count);

} // namespace VideoCore

namespace VideoCommon {

using AsyncJobId = Common::SlotId;

static constexpr AsyncJobId NULL_ASYNC_JOB_ID{0};

template <class QueryCache, class HostCounter>
class CounterStreamBase {
public:
    explicit CounterStreamBase(QueryCache& cache_, VideoCore::QueryType type_)
        : cache{cache_}, type{type_} {}

    /// Resets the stream to zero. It doesn't disable the query after resetting.
    void Reset() {
        if (current) {
            current->EndQuery();

            // Immediately start a new query to avoid disabling its state.
            current = cache.Counter(nullptr, type);
        }
        last = nullptr;
    }

    /// Returns the current counter, slicing it as needed.
    std::shared_ptr<HostCounter> Current() {
        if (!current) {
            return nullptr;
        }
        current->EndQuery();
        last = std::move(current);
        current = cache.Counter(last, type);
        return last;
    }

    /// Returns true when the counter stream is enabled.
    bool IsEnabled() const {
        return current != nullptr;
    }

    /// Enables the stream.
    void Enable() {
        if (current) {
            return;
        }
        current = cache.Counter(last, type);
    }

    /// Disables the stream.
    void Disable() {
        if (current) {
            current->EndQuery();
        }
        last = std::exchange(current, nullptr);
    }

private:
    QueryCache& cache;
    const VideoCore::QueryType type;

    std::shared_ptr<HostCounter> current;
    std::shared_ptr<HostCounter> last;
};
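
// Usage sketch (illustrative only): the query cache below takes the current slice of a
// stream roughly like this:
//
//     std::shared_ptr<HostCounter> counter =
//         Stream(VideoCore::QueryType::SamplesPassed).Current();
//
// Current() ends the active backend query, starts a new one that depends on it, and returns
// the finished slice; summing the resulting dependency chain yields the full counter value.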

template <class QueryCache, class CachedQuery, class CounterStream, class HostCounter>
class QueryCacheLegacy : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
public:
    explicit QueryCacheLegacy(VideoCore::RasterizerInterface& rasterizer_,
                              Tegra::MaxwellDeviceMemoryManager& device_memory_)
        : rasterizer{rasterizer_},
          // Use reinterpret_cast instead of static_cast as workaround for
          // UBSan bug (https://github.com/llvm/llvm-project/issues/59060)
          device_memory{device_memory_},
          streams{{
              {CounterStream{reinterpret_cast<QueryCache&>(*this),
                             VideoCore::QueryType::SamplesPassed}},
              {CounterStream{reinterpret_cast<QueryCache&>(*this),
                             VideoCore::QueryType::PrimitivesGenerated}},
              {CounterStream{reinterpret_cast<QueryCache&>(*this),
                             VideoCore::QueryType::TfbPrimitivesWritten}},
          }} {
        (void)slot_async_jobs.insert(); // Null value
    }

    void InvalidateRegion(VAddr addr, std::size_t size) {
        std::unique_lock lock{mutex};
        FlushAndRemoveRegion(addr, size);
    }

    void FlushRegion(VAddr addr, std::size_t size) {
        std::unique_lock lock{mutex};
        FlushAndRemoveRegion(addr, size);
    }

    /**
     * Records a query in GPU mapped memory, potentially marked with a timestamp.
     * @param gpu_addr  GPU address to flush to when the mapped memory is read.
     * @param type      Query type, e.g. SamplesPassed.
     * @param timestamp Timestamp, when empty the flushed query is assumed to be short.
     */
    void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) {
        std::unique_lock lock{mutex};
        const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
        ASSERT(cpu_addr);

        CachedQuery* query = TryGet(*cpu_addr);
        if (!query) {
            ASSERT_OR_EXECUTE(cpu_addr, return;);
            u8* const host_ptr = gpu_memory->GetPointer(gpu_addr);

            query = Register(type, *cpu_addr, host_ptr, timestamp.has_value());
        }

        auto result = query->BindCounter(Stream(type).Current(), timestamp);
        if (result) {
            auto async_job_id = query->GetAsyncJob();
            auto& async_job = slot_async_jobs[async_job_id];
            async_job.collected = true;
            async_job.value = *result;
            query->SetAsyncJob(NULL_ASYNC_JOB_ID);
        }
        AsyncFlushQuery(query, timestamp, lock);
    }
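
    // Example (illustrative sketch, not part of this API): a rasterizer handling a Maxwell
    // query report of type SamplesPassed would typically forward it along these lines:
    //
    //     query_cache.Query(gpu_addr, VideoCore::QueryType::SamplesPassed,
    //                       use_timestamp ? std::make_optional(gpu.GetTicks()) : std::nullopt);
    //
    // `gpu_addr`, `use_timestamp` and `gpu.GetTicks()` are assumed caller-side names used only
    // for illustration.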

    /// Enables all available GPU counters.
    void EnableCounters() {
        std::unique_lock lock{mutex};
        for (auto& stream : streams) {
            stream.Enable();
        }
    }

    /// Resets a counter to zero. It doesn't disable the query after resetting.
    void ResetCounter(VideoCore::QueryType type) {
        std::unique_lock lock{mutex};
        Stream(type).Reset();
    }

    /// Disables all active streams. Expected to be called at the end of a command buffer.
    void DisableStreams() {
        std::unique_lock lock{mutex};
        for (auto& stream : streams) {
            stream.Disable();
        }
    }

    /// Returns a new host counter.
    std::shared_ptr<HostCounter> Counter(std::shared_ptr<HostCounter> dependency,
                                         VideoCore::QueryType type) {
        return std::make_shared<HostCounter>(static_cast<QueryCache&>(*this), std::move(dependency),
                                             type);
    }

    /// Returns the counter stream of the specified type.
    CounterStream& Stream(VideoCore::QueryType type) {
        return streams[static_cast<std::size_t>(type)];
    }

    /// Returns the counter stream of the specified type.
    const CounterStream& Stream(VideoCore::QueryType type) const {
        return streams[static_cast<std::size_t>(type)];
    }

    void CommitAsyncFlushes() {
        std::unique_lock lock{mutex};
        committed_flushes.push_back(uncommitted_flushes);
        uncommitted_flushes.reset();
    }

    bool HasUncommittedFlushes() const {
        std::unique_lock lock{mutex};
        return uncommitted_flushes != nullptr;
    }

    bool ShouldWaitAsyncFlushes() const {
        std::unique_lock lock{mutex};
        if (committed_flushes.empty()) {
            return false;
        }
        return committed_flushes.front() != nullptr;
    }

    void PopAsyncFlushes() {
        std::unique_lock lock{mutex};
        if (committed_flushes.empty()) {
            return;
        }
        auto& flush_list = committed_flushes.front();
        if (!flush_list) {
            committed_flushes.pop_front();
            return;
        }
        for (AsyncJobId async_job_id : *flush_list) {
            AsyncJob& async_job = slot_async_jobs[async_job_id];
            if (!async_job.collected) {
                FlushAndRemoveRegion(async_job.query_location, 2, true);
            }
        }
        committed_flushes.pop_front();
    }
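
    // Lifecycle of an asynchronous flush (summary of the methods above): Query() registers an
    // AsyncJob in uncommitted_flushes via AsyncFlushQuery(), CommitAsyncFlushes() moves that
    // batch into committed_flushes, and PopAsyncFlushes() forces collection of any job that has
    // not been collected yet by flushing its query location.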

private:
    struct AsyncJob {
        bool collected = false;
        u64 value = 0;
        VAddr query_location = 0;
        std::optional<u64> timestamp{};
    };

    /// Flushes a memory range to guest memory and removes it from the cache.
    void FlushAndRemoveRegion(VAddr addr, std::size_t size, bool async = false) {
        const u64 addr_begin = addr;
        const u64 addr_end = addr_begin + size;
        const auto in_range = [addr_begin, addr_end](const CachedQuery& query) {
            const u64 cache_begin = query.GetCpuAddr();
            const u64 cache_end = cache_begin + query.SizeInBytes();
            return cache_begin < addr_end && addr_begin < cache_end;
        };

        const u64 page_end = addr_end >> SUYU_PAGEBITS;
        for (u64 page = addr_begin >> SUYU_PAGEBITS; page <= page_end; ++page) {
            const auto& it = cached_queries.find(page);
            if (it == std::end(cached_queries)) {
                continue;
            }
            auto& contents = it->second;
            for (auto& query : contents) {
                if (!in_range(query)) {
                    continue;
                }
                AsyncJobId async_job_id = query.GetAsyncJob();
                auto flush_result = query.Flush(async);
                if (async_job_id == NULL_ASYNC_JOB_ID) {
                    ASSERT_MSG(false, "This should not be reachable at all");
                    continue;
                }
                AsyncJob& async_job = slot_async_jobs[async_job_id];
                async_job.collected = true;
                async_job.value = flush_result;
                query.SetAsyncJob(NULL_ASYNC_JOB_ID);
            }
            std::erase_if(contents, in_range);
        }
    }

    /// Registers the passed parameters as cached and returns a pointer to the stored cached query.
    CachedQuery* Register(VideoCore::QueryType type, VAddr cpu_addr, u8* host_ptr, bool timestamp) {
        const u64 page = static_cast<u64>(cpu_addr) >> SUYU_PAGEBITS;
        return &cached_queries[page].emplace_back(static_cast<QueryCache&>(*this), type, cpu_addr,
                                                  host_ptr);
    }

    /// Tries to get a cached query. Returns nullptr on failure.
    CachedQuery* TryGet(VAddr addr) {
        const u64 page = static_cast<u64>(addr) >> SUYU_PAGEBITS;
        const auto it = cached_queries.find(page);
        if (it == std::end(cached_queries)) {
            return nullptr;
        }
        auto& contents = it->second;
        const auto found = std::find_if(std::begin(contents), std::end(contents),
                                        [addr](auto& query) { return query.GetCpuAddr() == addr; });
        return found != std::end(contents) ? &*found : nullptr;
    }

    void AsyncFlushQuery(CachedQuery* query, std::optional<u64> timestamp,
                         std::unique_lock<std::recursive_mutex>& lock) {
        const AsyncJobId new_async_job_id = slot_async_jobs.insert();
        {
            AsyncJob& async_job = slot_async_jobs[new_async_job_id];
            query->SetAsyncJob(new_async_job_id);
            async_job.query_location = query->GetCpuAddr();
            async_job.collected = false;

            if (!uncommitted_flushes) {
                uncommitted_flushes = std::make_shared<std::vector<AsyncJobId>>();
            }
            uncommitted_flushes->push_back(new_async_job_id);
        }
        lock.unlock();
        std::function<void()> operation([this, new_async_job_id, timestamp] {
            std::unique_lock local_lock{mutex};
            AsyncJob& async_job = slot_async_jobs[new_async_job_id];
            u64 value = async_job.value;
            VAddr address = async_job.query_location;
            slot_async_jobs.erase(new_async_job_id);
            local_lock.unlock();
            if (timestamp) {
                u64 timestamp_value = *timestamp;
                device_memory.WriteBlockUnsafe(address + sizeof(u64), &timestamp_value,
                                               sizeof(u64));
                device_memory.WriteBlockUnsafe(address, &value, sizeof(u64));
                rasterizer.InvalidateRegion(address, sizeof(u64) * 2,
                                            VideoCommon::CacheType::NoQueryCache);
            } else {
                u32 small_value = static_cast<u32>(value);
                device_memory.WriteBlockUnsafe(address, &small_value, sizeof(u32));
                rasterizer.InvalidateRegion(address, sizeof(u32),
                                            VideoCommon::CacheType::NoQueryCache);
            }
        });
        rasterizer.SyncOperation(std::move(operation));
    }

    static constexpr std::uintptr_t SUYU_PAGESIZE = 4096;
    static constexpr unsigned SUYU_PAGEBITS = 12;

    Common::SlotVector<AsyncJob> slot_async_jobs;

    VideoCore::RasterizerInterface& rasterizer;
    Tegra::MaxwellDeviceMemoryManager& device_memory;

    mutable std::recursive_mutex mutex;

    std::unordered_map<u64, std::vector<CachedQuery>> cached_queries;

    std::array<CounterStream, VideoCore::NumQueryTypes> streams;

    std::shared_ptr<std::vector<AsyncJobId>> uncommitted_flushes{};
    std::list<std::shared_ptr<std::vector<AsyncJobId>>> committed_flushes;
};
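
// A backend typically instantiates the legacy cache along these lines (illustrative sketch;
// the QueryCache, CachedQuery, CounterStream and HostCounter names below stand for assumed
// backend-specific types, not declarations from this header):
//
//     class QueryCache final
//         : public VideoCommon::QueryCacheLegacy<QueryCache, CachedQuery, CounterStream,
//                                                HostCounter> {
//         // Backend-specific counter creation, flushing, etc.
//     };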

template <class QueryCache, class HostCounter>
class HostCounterBase {
public:
    explicit HostCounterBase(std::shared_ptr<HostCounter> dependency_)
        : dependency{std::move(dependency_)}, depth{dependency ? (dependency->Depth() + 1) : 0} {
        // Avoid nesting too many dependencies, to prevent a stack overflow when they are deleted.
        constexpr u64 depth_threshold = 96;
        if (depth > depth_threshold) {
            depth = 0;
            base_result = dependency->Query();
            dependency = nullptr;
        }
    }
    virtual ~HostCounterBase() = default;

    /// Returns the current value of the query.
    u64 Query(bool async = false) {
        if (result) {
            return *result;
        }

        u64 value = BlockingQuery(async) + base_result;
        if (dependency) {
            value += dependency->Query();
            dependency = nullptr;
        }

        result = value;
        return *result;
    }
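
    // Worked example: if counter C depends on B, which depends on A, and the backend reports
    // 2, 3 and 5 samples for the slices of A, B and C respectively, then C->Query() returns
    // 5 + 3 + 2 = 10 and caches it in `result`, so later calls never hit the backend again.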

    /// Returns true when flushing this query will potentially wait.
    bool WaitPending() const noexcept {
        return result.has_value();
    }

    u64 Depth() const noexcept {
        return depth;
    }

protected:
    /// Returns the value of the query from the backend API, blocking as needed.
    virtual u64 BlockingQuery(bool async = false) const = 0;

private:
    std::shared_ptr<HostCounter> dependency; ///< Counter to add to this value.
    std::optional<u64> result;               ///< Filled with the already returned value.
    u64 depth;                               ///< Number of nested dependencies.
    u64 base_result = 0;                     ///< Equivalent to nested dependencies value.
};

template <class HostCounter>
class CachedQueryBase {
public:
    explicit CachedQueryBase(VAddr cpu_addr_, u8* host_ptr_)
        : cpu_addr{cpu_addr_}, host_ptr{host_ptr_} {}
    virtual ~CachedQueryBase() = default;

    CachedQueryBase(CachedQueryBase&&) noexcept = default;
    CachedQueryBase(const CachedQueryBase&) = delete;

    CachedQueryBase& operator=(CachedQueryBase&&) noexcept = default;
    CachedQueryBase& operator=(const CachedQueryBase&) = delete;

    /// Flushes the query to guest memory.
    virtual u64 Flush(bool async = false) {
        // When counter is nullptr it means that it's just been reset. We are supposed to write a
        // zero in these cases.
        const u64 value = counter ? counter->Query(async) : 0;
        if (async) {
            return value;
        }
        std::memcpy(host_ptr, &value, sizeof(u64));

        if (timestamp) {
            std::memcpy(host_ptr + TIMESTAMP_OFFSET, &*timestamp, sizeof(u64));
        }
        return value;
    }
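
    // Guest memory layout written by Flush(), derived from the constants below: a short query
    // stores a single u64 result at host_ptr (8 bytes); a timestamped query stores the u64
    // result at offset 0 and the u64 timestamp at TIMESTAMP_OFFSET (8), 16 bytes in total.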

    /// Binds a counter to this query.
    std::optional<u64> BindCounter(std::shared_ptr<HostCounter> counter_,
                                   std::optional<u64> timestamp_) {
        std::optional<u64> result{};
        if (counter) {
            // If there's an old counter set it means the query is being rewritten by the game.
            // To avoid losing the data forever, flush here.
            result = std::make_optional(Flush());
        }
        counter = std::move(counter_);
        timestamp = timestamp_;
        return result;
    }

    VAddr GetCpuAddr() const noexcept {
        return cpu_addr;
    }

    u64 SizeInBytes() const noexcept {
        return SizeInBytes(timestamp.has_value());
    }

    static constexpr u64 SizeInBytes(bool with_timestamp) noexcept {
        return with_timestamp ? LARGE_QUERY_SIZE : SMALL_QUERY_SIZE;
    }

    void SetAsyncJob(AsyncJobId assigned_async_job_) {
        assigned_async_job = assigned_async_job_;
    }

    AsyncJobId GetAsyncJob() const {
        return assigned_async_job;
    }

protected:
    /// Returns true when querying the counter may potentially block.
    bool WaitPending() const noexcept {
        return counter && counter->WaitPending();
    }

private:
    static constexpr std::size_t SMALL_QUERY_SIZE = 8;    // Query size without timestamp.
    static constexpr std::size_t LARGE_QUERY_SIZE = 16;   // Query size with timestamp.
    static constexpr std::intptr_t TIMESTAMP_OFFSET = 8;  // Timestamp offset in a large query.

    VAddr cpu_addr;                       ///< Guest CPU address.
    u8* host_ptr;                         ///< Writable host pointer.
    std::shared_ptr<HostCounter> counter; ///< Host counter to query, owns the dependency tree.
    std::optional<u64> timestamp;         ///< Timestamp to flush to guest memory.
    AsyncJobId assigned_async_job;
};

} // namespace VideoCommon