2018-03-18 21:15:05 +01:00
|
|
|
// Copyright 2018 yuzu Emulator Project
|
|
|
|
// Licensed under GPLv2 or any later version
|
|
|
|
// Refer to the license.txt file included.
|
|
|
|
|
2018-08-11 00:39:37 +02:00
|
|
|
#include "common/assert.h"
|
2019-09-26 01:43:23 +02:00
|
|
|
#include "common/microprofile.h"
|
2019-02-14 18:42:58 +01:00
|
|
|
#include "core/core.h"
|
2019-01-30 03:49:18 +01:00
|
|
|
#include "core/core_timing.h"
|
2020-02-10 15:32:51 +01:00
|
|
|
#include "core/core_timing_util.h"
|
2020-03-25 03:58:49 +01:00
|
|
|
#include "core/frontend/emu_window.h"
|
2019-01-30 03:49:18 +01:00
|
|
|
#include "core/memory.h"
|
2020-04-20 19:20:52 +02:00
|
|
|
#include "core/settings.h"
|
2018-03-18 21:15:05 +01:00
|
|
|
#include "video_core/engines/fermi_2d.h"
|
2019-01-23 00:49:31 +01:00
|
|
|
#include "video_core/engines/kepler_compute.h"
|
2018-09-08 22:58:20 +02:00
|
|
|
#include "video_core/engines/kepler_memory.h"
|
2018-03-18 21:15:05 +01:00
|
|
|
#include "video_core/engines/maxwell_3d.h"
|
2018-06-11 00:02:33 +02:00
|
|
|
#include "video_core/engines/maxwell_dma.h"
|
2018-03-18 21:15:05 +01:00
|
|
|
#include "video_core/gpu.h"
|
2019-03-04 05:54:16 +01:00
|
|
|
#include "video_core/memory_manager.h"
|
2019-01-08 05:32:02 +01:00
|
|
|
#include "video_core/renderer_base.h"
|
2020-03-25 03:58:49 +01:00
|
|
|
#include "video_core/video_core.h"
|
2018-03-18 21:15:05 +01:00
|
|
|
|
|
|
|
namespace Tegra {
|
|
|
|
|
// Profiler scope used by WaitFence to measure time the CPU spends waiting on the GPU.
MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
|
|
|
|
|
2020-03-25 03:58:49 +01:00
|
|
|
// Constructs the GPU and all of its engines.
// NOTE(review): construction order matters — the memory manager must exist before the
// engines that take a reference to it; presumably the rasterizer must also outlive them.
GPU::GPU(Core::System& system, std::unique_ptr<VideoCore::RendererBase>&& renderer_, bool is_async)
    : system{system}, renderer{std::move(renderer_)}, is_async{is_async} {
    auto& rasterizer{renderer->Rasterizer()};
    // GPU virtual memory manager, shared by all engines below.
    memory_manager = std::make_unique<Tegra::MemoryManager>(system, rasterizer);
    // Command list pusher that feeds methods into this GPU.
    dma_pusher = std::make_unique<Tegra::DmaPusher>(system, *this);
    // 3D engine (Maxwell B).
    maxwell_3d = std::make_unique<Engines::Maxwell3D>(system, rasterizer, *memory_manager);
    // 2D blit engine (Fermi 2D A).
    fermi_2d = std::make_unique<Engines::Fermi2D>(rasterizer);
    // Compute engine (Kepler Compute B).
    kepler_compute = std::make_unique<Engines::KeplerCompute>(system, rasterizer, *memory_manager);
    // DMA copy engine (Maxwell DMA Copy A).
    maxwell_dma = std::make_unique<Engines::MaxwellDMA>(system, *memory_manager);
    // Inline-to-memory engine (Kepler Inline To Memory B).
    kepler_memory = std::make_unique<Engines::KeplerMemory>(system, *memory_manager);
}

// Defaulted here (not in the header) so the unique_ptr members' types are complete.
GPU::~GPU() = default;
|
|
|
|
|
2018-08-28 16:57:56 +02:00
|
|
|
/// Returns a mutable reference to the Maxwell 3D engine.
Engines::Maxwell3D& GPU::Maxwell3D() {
    return *maxwell_3d;
}

/// Returns an immutable reference to the Maxwell 3D engine.
const Engines::Maxwell3D& GPU::Maxwell3D() const {
    return *maxwell_3d;
}
|
|
|
|
|
2019-07-15 03:25:13 +02:00
|
|
|
/// Returns a mutable reference to the Kepler compute engine.
Engines::KeplerCompute& GPU::KeplerCompute() {
    return *kepler_compute;
}

/// Returns an immutable reference to the Kepler compute engine.
const Engines::KeplerCompute& GPU::KeplerCompute() const {
    return *kepler_compute;
}
|
|
|
|
|
2018-08-28 16:57:56 +02:00
|
|
|
/// Returns a mutable reference to the GPU memory manager.
MemoryManager& GPU::MemoryManager() {
    return *memory_manager;
}

/// Returns an immutable reference to the GPU memory manager.
const MemoryManager& GPU::MemoryManager() const {
    return *memory_manager;
}
|
|
|
|
|
2018-11-24 05:20:56 +01:00
|
|
|
/// Returns a mutable reference to the GPU DMA pusher.
DmaPusher& GPU::DmaPusher() {
    return *dma_pusher;
}

/// Returns an immutable reference to the GPU DMA pusher.
const DmaPusher& GPU::DmaPusher() const {
    return *dma_pusher;
}
|
|
|
|
|
2019-12-30 13:03:20 +01:00
|
|
|
/// Blocks the caller until the given syncpoint reaches at least `value`.
/// On a synchronous GPU this is a no-op, since CPU and GPU are always in sync.
void GPU::WaitFence(u32 syncpoint_id, u32 value) {
    // Synced GPU, is always in sync
    if (!is_async) {
        return;
    }
    MICROPROFILE_SCOPE(GPU_wait);
    std::unique_lock lock{sync_mutex};
    // Explicit capture list: the implicit capture of `this` through [=] is deprecated
    // in C++20 and obscures that the predicate reads member state.
    sync_cv.wait(lock,
                 [this, syncpoint_id, value] { return syncpoints[syncpoint_id].load() >= value; });
}
|
|
|
|
|
2019-06-07 18:56:30 +02:00
|
|
|
/// Advances a syncpoint by one and services any interrupts that are now satisfied.
void GPU::IncrementSyncPoint(const u32 syncpoint_id) {
    // The counter is atomic, so the increment itself happens before the lock is taken.
    syncpoints[syncpoint_id]++;
    std::lock_guard lock{sync_mutex};
    // Wake any WaitFence callers blocked on this (or any) syncpoint.
    sync_cv.notify_all();
    if (!syncpt_interrupts[syncpoint_id].empty()) {
        u32 value = syncpoints[syncpoint_id].load();
        auto it = syncpt_interrupts[syncpoint_id].begin();
        // Trigger and remove every registered interrupt whose threshold has been reached;
        // erase() returns the next iterator, so only advance manually when nothing fired.
        while (it != syncpt_interrupts[syncpoint_id].end()) {
            if (value >= *it) {
                TriggerCpuInterrupt(syncpoint_id, *it);
                it = syncpt_interrupts[syncpoint_id].erase(it);
                continue;
            }
            it++;
        }
    }
}
|
|
|
|
|
|
|
|
/// Returns the current value of the given syncpoint counter.
u32 GPU::GetSyncpointValue(const u32 syncpoint_id) const {
    return syncpoints[syncpoint_id].load();
}
|
|
|
|
|
2019-06-12 13:52:49 +02:00
|
|
|
void GPU::RegisterSyncptInterrupt(const u32 syncpoint_id, const u32 value) {
|
2019-06-19 02:53:21 +02:00
|
|
|
auto& interrupt = syncpt_interrupts[syncpoint_id];
|
|
|
|
bool contains = std::any_of(interrupt.begin(), interrupt.end(),
|
|
|
|
[value](u32 in_value) { return in_value == value; });
|
|
|
|
if (contains) {
|
|
|
|
return;
|
2019-06-08 02:41:06 +02:00
|
|
|
}
|
2019-06-12 13:52:49 +02:00
|
|
|
syncpt_interrupts[syncpoint_id].emplace_back(value);
|
2019-06-07 18:56:30 +02:00
|
|
|
}
|
|
|
|
|
2019-06-17 21:27:42 +02:00
|
|
|
bool GPU::CancelSyncptInterrupt(const u32 syncpoint_id, const u32 value) {
|
2019-06-18 22:58:29 +02:00
|
|
|
std::lock_guard lock{sync_mutex};
|
2019-06-19 02:53:21 +02:00
|
|
|
auto& interrupt = syncpt_interrupts[syncpoint_id];
|
|
|
|
const auto iter =
|
|
|
|
std::find_if(interrupt.begin(), interrupt.end(),
|
|
|
|
[value](u32 interrupt_value) { return value == interrupt_value; });
|
|
|
|
|
|
|
|
if (iter == interrupt.end()) {
|
|
|
|
return false;
|
2019-06-08 03:13:20 +02:00
|
|
|
}
|
2019-06-19 02:53:21 +02:00
|
|
|
interrupt.erase(iter);
|
|
|
|
return true;
|
2019-06-08 03:13:20 +02:00
|
|
|
}
|
|
|
|
|
2020-04-16 18:29:53 +02:00
|
|
|
/// Queues a flush of the guest memory range [addr, addr + size) and returns the
/// fence value that TickWork will publish once the flush has been performed.
u64 GPU::RequestFlush(VAddr addr, std::size_t size) {
    std::lock_guard lock{flush_request_mutex};
    const u64 fence = ++last_flush_fence;
    flush_requests.emplace_back(fence, addr, size);
    return fence;
}
|
|
|
|
|
|
|
|
void GPU::TickWork() {
|
|
|
|
std::unique_lock lck{flush_request_mutex};
|
|
|
|
while (!flush_requests.empty()) {
|
|
|
|
auto& request = flush_requests.front();
|
|
|
|
const u64 fence = request.fence;
|
2020-04-16 18:29:53 +02:00
|
|
|
const VAddr addr = request.addr;
|
2020-02-20 16:55:32 +01:00
|
|
|
const std::size_t size = request.size;
|
|
|
|
flush_requests.pop_front();
|
|
|
|
flush_request_mutex.unlock();
|
|
|
|
renderer->Rasterizer().FlushRegion(addr, size);
|
|
|
|
current_flush_fence.store(fence);
|
|
|
|
flush_request_mutex.lock();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-02-10 15:32:51 +01:00
|
|
|
/// Converts the current CPU time into GPU clock ticks.
u64 GPU::GetTicks() const {
    // These values were reverse engineered by fincs from NVN.
    // The GPU clock is reported in units of 384/625 nanoseconds.
    constexpr u64 gpu_ticks_num = 384;
    constexpr u64 gpu_ticks_den = 625;

    const u64 cpu_ticks = system.CoreTiming().GetTicks();
    u64 nanoseconds = Core::Timing::CyclesToNs(cpu_ticks).count();
    if (Settings::values.use_fast_gpu_time) {
        // Fast GPU time: report a much smaller elapsed time to games that spin on it.
        nanoseconds /= 256;
    }
    // Split the scaling into quotient and remainder so the multiplication by
    // gpu_ticks_num cannot overflow for large nanosecond counts.
    const u64 nanoseconds_num = nanoseconds / gpu_ticks_den;
    const u64 nanoseconds_rem = nanoseconds % gpu_ticks_den;
    return nanoseconds_num * gpu_ticks_num + (nanoseconds_rem * gpu_ticks_num) / gpu_ticks_den;
}
|
|
|
|
|
2019-07-26 20:20:43 +02:00
|
|
|
/// Flushes any queued rasterizer commands to the host GPU.
void GPU::FlushCommands() {
    renderer->Rasterizer().FlushCommands();
}

/// Synchronizes guest-visible memory with the host caches via the rasterizer.
void GPU::SyncGuestHost() {
    renderer->Rasterizer().SyncGuestHost();
}

/// Called when a command list finishes; releases any fences held by the rasterizer.
void GPU::OnCommandListEnd() {
    renderer->Rasterizer().ReleaseFences();
}
|
2019-01-30 03:49:18 +01:00
|
|
|
// Note that, traditionally, methods are treated as 4-byte addressable locations, and hence
// their numbers are written down multiplied by 4 in Docs. Here we are not multiply by 4.
// So the values you see in docs might be multiplied by 4.
/// Methods handled directly by the command puller (as opposed to a bound engine).
enum class BufferMethods {
    BindObject = 0x0,
    Nop = 0x2,
    SemaphoreAddressHigh = 0x4,
    SemaphoreAddressLow = 0x5,
    SemaphoreSequence = 0x6,
    SemaphoreTrigger = 0x7,
    NotifyIntr = 0x8,
    WrcacheFlush = 0x9,
    Unk28 = 0xA,
    UnkCacheFlush = 0xB,
    RefCnt = 0x14,
    SemaphoreAcquire = 0x1A,
    SemaphoreRelease = 0x1B,
    FenceValue = 0x1C,
    FenceAction = 0x1D,
    Unk78 = 0x1E,
    Unk7c = 0x1F,
    Yield = 0x20,
    // Methods at or above this value are forwarded to the bound engine instead.
    NonPullerMethods = 0x40,
};
|
|
|
|
|
|
|
|
/// Semaphore operation encoded in the low bits of regs.semaphore_trigger.
enum class GpuSemaphoreOperation {
    AcquireEqual = 0x1,
    WriteLong = 0x2,
    AcquireGequal = 0x4,
    AcquireMask = 0x8,
};
|
|
|
|
|
|
|
|
void GPU::CallMethod(const MethodCall& method_call) {
|
2018-12-01 08:05:19 +01:00
|
|
|
LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method_call.method,
|
|
|
|
method_call.subchannel);
|
2018-11-24 05:20:56 +01:00
|
|
|
|
|
|
|
ASSERT(method_call.subchannel < bound_engines.size());
|
|
|
|
|
2020-04-20 08:16:56 +02:00
|
|
|
if (ExecuteMethodOnEngine(method_call.method)) {
|
2019-01-30 03:49:18 +01:00
|
|
|
CallEngineMethod(method_call);
|
|
|
|
} else {
|
|
|
|
CallPullerMethod(method_call);
|
2018-11-24 05:20:56 +01:00
|
|
|
}
|
2019-01-30 03:49:18 +01:00
|
|
|
}
|
|
|
|
|
2020-04-20 08:16:56 +02:00
|
|
|
/// Dispatches a batch of `amount` arguments for one method. Engine-range methods
/// go straight to the bound engine; puller methods are replayed one at a time.
void GPU::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
                          u32 methods_pending) {
    LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method,
              subchannel);

    ASSERT(subchannel < bound_engines.size());

    if (!ExecuteMethodOnEngine(method)) {
        // The puller has no batched path, so feed it each argument individually,
        // decrementing the pending count as the batch is consumed.
        for (u32 i = 0; i < amount; ++i) {
            CallPullerMethod({method, base_start[i], subchannel, methods_pending - i});
        }
    } else {
        CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending);
    }
}
|
|
|
|
|
|
|
|
/// Returns true when the method number falls in the engine range rather than the
/// puller range (see BufferMethods::NonPullerMethods).
bool GPU::ExecuteMethodOnEngine(u32 method) {
    return static_cast<BufferMethods>(method) >= BufferMethods::NonPullerMethods;
}
|
2018-11-24 05:20:56 +01:00
|
|
|
|
2019-01-30 03:49:18 +01:00
|
|
|
/// Executes a method handled by the command puller itself (not by an engine).
void GPU::CallPullerMethod(const MethodCall& method_call) {
    // Mirror the argument into the puller register file before dispatching.
    regs.reg_array[method_call.method] = method_call.argument;
    const auto method = static_cast<BufferMethods>(method_call.method);

    switch (method) {
    case BufferMethods::BindObject: {
        ProcessBindMethod(method_call);
        break;
    }
    // These methods only need their register write above; no extra action.
    case BufferMethods::Nop:
    case BufferMethods::SemaphoreAddressHigh:
    case BufferMethods::SemaphoreAddressLow:
    case BufferMethods::SemaphoreSequence:
    case BufferMethods::RefCnt:
    case BufferMethods::UnkCacheFlush:
    case BufferMethods::WrcacheFlush:
    case BufferMethods::FenceValue:
    case BufferMethods::FenceAction:
        break;
    case BufferMethods::SemaphoreTrigger: {
        ProcessSemaphoreTriggerMethod();
        break;
    }
    case BufferMethods::NotifyIntr: {
        // TODO(Kmather73): Research and implement this method.
        LOG_ERROR(HW_GPU, "Special puller engine method NotifyIntr not implemented");
        break;
    }
    case BufferMethods::Unk28: {
        // TODO(Kmather73): Research and implement this method.
        LOG_ERROR(HW_GPU, "Special puller engine method Unk28 not implemented");
        break;
    }
    case BufferMethods::SemaphoreAcquire: {
        ProcessSemaphoreAcquire();
        break;
    }
    case BufferMethods::SemaphoreRelease: {
        ProcessSemaphoreRelease();
        break;
    }
    case BufferMethods::Yield: {
        // TODO(Kmather73): Research and implement this method.
        LOG_ERROR(HW_GPU, "Special puller engine method Yield not implemented");
        break;
    }
    default:
        LOG_ERROR(HW_GPU, "Special puller engine method {:X} not implemented",
                  static_cast<u32>(method));
        break;
    }
}
|
2018-12-04 05:52:18 +01:00
|
|
|
|
2019-01-30 03:49:18 +01:00
|
|
|
/// Forwards a single method call to the engine bound to the call's subchannel.
void GPU::CallEngineMethod(const MethodCall& method_call) {
    const EngineID engine = bound_engines[method_call.subchannel];

    switch (engine) {
    case EngineID::FERMI_TWOD_A:
        fermi_2d->CallMethod(method_call);
        break;
    case EngineID::MAXWELL_B:
        maxwell_3d->CallMethod(method_call);
        break;
    case EngineID::KEPLER_COMPUTE_B:
        kepler_compute->CallMethod(method_call);
        break;
    case EngineID::MAXWELL_DMA_COPY_A:
        maxwell_dma->CallMethod(method_call);
        break;
    case EngineID::KEPLER_INLINE_TO_MEMORY_B:
        kepler_memory->CallMethod(method_call);
        break;
    default:
        UNIMPLEMENTED_MSG("Unimplemented engine");
    }
}
|
|
|
|
|
2020-04-20 08:16:56 +02:00
|
|
|
/// Forwards a batched method call (one method, `amount` arguments) to the engine
/// bound to the given subchannel.
void GPU::CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
                                u32 methods_pending) {
    const EngineID engine = bound_engines[subchannel];

    switch (engine) {
    case EngineID::FERMI_TWOD_A:
        fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending);
        break;
    case EngineID::MAXWELL_B:
        maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending);
        break;
    case EngineID::KEPLER_COMPUTE_B:
        kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending);
        break;
    case EngineID::MAXWELL_DMA_COPY_A:
        maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending);
        break;
    case EngineID::KEPLER_INLINE_TO_MEMORY_B:
        kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending);
        break;
    default:
        UNIMPLEMENTED_MSG("Unimplemented engine");
    }
}
|
|
|
|
|
2019-01-30 03:49:18 +01:00
|
|
|
/// Binds the subchannel named in the call to the engine id carried in its argument.
void GPU::ProcessBindMethod(const MethodCall& method_call) {
    LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel,
              method_call.argument);
    const auto engine_id = static_cast<EngineID>(method_call.argument);
    bound_engines[method_call.subchannel] = engine_id;
}
|
|
|
|
|
|
|
|
/// Executes the semaphore operation selected by regs.semaphore_trigger:
/// either writes a 16-byte report block to guest memory (WriteLong) or performs
/// one of the acquire comparisons against the word at the semaphore address.
void GPU::ProcessSemaphoreTriggerMethod() {
    // Only the low nibble of the trigger register selects the operation.
    const auto semaphoreOperationMask = 0xF;
    const auto op =
        static_cast<GpuSemaphoreOperation>(regs.semaphore_trigger & semaphoreOperationMask);
    if (op == GpuSemaphoreOperation::WriteLong) {
        // 16-byte report layout written to guest memory: sequence, padding, timestamp.
        struct Block {
            u32 sequence;
            u32 zeros = 0;
            u64 timestamp;
        };

        Block block{};
        block.sequence = regs.semaphore_sequence;
        // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of
        // CoreTiming
        block.timestamp = GetTicks();
        memory_manager->WriteBlock(regs.semaphore_address.SemaphoreAddress(), &block,
                                   sizeof(block));
    } else {
        const u32 word{memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress())};
        // An acquire succeeds immediately when its comparison already holds;
        // AcquireGequal uses a signed wrap-aware difference rather than plain >=.
        if ((op == GpuSemaphoreOperation::AcquireEqual && word == regs.semaphore_sequence) ||
            (op == GpuSemaphoreOperation::AcquireGequal &&
             static_cast<s32>(word - regs.semaphore_sequence) > 0) ||
            (op == GpuSemaphoreOperation::AcquireMask && (word & regs.semaphore_sequence))) {
            // Nothing to do in this case
        } else {
            // Comparison failed: record the pending acquire in the puller registers.
            regs.acquire_source = true;
            regs.acquire_value = regs.semaphore_sequence;
            if (op == GpuSemaphoreOperation::AcquireEqual) {
                regs.acquire_active = true;
                regs.acquire_mode = false;
            } else if (op == GpuSemaphoreOperation::AcquireGequal) {
                regs.acquire_active = true;
                regs.acquire_mode = true;
            } else if (op == GpuSemaphoreOperation::AcquireMask) {
                // TODO(kemathe) The acquire mask operation waits for a value that, ANDed with
                // semaphore_sequence, gives a non-0 result
                LOG_ERROR(HW_GPU, "Invalid semaphore operation AcquireMask not implemented");
            } else {
                LOG_ERROR(HW_GPU, "Invalid semaphore operation");
            }
        }
    }
}
|
|
|
|
|
|
|
|
/// Writes the release value to the semaphore address in guest memory.
void GPU::ProcessSemaphoreRelease() {
    memory_manager->Write<u32>(regs.semaphore_address.SemaphoreAddress(), regs.semaphore_release);
}
|
|
|
|
|
|
|
|
/// Handles SemaphoreAcquire: if the word in guest memory does not yet equal the
/// expected value, arms the pending-acquire state in the puller registers.
void GPU::ProcessSemaphoreAcquire() {
    const u32 word = memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress());
    const auto value = regs.semaphore_acquire;
    if (word != value) {
        regs.acquire_active = true;
        regs.acquire_value = value;
        // TODO(kemathe73) figure out how to do the acquire_timeout
        regs.acquire_mode = false;
        regs.acquire_source = false;
    }
}
|
|
|
|
|
2018-03-18 21:15:05 +01:00
|
|
|
} // namespace Tegra
|