nvhost_ctrl: Refactor usage of gpu.LockSync()

This lock appears to be used only to protect a later GPU function call, so the locking can be moved into that call instead.
This commit is contained in:
ameerj 2021-10-01 23:10:55 -04:00
parent 427bf76e62
commit 6c0d902373
3 changed files with 16 additions and 35 deletions

View file

@ -111,7 +111,6 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector
event.event->GetWritableEvent().Signal();
return NvResult::Success;
}
auto lock = gpu.LockSync();
const u32 current_syncpoint_value = event.fence.value;
const s32 diff = current_syncpoint_value - params.threshold;
if (diff >= 0) {
@ -132,7 +131,11 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector
}
EventState status = events_interface.status[event_id];
if (event_id < MaxNvEvents || status == EventState::Free || status == EventState::Registered) {
const bool bad_parameter = status != EventState::Free && status != EventState::Registered;
if (bad_parameter) {
std::memcpy(output.data(), &params, sizeof(params));
return NvResult::BadParameter;
}
events_interface.SetEventStatus(event_id, EventState::Waiting);
events_interface.assigned_syncpt[event_id] = params.syncpt_id;
events_interface.assigned_value[event_id] = target_value;
@ -147,9 +150,6 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector
std::memcpy(output.data(), &params, sizeof(params));
return NvResult::Timeout;
}
std::memcpy(output.data(), &params, sizeof(params));
return NvResult::BadParameter;
}
NvResult nvhost_ctrl::IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output) {
IocCtrlEventRegisterParams params{};

View file

@ -262,6 +262,7 @@ struct GPU::Impl {
}
void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) {
std::lock_guard lock{sync_mutex};
auto& interrupt = syncpt_interrupts.at(syncpoint_id);
bool contains = std::any_of(interrupt.begin(), interrupt.end(),
[value](u32 in_value) { return in_value == value; });
@ -300,10 +301,6 @@ struct GPU::Impl {
return nanoseconds_num * gpu_ticks_num + (nanoseconds_rem * gpu_ticks_num) / gpu_ticks_den;
}
[[nodiscard]] std::unique_lock<std::mutex> LockSync() {
return std::unique_lock{sync_mutex};
}
/// Reports whether asynchronous GPU operation is enabled.
[[nodiscard]] bool IsAsync() const {
    const bool async_enabled = is_async;
    return async_enabled;
}
@ -862,10 +859,6 @@ u64 GPU::GetTicks() const {
return impl->GetTicks();
}
/// Forwards to the implementation: locks the sync mutex and returns the held lock.
std::unique_lock<std::mutex> GPU::LockSync() {
    auto held_lock = impl->LockSync();
    return held_lock;
}
/// Forwards to the implementation's async-mode query.
bool GPU::IsAsync() const {
    const auto async_mode = impl->IsAsync();
    return async_mode;
}

View file

@ -5,22 +5,12 @@
#pragma once
#include <memory>
#include <mutex>
#include "common/bit_field.h"
#include "common/common_types.h"
#include "video_core/cdma_pusher.h"
#include "video_core/framebuffer_config.h"
// Integer representation of a host pointer, used as a cache key.
using CacheAddr = std::uintptr_t;
// Converts a host pointer to its integer cache-address form.
[[nodiscard]] inline CacheAddr ToCacheAddr(const void* host_ptr) {
    return reinterpret_cast<CacheAddr>(host_ptr);
}
// Recovers a writable host pointer from a cache address.
// NOTE(review): assumes the address originally came from ToCacheAddr on a
// non-const pointer — verify at call sites, since constness is cast away here.
[[nodiscard]] inline u8* FromCacheAddr(CacheAddr cache_addr) {
    return reinterpret_cast<u8*>(cache_addr);
}
namespace Core {
namespace Frontend {
class EmuWindow;
@ -230,8 +220,6 @@ public:
[[nodiscard]] u64 GetTicks() const;
[[nodiscard]] std::unique_lock<std::mutex> LockSync();
[[nodiscard]] bool IsAsync() const;
[[nodiscard]] bool UseNvdec() const;