2022-04-23 10:59:50 +02:00
|
|
|
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
|
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2020-12-29 08:45:28 +01:00
|
|
|
|
|
|
|
#include "core/arm/exclusive_monitor.h"
|
|
|
|
#include "core/core.h"
|
|
|
|
#include "core/hle/kernel/k_condition_variable.h"
|
2021-04-04 04:11:46 +02:00
|
|
|
#include "core/hle/kernel/k_linked_list.h"
|
2021-04-24 07:04:28 +02:00
|
|
|
#include "core/hle/kernel/k_process.h"
|
2020-12-29 08:45:28 +01:00
|
|
|
#include "core/hle/kernel/k_scheduler.h"
|
|
|
|
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
2020-12-31 08:01:08 +01:00
|
|
|
#include "core/hle/kernel/k_thread.h"
|
2021-11-10 06:28:09 +01:00
|
|
|
#include "core/hle/kernel/k_thread_queue.h"
|
2020-12-29 08:45:28 +01:00
|
|
|
#include "core/hle/kernel/kernel.h"
|
|
|
|
#include "core/hle/kernel/svc_common.h"
|
|
|
|
#include "core/hle/kernel/svc_results.h"
|
|
|
|
#include "core/memory.h"
|
|
|
|
|
|
|
|
namespace Kernel {
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
bool ReadFromUser(Core::System& system, u32* out, VAddr address) {
    // Fetch a 32-bit value from guest memory into *out.
    // NOTE(review): always reports success — access validation presumably
    // happens elsewhere (or is a TODO); confirm against callers.
    const u32 value = system.Memory().Read32(address);
    *out = value;
    return true;
}
|
|
|
|
|
|
|
|
bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
    // Store a 32-bit value from *p into guest memory.
    // NOTE(review): always reports success — access validation presumably
    // happens elsewhere (or is a TODO); confirm against callers.
    const u32 value = *p;
    system.Memory().Write32(address, value);
    return true;
}
|
|
|
|
|
|
|
|
// Atomically updates the mutex tag at `address` using the exclusive monitor:
// if the current value is zero the tag becomes `if_zero`, otherwise
// `new_orr_mask` is OR'd into it. The pre-update value is returned in *out.
//
// Fixed: the original retried a failed exclusive store via tail recursion,
// which relies on the compiler performing tail-call optimization to avoid
// unbounded stack growth under contention. A loop makes the retry explicit
// and bounded in stack usage.
bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
                      u32 new_orr_mask) {
    auto& monitor = system.Monitor();
    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();

    u32 expected{};
    while (true) {
        // Load the value from the address.
        expected = monitor.ExclusiveRead32(current_core, address);

        // Orr in the new mask.
        u32 value = expected | new_orr_mask;

        // If the value is zero, use the if_zero value, otherwise use the newly orr'd value.
        if (!expected) {
            value = if_zero;
        }

        // Try to store; if the exclusive store fails (another core wrote the
        // line), loop and retry the whole read-modify-write.
        if (monitor.ExclusiveWrite32(current_core, address, value)) {
            break;
        }
    }

    // We're done; report the value observed before the update.
    *out = expected;
    return true;
}
|
|
|
|
|
2021-11-10 06:28:09 +01:00
|
|
|
class ThreadQueueImplForKConditionVariableWaitForAddress final : public KThreadQueue {
|
|
|
|
public:
|
|
|
|
explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel_)
|
|
|
|
: KThreadQueue(kernel_) {}
|
|
|
|
|
2021-12-06 08:49:26 +01:00
|
|
|
void CancelWait(KThread* waiting_thread, ResultCode wait_result,
|
|
|
|
bool cancel_timer_task) override {
|
2021-11-10 06:28:09 +01:00
|
|
|
// Remove the thread as a waiter from its owner.
|
|
|
|
waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread);
|
|
|
|
|
|
|
|
// Invoke the base cancel wait handler.
|
|
|
|
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
class ThreadQueueImplForKConditionVariableWaitConditionVariable final : public KThreadQueue {
|
|
|
|
private:
|
|
|
|
KConditionVariable::ThreadTree* m_tree;
|
|
|
|
|
|
|
|
public:
|
|
|
|
explicit ThreadQueueImplForKConditionVariableWaitConditionVariable(
|
|
|
|
KernelCore& kernel_, KConditionVariable::ThreadTree* t)
|
|
|
|
: KThreadQueue(kernel_), m_tree(t) {}
|
|
|
|
|
2021-12-06 08:49:26 +01:00
|
|
|
void CancelWait(KThread* waiting_thread, ResultCode wait_result,
|
|
|
|
bool cancel_timer_task) override {
|
2021-11-10 06:28:09 +01:00
|
|
|
// Remove the thread as a waiter from its owner.
|
|
|
|
if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) {
|
|
|
|
owner->RemoveWaiter(waiting_thread);
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the thread is waiting on a condvar, remove it from the tree.
|
|
|
|
if (waiting_thread->IsWaitingForConditionVariable()) {
|
|
|
|
m_tree->erase(m_tree->iterator_to(*waiting_thread));
|
|
|
|
waiting_thread->ClearConditionVariable();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Invoke the base cancel wait handler.
|
|
|
|
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2020-12-29 08:45:28 +01:00
|
|
|
} // namespace
|
|
|
|
|
|
|
|
// Binds the condition variable to the owning system instance and caches a
// reference to its kernel (used for scheduler locking throughout).
KConditionVariable::KConditionVariable(Core::System& system_)
    : system{system_}, kernel{system.Kernel()} {}

KConditionVariable::~KConditionVariable() = default;
|
|
|
|
|
|
|
|
// Releases the mutex at `addr` held by the current thread, transferring
// ownership to the highest-priority waiter (if any) and writing the new tag
// value back to userspace.
//
// Returns ResultSuccess, or ResultInvalidCurrentMemory if the userspace write
// fails; a failure result is also forwarded to the woken thread's wait.
//
// Cleanup: the original initialized `result` to ResultSuccess and then
// redundantly reassigned ResultSuccess in the likely branch; the result is
// now computed in a single conditional expression.
ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
    KThread* owner_thread = GetCurrentThreadPointer(kernel);

    // Signal the address.
    {
        KScopedSchedulerLock sl(kernel);

        // Remove waiter thread.
        s32 num_waiters{};
        KThread* next_owner_thread =
            owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);

        // Determine the next tag.
        u32 next_value{};
        if (next_owner_thread != nullptr) {
            next_value = next_owner_thread->GetAddressKeyValue();
            // More than one waiter remains: keep the wait mask set so future
            // unlockers know to go through the kernel.
            if (num_waiters > 1) {
                next_value |= Svc::HandleWaitMask;
            }

            // Write the value to userspace.
            const ResultCode result = WriteToUser(system, addr, std::addressof(next_value))
                                          ? ResultSuccess
                                          : ResultInvalidCurrentMemory;

            // Signal the next owner thread with the outcome of the handoff.
            next_owner_thread->EndWait(result);
            return result;
        } else {
            // No waiters: just write the (zero) value to userspace.
            R_UNLESS(WriteToUser(system, addr, std::addressof(next_value)),
                     ResultInvalidCurrentMemory);

            return ResultSuccess;
        }
    }
}
|
|
|
|
|
|
|
|
// Blocks the current thread attempting to acquire the mutex at `addr`, which
// is currently tagged as owned by `handle` (with the wait mask set). The
// current thread is registered as a waiter on the owner and sleeps until
// SignalToAddress (or cancellation) ends the wait.
//
// Returns ResultSuccess on acquisition, ResultTerminationRequested,
// ResultInvalidCurrentMemory, ResultInvalidHandle, or the result delivered by
// whoever ended the wait.
ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
    KThread* cur_thread = GetCurrentThreadPointer(kernel);
    ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);

    // Wait for the address.
    KThread* owner_thread{};
    {
        KScopedSchedulerLock sl(kernel);

        // Check if the thread should terminate.
        R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);

        // Read the tag from userspace.
        u32 test_tag{};
        R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr), ResultInvalidCurrentMemory);

        // If the tag isn't the handle (with wait mask), the lock was released
        // between the guest's check and this syscall — nothing to wait for.
        R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask));

        // Get the lock owner thread. ReleasePointerUnsafe keeps an open
        // reference that we close after the wait completes (below).
        owner_thread = kernel.CurrentProcess()
                           ->GetHandleTable()
                           .GetObjectWithoutPseudoHandle<KThread>(handle)
                           .ReleasePointerUnsafe();
        R_UNLESS(owner_thread != nullptr, ResultInvalidHandle);

        // Update the lock: record what we're waiting on and enqueue ourselves
        // on the owner's waiter list.
        cur_thread->SetAddressKey(addr, value);
        owner_thread->AddWaiter(cur_thread);

        // Begin waiting.
        cur_thread->BeginWait(std::addressof(wait_queue));
        cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
        cur_thread->SetMutexWaitAddressForDebugging(addr);
    }

    // Close our reference to the owner thread, now that the wait is over.
    owner_thread->Close();

    // Get the wait result (set by whoever ended the wait).
    return cur_thread->GetWaitResult();
}
|
|
|
|
|
2021-11-10 07:06:49 +01:00
|
|
|
// Wakes a single condvar waiter: atomically re-tags the thread's mutex with
// its own tag (or ORs in the wait mask if the mutex is held), then either
// ends the thread's wait directly or re-parks it as a waiter on the current
// mutex owner. Caller must hold the scheduler lock.
void KConditionVariable::SignalImpl(KThread* thread) {
    // Check pre-conditions.
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Update the tag.
    VAddr address = thread->GetAddressKey();
    u32 own_tag = thread->GetAddressKeyValue();

    u32 prev_tag{};
    bool can_access{};
    {
        // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
        // TODO(bunnei): We should call CanAccessAtomic(..) here.
        can_access = true;
        if (can_access) [[likely]] {
            // prev_tag receives the tag value before the update.
            UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
                             Svc::HandleWaitMask);
        }
    }

    if (can_access) [[likely]] {
        if (prev_tag == Svc::InvalidHandle) {
            // If nobody held the lock previously, we're all good — the woken
            // thread now owns the mutex.
            thread->EndWait(ResultSuccess);
        } else {
            // Get the previous owner (strip the wait-mask bit to recover the
            // owner's thread handle).
            KThread* owner_thread = kernel.CurrentProcess()
                                        ->GetHandleTable()
                                        .GetObjectWithoutPseudoHandle<KThread>(
                                            static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask))
                                        .ReleasePointerUnsafe();

            if (owner_thread) [[likely]] {
                // Add the thread as a waiter on the owner; it stays asleep
                // until the owner releases the mutex.
                owner_thread->AddWaiter(thread);
                owner_thread->Close();
            } else {
                // The lock was tagged with a thread that doesn't exist.
                thread->EndWait(ResultInvalidState);
            }
        }
    } else {
        // If the address wasn't accessible, note so.
        thread->EndWait(ResultInvalidCurrentMemory);
    }
}
|
|
|
|
|
|
|
|
// Signals up to `count` threads waiting on condvar key `cv_key` (count <= 0
// signals all of them). When the tree holds no more waiters for the key, the
// userspace "has waiter" flag at cv_key is cleared.
void KConditionVariable::Signal(u64 cv_key, s32 count) {
    // Perform signaling.
    s32 num_waiters{};
    {
        KScopedSchedulerLock sl(kernel);

        // nfind_key({cv_key, -1}) positions at the first waiter for this key.
        auto it = thread_tree.nfind_key({cv_key, -1});
        while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
               (it->GetConditionVariableKey() == cv_key)) {
            KThread* target_thread = std::addressof(*it);

            this->SignalImpl(target_thread);
            // erase() returns the next iterator, keeping the traversal valid
            // while removing the signaled thread from the tree.
            it = thread_tree.erase(it);
            target_thread->ClearConditionVariable();
            ++num_waiters;
        }

        // If we have no waiters remaining for this key, clear the has-waiter
        // flag in userspace (write zero).
        if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) {
            const u32 has_waiter_flag{};
            WriteToUser(system, cv_key, std::addressof(has_waiter_flag));
        }
    }
}
|
|
|
|
|
|
|
|
// Atomically releases the mutex at `addr` and waits on condvar key `key` for
// up to `timeout` nanoseconds (negative presumably means wait forever — TODO
// confirm against svc semantics; zero times out immediately).
//
// Returns ResultSuccess when signaled, ResultTimedOut,
// ResultTerminationRequested, or ResultInvalidCurrentMemory.
ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
    // Prepare to wait.
    KThread* cur_thread = GetCurrentThreadPointer(kernel);
    ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(
        kernel, std::addressof(thread_tree));

    {
        // Takes the scheduler lock and arms the wakeup timer; CancelSleep()
        // must be called on every early-exit path before returning.
        KScopedSchedulerLockAndSleep slp(kernel, cur_thread, timeout);

        // Check that the thread isn't terminating.
        if (cur_thread->IsTerminationRequested()) {
            slp.CancelSleep();
            return ResultTerminationRequested;
        }

        // Update the value and process for the next owner.
        {
            // Remove waiter thread (hand the mutex to the next waiter, if any).
            s32 num_waiters{};
            KThread* next_owner_thread =
                cur_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);

            // Update for the next owner thread.
            u32 next_value{};
            if (next_owner_thread != nullptr) {
                // Get the next tag value; keep the wait mask if other waiters remain.
                next_value = next_owner_thread->GetAddressKeyValue();
                if (num_waiters > 1) {
                    next_value |= Svc::HandleWaitMask;
                }

                // Wake up the next owner.
                next_owner_thread->EndWait(ResultSuccess);
            }

            // Write to the cv key (mark that a waiter exists on this condvar).
            {
                const u32 has_waiter_flag = 1;
                WriteToUser(system, key, std::addressof(has_waiter_flag));
                // TODO(bunnei): We should call DataMemoryBarrier(..) here.
            }

            // Write the new mutex tag to userspace.
            if (!WriteToUser(system, addr, std::addressof(next_value))) {
                slp.CancelSleep();
                return ResultInvalidCurrentMemory;
            }
        }

        // If timeout is zero, time out (the mutex has already been released above).
        R_UNLESS(timeout != 0, ResultTimedOut);

        // Update condition variable tracking and register in the waiter tree.
        cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
        thread_tree.insert(*cur_thread);

        // Begin waiting.
        cur_thread->BeginWait(std::addressof(wait_queue));
        cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
        cur_thread->SetMutexWaitAddressForDebugging(addr);
    }

    // Get the wait result (set by Signal, timeout, or cancellation).
    return cur_thread->GetWaitResult();
}
|
|
|
|
|
|
|
|
} // namespace Kernel
|