From e18ee8d681bf05e8c1480dd1ad7133778ead773d Mon Sep 17 00:00:00 2001 From: bunnei Date: Mon, 16 Nov 2020 21:02:45 -0800 Subject: [PATCH 01/26] hle: kernel: Port KAffinityMask from Mesosphere. --- src/core/CMakeLists.txt | 1 + src/core/hle/kernel/k_affinity_mask.h | 62 +++++++++++++++++++++++++++ src/core/hle/kernel/scheduler.cpp | 10 ++--- src/core/hle/kernel/svc.cpp | 2 +- src/core/hle/kernel/thread.cpp | 11 ++--- src/core/hle/kernel/thread.h | 6 +-- src/yuzu/debugger/wait_tree.cpp | 4 +- 7 files changed, 80 insertions(+), 16 deletions(-) create mode 100644 src/core/hle/kernel/k_affinity_mask.h diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 66de33799d..adabe8dbdb 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -152,6 +152,7 @@ add_library(core STATIC hle/kernel/handle_table.h hle/kernel/hle_ipc.cpp hle/kernel/hle_ipc.h + hle/kernel/k_affinity_mask.h hle/kernel/kernel.cpp hle/kernel/kernel.h hle/kernel/memory/address_space_info.cpp diff --git a/src/core/hle/kernel/k_affinity_mask.h b/src/core/hle/kernel/k_affinity_mask.h new file mode 100644 index 0000000000..fa2a720a4e --- /dev/null +++ b/src/core/hle/kernel/k_affinity_mask.h @@ -0,0 +1,62 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +// This file references various implementation details from Atmosphere, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX. + +#pragma once + +#include "common/assert.h" +#include "common/common_types.h" +#include "core/hardware_properties.h" + +namespace Kernel { + +class KAffinityMask { +private: + static constexpr u64 AllowedAffinityMask = (1ul << Core::Hardware::NUM_CPU_CORES) - 1; + +private: + u64 mask; + +private: + static constexpr u64 GetCoreBit(s32 core) { + ASSERT(0 <= core && core < static_cast(Core::Hardware::NUM_CPU_CORES)); + return (1ull << core); + } + +public: + constexpr KAffinityMask() : mask(0) { + ASSERT(this); + } + + constexpr u64 GetAffinityMask() const { + return this->mask; + } + + constexpr void SetAffinityMask(u64 new_mask) { + ASSERT((new_mask & ~AllowedAffinityMask) == 0); + this->mask = new_mask; + } + + constexpr bool GetAffinity(s32 core) const { + return this->mask & GetCoreBit(core); + } + + constexpr void SetAffinity(s32 core, bool set) { + ASSERT(0 <= core && core < static_cast(Core::Hardware::NUM_CPU_CORES)); + + if (set) { + this->mask |= GetCoreBit(core); + } else { + this->mask &= ~GetCoreBit(core); + } + } + + constexpr void SetAll() { + this->mask = AllowedAffinityMask; + } +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp index 5c63b0b4a2..9a969fdb55 100644 --- a/src/core/hle/kernel/scheduler.cpp +++ b/src/core/hle/kernel/scheduler.cpp @@ -452,7 +452,7 @@ void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) { for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { if (core != static_cast(thread->processor_id) && - ((thread->affinity_mask >> core) & 1) != 0) { + thread->affinity_mask.GetAffinity(core)) { Unsuggest(thread->current_priority, core, thread); } } @@ -464,7 +464,7 @@ void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) { for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { if (core != static_cast(thread->processor_id) && - ((thread->affinity_mask >> core) & 1) != 0) { + thread->affinity_mask.GetAffinity(core)) { Suggest(thread->current_priority, 
core, thread); } } @@ -484,7 +484,7 @@ void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priorit for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { if (core != static_cast(thread->processor_id) && - ((thread->affinity_mask >> core) & 1) != 0) { + thread->affinity_mask.GetAffinity(core)) { Unsuggest(old_priority, core, thread); } } @@ -500,7 +500,7 @@ void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priorit for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { if (core != static_cast(thread->processor_id) && - ((thread->affinity_mask >> core) & 1) != 0) { + thread->affinity_mask.GetAffinity(core)) { Suggest(thread->current_priority, core, thread); } } @@ -527,7 +527,7 @@ void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinit } for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { - if (((thread->affinity_mask >> core) & 1) != 0) { + if (thread->affinity_mask.GetAffinity(core)) { if (core == static_cast(thread->processor_id)) { Schedule(thread->current_priority, core, thread); } else { diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 95d6e2b4d8..0cd712d09d 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -2003,7 +2003,7 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, } *core = thread->GetIdealCore(); - *mask = thread->GetAffinityMask(); + *mask = thread->GetAffinityMask().GetAffinityMask(); return RESULT_SUCCESS; } diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 7d1eb2c6ed..38b4a09876 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -191,7 +191,7 @@ ResultVal> Thread::Create(Core::System& system, ThreadTy thread->last_running_ticks = 0; thread->processor_id = processor_id; thread->ideal_core = processor_id; - thread->affinity_mask = 1ULL << processor_id; + thread->affinity_mask.SetAffinity(processor_id, true); thread->wait_objects = nullptr; thread->mutex_wait_address = 0; thread->condvar_wait_address = 0; @@ -479,15 +479,16 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { } if (use_override) { ideal_core_override = new_core; - affinity_mask_override = new_affinity_mask; } else { - const u64 old_affinity_mask = std::exchange(affinity_mask, new_affinity_mask); + const auto old_affinity_mask = affinity_mask.GetAffinityMask(); + affinity_mask.SetAffinityMask(new_affinity_mask); ideal_core = new_core; if (old_affinity_mask != new_affinity_mask) { const s32 old_core = processor_id; - if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) { + if (processor_id >= 0 && !affinity_mask.GetAffinity(processor_id)) { if (static_cast(ideal_core) < 0) { - processor_id = HighestSetCore(affinity_mask, Core::Hardware::NUM_CPU_CORES); + processor_id = HighestSetCore(affinity_mask.GetAffinityMask(), + Core::Hardware::NUM_CPU_CORES); } else { processor_id = ideal_core; } diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index a75071e9b5..5192ecff12 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h @@ -12,6 +12,7 @@ #include "common/common_types.h" #include "common/spin_lock.h" #include "core/arm/arm_interface.h" +#include "core/hle/kernel/k_affinity_mask.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/synchronization_object.h" #include "core/hle/result.h" @@ -469,7 +470,7 @@ public: return ideal_core; } - u64 GetAffinityMask() 
const { + constexpr const KAffinityMask& GetAffinityMask() const { return affinity_mask; } @@ -649,10 +650,9 @@ private: Scheduler* scheduler = nullptr; u32 ideal_core{0xFFFFFFFF}; - u64 affinity_mask{0x1}; + KAffinityMask affinity_mask{}; s32 ideal_core_override = -1; - u64 affinity_mask_override = 0x1; u32 affinity_override_count = 0; u32 scheduling_state = 0; diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp index a208247194..c4ae1d61fd 100644 --- a/src/yuzu/debugger/wait_tree.cpp +++ b/src/yuzu/debugger/wait_tree.cpp @@ -349,8 +349,8 @@ std::vector> WaitTreeThread::GetChildren() const { list.push_back(std::make_unique(tr("processor = %1").arg(processor))); list.push_back( std::make_unique(tr("ideal core = %1").arg(thread.GetIdealCore()))); - list.push_back( - std::make_unique(tr("affinity mask = %1").arg(thread.GetAffinityMask()))); + list.push_back(std::make_unique( + tr("affinity mask = %1").arg(thread.GetAffinityMask().GetAffinityMask()))); list.push_back(std::make_unique(tr("thread id = %1").arg(thread.GetThreadID()))); list.push_back(std::make_unique(tr("priority = %1(current) / %2(normal)") .arg(thread.GetPriority()) From 8dbfa4e1a4af2cda4500304ba8fe1a1b09bbb814 Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 27 Nov 2020 14:13:18 -0800 Subject: [PATCH 02/26] common: Port BitSet from Mesosphere. --- src/common/CMakeLists.txt | 1 + src/common/bit_set.h | 100 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 101 insertions(+) create mode 100644 src/common/bit_set.h diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index 56c7e21f59..fc2ed99990 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt @@ -104,6 +104,7 @@ add_library(common STATIC detached_tasks.h bit_cast.h bit_field.h + bit_set.h bit_util.h cityhash.cpp cityhash.h diff --git a/src/common/bit_set.h b/src/common/bit_set.h new file mode 100644 index 0000000000..4f966de40e --- /dev/null +++ b/src/common/bit_set.h @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once + +#include "common/alignment.h" +#include "common/bit_util.h" +#include "common/common_types.h" + +namespace Common { + +namespace impl { + +#define BITSIZEOF(x) (sizeof(x) * CHAR_BIT) + +template +class BitSet { +private: + static_assert(std::is_integral::value); + static_assert(std::is_unsigned::value); + static_assert(sizeof(Storage) <= sizeof(u64)); + + static constexpr size_t FlagsPerWord = BITSIZEOF(Storage); + static constexpr size_t NumWords = AlignUp(N, FlagsPerWord) / FlagsPerWord; + + static constexpr auto CountLeadingZeroImpl(Storage word) { + return CountLeadingZeroes64(static_cast(word)) - + (BITSIZEOF(unsigned long long) - FlagsPerWord); + } + + static constexpr Storage GetBitMask(size_t bit) { + return Storage(1) << (FlagsPerWord - 1 - bit); + } + +private: + Storage words[NumWords]; + +public: + constexpr BitSet() : words() { /* ... 
*/ + } + + constexpr void SetBit(size_t i) { + this->words[i / FlagsPerWord] |= GetBitMask(i % FlagsPerWord); + } + + constexpr void ClearBit(size_t i) { + this->words[i / FlagsPerWord] &= ~GetBitMask(i % FlagsPerWord); + } + + constexpr size_t CountLeadingZero() const { + for (size_t i = 0; i < NumWords; i++) { + if (this->words[i]) { + return FlagsPerWord * i + CountLeadingZeroImpl(this->words[i]); + } + } + return FlagsPerWord * NumWords; + } + + constexpr size_t GetNextSet(size_t n) const { + for (size_t i = (n + 1) / FlagsPerWord; i < NumWords; i++) { + Storage word = this->words[i]; + if (!IsAligned(n + 1, FlagsPerWord)) { + word &= GetBitMask(n % FlagsPerWord) - 1; + } + if (word) { + return FlagsPerWord * i + CountLeadingZeroImpl(word); + } + } + return FlagsPerWord * NumWords; + } +}; + +} // namespace impl + +template +using BitSet8 = impl::BitSet; + +template +using BitSet16 = impl::BitSet; + +template +using BitSet32 = impl::BitSet; + +template +using BitSet64 = impl::BitSet; + +} // namespace Common From a3ccac3eb7f6feb87ca7026c89531a0afd5fabab Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 28 Nov 2020 11:23:36 -0800 Subject: [PATCH 03/26] common: Port KPriorityQueue from Mesosphere. --- src/core/CMakeLists.txt | 1 + src/core/hle/kernel/k_priority_queue.h | 443 +++++++++++++++++++++++++ 2 files changed, 444 insertions(+) create mode 100644 src/core/hle/kernel/k_priority_queue.h diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index adabe8dbdb..4de159119b 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -153,6 +153,7 @@ add_library(core STATIC hle/kernel/hle_ipc.cpp hle/kernel/hle_ipc.h hle/kernel/k_affinity_mask.h + hle/kernel/k_priority_queue.h hle/kernel/kernel.cpp hle/kernel/kernel.h hle/kernel/memory/address_space_info.cpp diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h new file mode 100644 index 0000000000..88e4e1ed4a --- /dev/null +++ b/src/core/hle/kernel/k_priority_queue.h @@ -0,0 +1,443 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +// This file references various implementation details from Atmosphere, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX. 
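+
+// The queue below keys its fast paths on the Common::BitSet ported in the
+// previous patch: each core keeps one bit per priority level, and because
+// bits are stored MSB-first (index 0 is the most significant bit),
+// CountLeadingZero() yields the smallest set index, i.e. the highest
+// non-empty priority. A minimal usage sketch (assuming the BitSet64 alias
+// takes the bit count as its template parameter, as defined in bit_set.h):
+//
+//     Common::BitSet64<64> available{};
+//     available.SetBit(44); // a thread became runnable at priority 44
+//     available.SetBit(63); // and another at priority 63
+//     const size_t highest = available.CountLeadingZero(); // == 44
+//     const size_t next = available.GetNextSet(highest);   // == 63
+//     available.ClearBit(44); // the priority 44 queue drained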
+ +#pragma once + +#include "common/assert.h" +#include "common/bit_set.h" +#include "common/bit_util.h" +#include "common/common_types.h" + +namespace Kernel { + +class Thread; + +template +concept KPriorityQueueAffinityMask = !std::is_reference::value && requires(T & t) { + { t.GetAffinityMask() } + ->std::convertible_to; + {t.SetAffinityMask(std::declval())}; + + { t.GetAffinity(std::declval()) } + ->std::same_as; + {t.SetAffinity(std::declval(), std::declval())}; + {t.SetAll()}; +}; + +template +concept KPriorityQueueMember = !std::is_reference::value && requires(T & t) { + {typename T::QueueEntry()}; + {(typename T::QueueEntry()).Initialize()}; + {(typename T::QueueEntry()).SetPrev(std::addressof(t))}; + {(typename T::QueueEntry()).SetNext(std::addressof(t))}; + { (typename T::QueueEntry()).GetNext() } + ->std::same_as; + { (typename T::QueueEntry()).GetPrev() } + ->std::same_as; + { t.GetPriorityQueueEntry(std::declval()) } + ->std::same_as; + + {t.GetAffinityMask()}; + { typename std::remove_cvref::type() } + ->KPriorityQueueAffinityMask; + + { t.GetActiveCore() } + ->std::convertible_to; + { t.GetPriority() } + ->std::convertible_to; +}; + +template +requires KPriorityQueueMember class KPriorityQueue { +public: + using AffinityMaskType = typename std::remove_cv().GetAffinityMask())>::type>::type; + + static_assert(LowestPriority >= 0); + static_assert(HighestPriority >= 0); + static_assert(LowestPriority >= HighestPriority); + static constexpr size_t NumPriority = LowestPriority - HighestPriority + 1; + static constexpr size_t NumCores = _NumCores; + + static constexpr bool IsValidCore(s32 core) { + return 0 <= core && core < static_cast(NumCores); + } + + static constexpr bool IsValidPriority(s32 priority) { + return HighestPriority <= priority && priority <= LowestPriority + 1; + } + +private: + using Entry = typename Member::QueueEntry; + +public: + class KPerCoreQueue { + private: + Entry root[NumCores]; + + public: + constexpr KPerCoreQueue() : root() { + for (size_t i = 0; i < NumCores; i++) { + this->root[i].Initialize(); + } + } + + constexpr bool PushBack(s32 core, Member* member) { + /* Get the entry associated with the member. */ + Entry& member_entry = member->GetPriorityQueueEntry(core); + + /* Get the entry associated with the end of the queue. */ + Member* tail = this->root[core].GetPrev(); + Entry& tail_entry = + (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : this->root[core]; + + /* Link the entries. */ + member_entry.SetPrev(tail); + member_entry.SetNext(nullptr); + tail_entry.SetNext(member); + this->root[core].SetPrev(member); + + return (tail == nullptr); + } + + constexpr bool PushFront(s32 core, Member* member) { + /* Get the entry associated with the member. */ + Entry& member_entry = member->GetPriorityQueueEntry(core); + + /* Get the entry associated with the front of the queue. */ + Member* head = this->root[core].GetNext(); + Entry& head_entry = + (head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core]; + + /* Link the entries. */ + member_entry.SetPrev(nullptr); + member_entry.SetNext(head); + head_entry.SetPrev(member); + this->root[core].SetNext(member); + + return (head == nullptr); + } + + constexpr bool Remove(s32 core, Member* member) { + /* Get the entry associated with the member. */ + Entry& member_entry = member->GetPriorityQueueEntry(core); + + /* Get the entries associated with next and prev. 
*/ + Member* prev = member_entry.GetPrev(); + Member* next = member_entry.GetNext(); + Entry& prev_entry = + (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : this->root[core]; + Entry& next_entry = + (next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core]; + + /* Unlink. */ + prev_entry.SetNext(next); + next_entry.SetPrev(prev); + + return (this->GetFront(core) == nullptr); + } + + constexpr Member* GetFront(s32 core) const { + return this->root[core].GetNext(); + } + }; + + class KPriorityQueueImpl { + private: + KPerCoreQueue queues[NumPriority]; + Common::BitSet64 available_priorities[NumCores]; + + public: + constexpr KPriorityQueueImpl() : queues(), available_priorities() { /* ... */ + } + + constexpr void PushBack(s32 priority, s32 core, Member* member) { + ASSERT(IsValidCore(core)); + ASSERT(IsValidPriority(priority)); + + if (priority <= LowestPriority) { + if (this->queues[priority].PushBack(core, member)) { + this->available_priorities[core].SetBit(priority); + } + } + } + + constexpr void PushFront(s32 priority, s32 core, Member* member) { + ASSERT(IsValidCore(core)); + ASSERT(IsValidPriority(priority)); + + if (priority <= LowestPriority) { + if (this->queues[priority].PushFront(core, member)) { + this->available_priorities[core].SetBit(priority); + } + } + } + + constexpr void Remove(s32 priority, s32 core, Member* member) { + ASSERT(IsValidCore(core)); + ASSERT(IsValidPriority(priority)); + + if (priority <= LowestPriority) { + if (this->queues[priority].Remove(core, member)) { + this->available_priorities[core].ClearBit(priority); + } + } + } + + constexpr Member* GetFront(s32 core) const { + ASSERT(IsValidCore(core)); + + const s32 priority = + static_cast(this->available_priorities[core].CountLeadingZero()); + if (priority <= LowestPriority) { + return this->queues[priority].GetFront(core); + } else { + return nullptr; + } + } + + constexpr Member* GetFront(s32 priority, s32 core) const { + ASSERT(IsValidCore(core)); + ASSERT(IsValidPriority(priority)); + + if (priority <= LowestPriority) { + return this->queues[priority].GetFront(core); + } else { + return nullptr; + } + } + + constexpr Member* GetNext(s32 core, const Member* member) const { + ASSERT(IsValidCore(core)); + + Member* next = member->GetPriorityQueueEntry(core).GetNext(); + if (next == nullptr) { + const s32 priority = static_cast( + this->available_priorities[core].GetNextSet(member->GetPriority())); + if (priority <= LowestPriority) { + next = this->queues[priority].GetFront(core); + } + } + return next; + } + + constexpr void MoveToFront(s32 priority, s32 core, Member* member) { + ASSERT(IsValidCore(core)); + ASSERT(IsValidPriority(priority)); + + if (priority <= LowestPriority) { + this->queues[priority].Remove(core, member); + this->queues[priority].PushFront(core, member); + } + } + + constexpr Member* MoveToBack(s32 priority, s32 core, Member* member) { + ASSERT(IsValidCore(core)); + ASSERT(IsValidPriority(priority)); + + if (priority <= LowestPriority) { + this->queues[priority].Remove(core, member); + this->queues[priority].PushBack(core, member); + return this->queues[priority].GetFront(core); + } else { + return nullptr; + } + } + }; + +private: + KPriorityQueueImpl scheduled_queue; + KPriorityQueueImpl suggested_queue; + +private: + constexpr void ClearAffinityBit(u64& affinity, s32 core) { + affinity &= ~(u64(1ul) << core); + } + + constexpr s32 GetNextCore(u64& affinity) { + const s32 core = Common::CountTrailingZeroes64(affinity); + ClearAffinityBit(affinity, core); + 
return core; + } + + constexpr void PushBack(s32 priority, Member* member) { + ASSERT(IsValidPriority(priority)); + + /* Push onto the scheduled queue for its core, if we can. */ + u64 affinity = member->GetAffinityMask().GetAffinityMask(); + if (const s32 core = member->GetActiveCore(); core >= 0) { + this->scheduled_queue.PushBack(priority, core, member); + ClearAffinityBit(affinity, core); + } + + /* And suggest the thread for all other cores. */ + while (affinity) { + this->suggested_queue.PushBack(priority, GetNextCore(affinity), member); + } + } + + constexpr void PushFront(s32 priority, Member* member) { + ASSERT(IsValidPriority(priority)); + + /* Push onto the scheduled queue for its core, if we can. */ + u64 affinity = member->GetAffinityMask().GetAffinityMask(); + if (const s32 core = member->GetActiveCore(); core >= 0) { + this->scheduled_queue.PushFront(priority, core, member); + ClearAffinityBit(affinity, core); + } + + /* And suggest the thread for all other cores. */ + /* Note: Nintendo pushes onto the back of the suggested queue, not the front. */ + while (affinity) { + this->suggested_queue.PushBack(priority, GetNextCore(affinity), member); + } + } + + constexpr void Remove(s32 priority, Member* member) { + ASSERT(IsValidPriority(priority)); + + /* Remove from the scheduled queue for its core. */ + u64 affinity = member->GetAffinityMask().GetAffinityMask(); + if (const s32 core = member->GetActiveCore(); core >= 0) { + this->scheduled_queue.Remove(priority, core, member); + ClearAffinityBit(affinity, core); + } + + /* Remove from the suggested queue for all other cores. */ + while (affinity) { + this->suggested_queue.Remove(priority, GetNextCore(affinity), member); + } + } + +public: + constexpr KPriorityQueue() : scheduled_queue(), suggested_queue() { /* ... */ + } + + /* Getters. */ + constexpr Member* GetScheduledFront(s32 core) const { + return this->scheduled_queue.GetFront(core); + } + + constexpr Member* GetScheduledFront(s32 core, s32 priority) const { + return this->scheduled_queue.GetFront(priority, core); + } + + constexpr Member* GetSuggestedFront(s32 core) const { + return this->suggested_queue.GetFront(core); + } + + constexpr Member* GetSuggestedFront(s32 core, s32 priority) const { + return this->suggested_queue.GetFront(priority, core); + } + + constexpr Member* GetScheduledNext(s32 core, const Member* member) const { + return this->scheduled_queue.GetNext(core, member); + } + + constexpr Member* GetSuggestedNext(s32 core, const Member* member) const { + return this->suggested_queue.GetNext(core, member); + } + + constexpr Member* GetSamePriorityNext(s32 core, const Member* member) const { + return member->GetPriorityQueueEntry(core).GetNext(); + } + + /* Mutators. */ + constexpr void PushBack(Member* member) { + this->PushBack(member->GetPriority(), member); + } + + constexpr void Remove(Member* member) { + this->Remove(member->GetPriority(), member); + } + + constexpr void MoveToScheduledFront(Member* member) { + this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member); + } + + constexpr Thread* MoveToScheduledBack(Member* member) { + return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(), + member); + } + + /* First class fancy operations. */ + constexpr void ChangePriority(s32 prev_priority, bool is_running, Member* member) { + ASSERT(IsValidPriority(prev_priority)); + + /* Remove the member from the queues. 
*/ + const s32 new_priority = member->GetPriority(); + this->Remove(prev_priority, member); + + /* And enqueue. If the member is running, we want to keep it running. */ + if (is_running) { + this->PushFront(new_priority, member); + } else { + this->PushBack(new_priority, member); + } + } + + constexpr void ChangeAffinityMask(s32 prev_core, const AffinityMaskType& prev_affinity, + Member* member) { + /* Get the new information. */ + const s32 priority = member->GetPriority(); + const AffinityMaskType& new_affinity = member->GetAffinityMask(); + const s32 new_core = member->GetActiveCore(); + + /* Remove the member from all queues it was in before. */ + for (s32 core = 0; core < static_cast(NumCores); core++) { + if (prev_affinity.GetAffinity(core)) { + if (core == prev_core) { + this->scheduled_queue.Remove(priority, core, member); + } else { + this->suggested_queue.Remove(priority, core, member); + } + } + } + + /* And add the member to all queues it should be in now. */ + for (s32 core = 0; core < static_cast(NumCores); core++) { + if (new_affinity.GetAffinity(core)) { + if (core == new_core) { + this->scheduled_queue.PushBack(priority, core, member); + } else { + this->suggested_queue.PushBack(priority, core, member); + } + } + } + } + + constexpr void ChangeCore(s32 prev_core, Member* member, bool to_front = false) { + /* Get the new information. */ + const s32 new_core = member->GetActiveCore(); + const s32 priority = member->GetPriority(); + + /* We don't need to do anything if the core is the same. */ + if (prev_core != new_core) { + /* Remove from the scheduled queue for the previous core. */ + if (prev_core >= 0) { + this->scheduled_queue.Remove(priority, prev_core, member); + } + + /* Remove from the suggested queue and add to the scheduled queue for the new core. */ + if (new_core >= 0) { + this->suggested_queue.Remove(priority, new_core, member); + if (to_front) { + this->scheduled_queue.PushFront(priority, new_core, member); + } else { + this->scheduled_queue.PushBack(priority, new_core, member); + } + } + + /* Add to the suggested queue for the previous core. */ + if (prev_core >= 0) { + this->suggested_queue.PushBack(priority, prev_core, member); + } + } + } +}; + +} // namespace Kernel From 493263f4159b15f54c5683d92590cb20b60e4d7a Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 28 Nov 2020 11:43:29 -0800 Subject: [PATCH 04/26] hle: kernel: svc: Remove unnecessary hack in svcSleep. --- src/core/hle/kernel/svc.cpp | 7 ------- 1 file changed, 7 deletions(-) diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 0cd712d09d..fcb864427c 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -1627,13 +1627,6 @@ static void SleepThread(Core::System& system, s64 nanoseconds) { } else { current_thread->Sleep(nanoseconds); } - - if (is_redundant && !system.Kernel().IsMulticore()) { - system.Kernel().ExitSVCProfile(); - system.CoreTiming().AddTicks(1000U); - system.GetCpuManager().PreemptSingleCore(); - system.Kernel().EnterSVCProfile(); - } } static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanoseconds_high) { From d58a609ae43d2a1db296e90cb87dcfa3e4d4e7f3 Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 28 Nov 2020 11:54:41 -0800 Subject: [PATCH 05/26] hle: kernel: process: Add schedule count tracking, to be used for yield impl. 
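The scheduler rewrite later in this series uses these counters to elide
redundant yields: a yielding thread records the process schedule count, and
as long as nothing new has been scheduled, further yields skip the priority
queues entirely. A sketch of that fast path (the Thread-side accessors,
GetYieldScheduleCount()/SetYieldScheduleCount(), land with the scheduler
rewrite in patch 09):

    // In KScheduler::YieldWithoutCoreMigration():
    if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
        return; // nothing was scheduled since the last yield
    }
    // ...requeue the thread; if it remains at the front of its queue, record
    // the count so later yields are no-ops until the process schedules again:
    cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());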
---
 src/core/hle/kernel/process.h | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index f45cb56746..36d8547bde 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -216,6 +216,16 @@ public:
         total_process_running_time_ticks += ticks;
     }

+    /// Gets the process schedule count, used for thread yielding.
+    constexpr s64 GetScheduledCount() const {
+        return schedule_count;
+    }
+
+    /// Increments the process schedule count, used for thread yielding.
+    constexpr void IncrementScheduledCount() {
+        ++schedule_count;
+    }
+
     /// Gets 8 bytes of random data for svcGetInfo RandomEntropy
     u64 GetRandomEntropy(std::size_t index) const {
         return random_entropy.at(index);
@@ -397,6 +407,9 @@ private:
     /// Name of this process
     std::string name;

+    /// Schedule count of this process
+    s64 schedule_count{};
+
     /// System context
     Core::System& system;
 };

From 39d356782e5652a7e9bbdf7f0748be0b126a42f2 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Sat, 28 Nov 2020 11:55:30 -0800
Subject: [PATCH 06/26] hle: kernel: svc: Remove reschedule on svcBreak.

- This breaks things, and is unnecessary, since emulation will be done at
  this point.

---
 src/core/hle/kernel/svc.cpp | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index fcb864427c..9742aaf4c4 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -658,7 +658,6 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
         info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt);

     if (!break_reason.signal_debugger) {
-        SchedulerLock lock(system.Kernel());
         LOG_CRITICAL(
             Debug_Emulated,
             "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}",
@@ -669,10 +668,6 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
         auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
         const auto thread_processor_id = current_thread->GetProcessorID();
         system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
-
-        // Kill the current thread
-        system.Kernel().ExceptionalExit();
-        current_thread->Stop();
     }
 }

From 7e5d0f1fe311663329bc15b9e387f3ce2083dcf0 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Sat, 28 Nov 2020 13:34:07 -0800
Subject: [PATCH 07/26] hle: kernel: Port KAbstractSchedulerLock from Mesosphere.

---
 src/core/CMakeLists.txt                | 1 +
 src/core/hle/kernel/k_scheduler_lock.h | 76 ++++++++++++++++++++++++++
 2 files changed, 77 insertions(+)
 create mode 100644 src/core/hle/kernel/k_scheduler_lock.h

diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 4de159119b..2e60a8331e 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -154,6 +154,7 @@ add_library(core STATIC
     hle/kernel/hle_ipc.h
     hle/kernel/k_affinity_mask.h
     hle/kernel/k_priority_queue.h
+    hle/kernel/k_scheduler_lock.h
     hle/kernel/kernel.cpp
     hle/kernel/kernel.h
     hle/kernel/memory/address_space_info.cpp
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
new file mode 100644
index 0000000000..d46f5fc040
--- /dev/null
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -0,0 +1,76 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include "common/assert.h"
+#include "common/spin_lock.h"
+#include "core/hardware_properties.h"
+
+namespace Kernel {
+
+class KernelCore;
+
+template <typename SchedulerType>
+class KAbstractSchedulerLock {
+private:
+    KernelCore& kernel;
+    Common::SpinLock spin_lock;
+    s32 lock_count;
+    Core::EmuThreadHandle owner_thread;
+
+public:
+    KAbstractSchedulerLock(KernelCore& kernel)
+        : kernel{kernel}, spin_lock(), lock_count(0),
+          owner_thread(Core::EmuThreadHandle::InvalidHandle()) {}
+
+    bool IsLockedByCurrentThread() const {
+        return this->owner_thread == kernel.GetCurrentEmuThreadID();
+    }
+
+    void Lock() {
+        if (this->IsLockedByCurrentThread()) {
+            /* If we already own the lock, we can just increment the count. */
+            ASSERT(this->lock_count > 0);
+            this->lock_count++;
+        } else {
+            /* Otherwise, we want to disable scheduling and acquire the spinlock. */
+            SchedulerType::DisableScheduling(kernel);
+            this->spin_lock.lock();
+
+            /* For debug, ensure that our state is valid. */
+            ASSERT(this->lock_count == 0);
+            ASSERT(this->owner_thread == Core::EmuThreadHandle::InvalidHandle());
+
+            /* Increment count, take ownership. */
+            this->lock_count = 1;
+            this->owner_thread = kernel.GetCurrentEmuThreadID();
+        }
+    }
+
+    void Unlock() {
+        ASSERT(this->IsLockedByCurrentThread());
+        ASSERT(this->lock_count > 0);
+
+        /* Release an instance of the lock. */
+        if ((--this->lock_count) == 0) {
+            /* We're no longer going to hold the lock. Take note of what cores need scheduling. */
+            const u64 cores_needing_scheduling =
+                SchedulerType::UpdateHighestPriorityThreads(kernel);
+            Core::EmuThreadHandle leaving_thread = owner_thread;
+
+            /* Note that we no longer hold the lock, and unlock the spinlock. */
+            this->owner_thread = Core::EmuThreadHandle::InvalidHandle();
+            this->spin_lock.unlock();
+
+            /* Enable scheduling, and perform a rescheduling operation. */
+            SchedulerType::EnableScheduling(kernel, cores_needing_scheduling, leaving_thread);
+        }
+    }
+};
+
+} // namespace Kernel

From c10a37e5b6bdece089fc765e9843f81a097b08a2 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Tue, 1 Dec 2020 20:53:51 -0800
Subject: [PATCH 08/26] hle: kernel: physical_core: Clear exclusive state after each run.

- This is closer to pre-multicore behavior, and works a bit better.
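Clearing the monitor whenever a core stops running guest code means a stale
reservation cannot satisfy a later store-exclusive across a reschedule, which
matches how the single-core path behaved. The null jit guards added to
ARM_Dynarmic_32/64::ClearExclusiveState() keep the call safe even before the
JIT is initialized, so the per-core run loop simply becomes:

    void PhysicalCore::Run() {
        arm_interface->Run();
        arm_interface->ClearExclusiveState();
    }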
---
 src/core/arm/dynarmic/arm_dynarmic_32.cpp | 3 +++
 src/core/arm/dynarmic/arm_dynarmic_64.cpp | 3 +++
 src/core/hle/kernel/physical_core.cpp     | 1 +
 3 files changed, 7 insertions(+)

diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index 193fd7d62a..e9c74b1a6c 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -294,6 +294,9 @@ void ARM_Dynarmic_32::InvalidateCacheRange(VAddr addr, std::size_t size) {
 }

 void ARM_Dynarmic_32::ClearExclusiveState() {
+    if (!jit) {
+        return;
+    }
     jit->ClearExclusiveState();
 }

diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 0f0585d0fa..b63f79915f 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -330,6 +330,9 @@ void ARM_Dynarmic_64::InvalidateCacheRange(VAddr addr, std::size_t size) {
 }

 void ARM_Dynarmic_64::ClearExclusiveState() {
+    if (!jit) {
+        return;
+    }
     jit->ClearExclusiveState();
 }

diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index 50aca57521..d6a5742bd3 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -37,6 +37,7 @@ void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {

 void PhysicalCore::Run() {
     arm_interface->Run();
+    arm_interface->ClearExclusiveState();
 }

 void PhysicalCore::Idle() {

From 9e29e36a784496f7290c03b6a42e400a164a5b1e Mon Sep 17 00:00:00 2001
From: bunnei
Date: Wed, 2 Dec 2020 18:08:35 -0800
Subject: [PATCH 09/26] hle: kernel: Rewrite scheduler implementation based on Mesosphere.

---
 src/core/CMakeLists.txt                       |   4 +-
 src/core/arm/dynarmic/arm_dynarmic_64.cpp     |   2 +-
 src/core/core.cpp                             |  28 +-
 src/core/core.h                               |  22 +-
 src/core/cpu_manager.cpp                      |  55 +-
 src/core/hle/kernel/address_arbiter.cpp       |   6 +-
 src/core/hle/kernel/handle_table.cpp          |   4 +-
 src/core/hle/kernel/hle_ipc.cpp               |   2 +-
 src/core/hle/kernel/k_scheduler.cpp           | 873 ++++++++++++++++++
 .../hle/kernel/{scheduler.h => k_scheduler.h} | 285 +++---
 src/core/hle/kernel/kernel.cpp                |  59 +-
 src/core/hle/kernel/kernel.h                  |  17 +-
 src/core/hle/kernel/mutex.cpp                 |   6 +-
 src/core/hle/kernel/physical_core.cpp         |   9 +-
 src/core/hle/kernel/physical_core.h           |  13 +-
 src/core/hle/kernel/process.cpp               |   6 +-
 src/core/hle/kernel/readable_event.cpp        |   2 +-
 src/core/hle/kernel/scheduler.cpp             | 819 ----------------
 src/core/hle/kernel/server_session.cpp        |   2 +-
 src/core/hle/kernel/svc.cpp                   |  57 +-
 src/core/hle/kernel/synchronization.cpp       |   4 +-
 src/core/hle/kernel/thread.cpp                |  50 +-
 src/core/hle/kernel/thread.h                  | 107 ++-
 src/core/hle/kernel/time_manager.cpp          |   2 +-
 src/core/hle/service/time/time.cpp            |   2 +-
 src/yuzu/debugger/wait_tree.cpp               |   6 +-
 26 files changed, 1217 insertions(+), 1225 deletions(-)
 create mode 100644 src/core/hle/kernel/k_scheduler.cpp
 rename src/core/hle/kernel/{scheduler.h => k_scheduler.h} (57%)
 delete mode 100644 src/core/hle/kernel/scheduler.cpp

diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 2e60a8331e..662839ff87 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -154,6 +154,8 @@ add_library(core STATIC
     hle/kernel/hle_ipc.h
     hle/kernel/k_affinity_mask.h
     hle/kernel/k_priority_queue.h
+    hle/kernel/k_scheduler.cpp
+    hle/kernel/k_scheduler.h
     hle/kernel/k_scheduler_lock.h
     hle/kernel/kernel.cpp
     hle/kernel/kernel.h
     hle/kernel/memory/address_space_info.cpp
@@ -189,8 +191,6 @@ add_library(core STATIC
     hle/kernel/readable_event.h
     hle/kernel/resource_limit.cpp
     hle/kernel/resource_limit.h
-
hle/kernel/scheduler.cpp - hle/kernel/scheduler.h hle/kernel/server_port.cpp hle/kernel/server_port.h hle/kernel/server_session.cpp diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp index b63f79915f..7a4eb88a24 100644 --- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp +++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp @@ -15,8 +15,8 @@ #include "core/core.h" #include "core/core_timing.h" #include "core/hardware_properties.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/svc.h" #include "core/memory.h" #include "core/settings.h" diff --git a/src/core/core.cpp b/src/core/core.cpp index 01e4faac8f..77d21d41c5 100644 --- a/src/core/core.cpp +++ b/src/core/core.cpp @@ -27,10 +27,10 @@ #include "core/file_sys/vfs_real.h" #include "core/hardware_interrupt_manager.h" #include "core/hle/kernel/client_port.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/physical_core.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" #include "core/hle/service/am/applets/applets.h" #include "core/hle/service/apm/controller.h" @@ -508,14 +508,6 @@ std::size_t System::CurrentCoreIndex() const { return core; } -Kernel::Scheduler& System::CurrentScheduler() { - return impl->kernel.CurrentScheduler(); -} - -const Kernel::Scheduler& System::CurrentScheduler() const { - return impl->kernel.CurrentScheduler(); -} - Kernel::PhysicalCore& System::CurrentPhysicalCore() { return impl->kernel.CurrentPhysicalCore(); } @@ -524,22 +516,14 @@ const Kernel::PhysicalCore& System::CurrentPhysicalCore() const { return impl->kernel.CurrentPhysicalCore(); } -Kernel::Scheduler& System::Scheduler(std::size_t core_index) { - return impl->kernel.Scheduler(core_index); -} - -const Kernel::Scheduler& System::Scheduler(std::size_t core_index) const { - return impl->kernel.Scheduler(core_index); +/// Gets the global scheduler +Kernel::GlobalSchedulerContext& System::GlobalSchedulerContext() { + return impl->kernel.GlobalSchedulerContext(); } /// Gets the global scheduler -Kernel::GlobalScheduler& System::GlobalScheduler() { - return impl->kernel.GlobalScheduler(); -} - -/// Gets the global scheduler -const Kernel::GlobalScheduler& System::GlobalScheduler() const { - return impl->kernel.GlobalScheduler(); +const Kernel::GlobalSchedulerContext& System::GlobalSchedulerContext() const { + return impl->kernel.GlobalSchedulerContext(); } Kernel::Process* System::CurrentProcess() { diff --git a/src/core/core.h b/src/core/core.h index 29b8fb92a1..579a774e4f 100644 --- a/src/core/core.h +++ b/src/core/core.h @@ -26,11 +26,11 @@ class VfsFilesystem; } // namespace FileSys namespace Kernel { -class GlobalScheduler; +class GlobalSchedulerContext; class KernelCore; class PhysicalCore; class Process; -class Scheduler; +class KScheduler; } // namespace Kernel namespace Loader { @@ -213,12 +213,6 @@ public: /// Gets the index of the currently running CPU core [[nodiscard]] std::size_t CurrentCoreIndex() const; - /// Gets the scheduler for the CPU core that is currently running - [[nodiscard]] Kernel::Scheduler& CurrentScheduler(); - - /// Gets the scheduler for the CPU core that is currently running - [[nodiscard]] const Kernel::Scheduler& CurrentScheduler() const; - /// Gets the physical core for the CPU core that is currently running [[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore(); @@ 
-261,17 +255,11 @@ public: /// Gets an immutable reference to the renderer. [[nodiscard]] const VideoCore::RendererBase& Renderer() const; - /// Gets the scheduler for the CPU core with the specified index - [[nodiscard]] Kernel::Scheduler& Scheduler(std::size_t core_index); - - /// Gets the scheduler for the CPU core with the specified index - [[nodiscard]] const Kernel::Scheduler& Scheduler(std::size_t core_index) const; + /// Gets the global scheduler + [[nodiscard]] Kernel::GlobalSchedulerContext& GlobalSchedulerContext(); /// Gets the global scheduler - [[nodiscard]] Kernel::GlobalScheduler& GlobalScheduler(); - - /// Gets the global scheduler - [[nodiscard]] const Kernel::GlobalScheduler& GlobalScheduler() const; + [[nodiscard]] const Kernel::GlobalSchedulerContext& GlobalSchedulerContext() const; /// Gets the manager for the guest device memory [[nodiscard]] Core::DeviceMemory& DeviceMemory(); diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp index 0cff985e9a..1791543483 100644 --- a/src/core/cpu_manager.cpp +++ b/src/core/cpu_manager.cpp @@ -10,9 +10,9 @@ #include "core/core.h" #include "core/core_timing.h" #include "core/cpu_manager.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/physical_core.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" #include "video_core/gpu.h" @@ -109,11 +109,8 @@ void* CpuManager::GetStartFuncParamater() { void CpuManager::MultiCoreRunGuestThread() { auto& kernel = system.Kernel(); - { - auto& sched = kernel.CurrentScheduler(); - sched.OnThreadStart(); - } - auto* thread = kernel.CurrentScheduler().GetCurrentThread(); + kernel.CurrentScheduler()->OnThreadStart(); + auto* thread = kernel.CurrentScheduler()->GetCurrentThread(); auto& host_context = thread->GetHostContext(); host_context->SetRewindPoint(GuestRewindFunction, this); MultiCoreRunGuestLoop(); @@ -130,8 +127,8 @@ void CpuManager::MultiCoreRunGuestLoop() { physical_core = &kernel.CurrentPhysicalCore(); } system.ExitDynarmicProfile(); - auto& scheduler = kernel.CurrentScheduler(); - scheduler.TryDoContextSwitch(); + physical_core->ArmInterface().ClearExclusiveState(); + kernel.CurrentScheduler()->RescheduleCurrentCore(); } } @@ -140,25 +137,21 @@ void CpuManager::MultiCoreRunIdleThread() { while (true) { auto& physical_core = kernel.CurrentPhysicalCore(); physical_core.Idle(); - auto& scheduler = kernel.CurrentScheduler(); - scheduler.TryDoContextSwitch(); + kernel.CurrentScheduler()->RescheduleCurrentCore(); } } void CpuManager::MultiCoreRunSuspendThread() { auto& kernel = system.Kernel(); - { - auto& sched = kernel.CurrentScheduler(); - sched.OnThreadStart(); - } + kernel.CurrentScheduler()->OnThreadStart(); while (true) { auto core = kernel.GetCurrentHostThreadID(); - auto& scheduler = kernel.CurrentScheduler(); + auto& scheduler = *kernel.CurrentScheduler(); Kernel::Thread* current_thread = scheduler.GetCurrentThread(); Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[core].host_context); ASSERT(scheduler.ContextSwitchPending()); ASSERT(core == kernel.GetCurrentHostThreadID()); - scheduler.TryDoContextSwitch(); + scheduler.RescheduleCurrentCore(); } } @@ -206,11 +199,8 @@ void CpuManager::MultiCorePause(bool paused) { void CpuManager::SingleCoreRunGuestThread() { auto& kernel = system.Kernel(); - { - auto& sched = kernel.CurrentScheduler(); - sched.OnThreadStart(); - } - auto* thread = kernel.CurrentScheduler().GetCurrentThread(); + 
kernel.CurrentScheduler()->OnThreadStart(); + auto* thread = kernel.CurrentScheduler()->GetCurrentThread(); auto& host_context = thread->GetHostContext(); host_context->SetRewindPoint(GuestRewindFunction, this); SingleCoreRunGuestLoop(); @@ -218,7 +208,7 @@ void CpuManager::SingleCoreRunGuestThread() { void CpuManager::SingleCoreRunGuestLoop() { auto& kernel = system.Kernel(); - auto* thread = kernel.CurrentScheduler().GetCurrentThread(); + auto* thread = kernel.CurrentScheduler()->GetCurrentThread(); while (true) { auto* physical_core = &kernel.CurrentPhysicalCore(); system.EnterDynarmicProfile(); @@ -230,9 +220,10 @@ void CpuManager::SingleCoreRunGuestLoop() { thread->SetPhantomMode(true); system.CoreTiming().Advance(); thread->SetPhantomMode(false); + physical_core->ArmInterface().ClearExclusiveState(); PreemptSingleCore(); auto& scheduler = kernel.Scheduler(current_core); - scheduler.TryDoContextSwitch(); + scheduler.RescheduleCurrentCore(); } } @@ -244,24 +235,21 @@ void CpuManager::SingleCoreRunIdleThread() { system.CoreTiming().AddTicks(1000U); idle_count++; auto& scheduler = physical_core.Scheduler(); - scheduler.TryDoContextSwitch(); + scheduler.RescheduleCurrentCore(); } } void CpuManager::SingleCoreRunSuspendThread() { auto& kernel = system.Kernel(); - { - auto& sched = kernel.CurrentScheduler(); - sched.OnThreadStart(); - } + kernel.CurrentScheduler()->OnThreadStart(); while (true) { auto core = kernel.GetCurrentHostThreadID(); - auto& scheduler = kernel.CurrentScheduler(); + auto& scheduler = *kernel.CurrentScheduler(); Kernel::Thread* current_thread = scheduler.GetCurrentThread(); Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[0].host_context); ASSERT(scheduler.ContextSwitchPending()); ASSERT(core == kernel.GetCurrentHostThreadID()); - scheduler.TryDoContextSwitch(); + scheduler.RescheduleCurrentCore(); } } @@ -280,12 +268,12 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) { } current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES); system.CoreTiming().ResetTicks(); - scheduler.Unload(); + scheduler.Unload(scheduler.GetCurrentThread()); auto& next_scheduler = system.Kernel().Scheduler(current_core); Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext()); /// May have changed scheduler auto& current_scheduler = system.Kernel().Scheduler(current_core); - current_scheduler.Reload(); + current_scheduler.Reload(scheduler.GetCurrentThread()); auto* currrent_thread2 = current_scheduler.GetCurrentThread(); if (!currrent_thread2->IsIdleThread()) { idle_count = 0; @@ -369,8 +357,7 @@ void CpuManager::RunThread(std::size_t core) { return; } - auto& scheduler = system.Kernel().CurrentScheduler(); - Kernel::Thread* current_thread = scheduler.GetCurrentThread(); + auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread(); data.is_running = true; Common::Fiber::YieldTo(data.host_context, current_thread->GetHostContext()); data.is_running = false; diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp index 048acd30e9..bc32be18b6 100644 --- a/src/core/hle/kernel/address_arbiter.cpp +++ b/src/core/hle/kernel/address_arbiter.cpp @@ -12,8 +12,8 @@ #include "core/hle/kernel/address_arbiter.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" #include 
"core/hle/kernel/time_manager.h" #include "core/hle/result.h" @@ -153,7 +153,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6 bool should_decrement) { auto& memory = system.Memory(); auto& kernel = system.Kernel(); - Thread* current_thread = system.CurrentScheduler().GetCurrentThread(); + Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread(); Handle event_handle = InvalidHandle; { @@ -223,7 +223,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6 ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) { auto& memory = system.Memory(); auto& kernel = system.Kernel(); - Thread* current_thread = system.CurrentScheduler().GetCurrentThread(); + Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread(); Handle event_handle = InvalidHandle; { diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp index 3e745c18bc..40988b0fd4 100644 --- a/src/core/hle/kernel/handle_table.cpp +++ b/src/core/hle/kernel/handle_table.cpp @@ -8,9 +8,9 @@ #include "core/core.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" namespace Kernel { @@ -105,7 +105,7 @@ bool HandleTable::IsValid(Handle handle) const { std::shared_ptr HandleTable::GetGeneric(Handle handle) const { if (handle == CurrentThread) { - return SharedFrom(kernel.CurrentScheduler().GetCurrentThread()); + return SharedFrom(kernel.CurrentScheduler()->GetCurrentThread()); } else if (handle == CurrentProcess) { return SharedFrom(kernel.CurrentProcess()); } diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp index 81f85643b5..7eda89786e 100644 --- a/src/core/hle/kernel/hle_ipc.cpp +++ b/src/core/hle/kernel/hle_ipc.cpp @@ -17,11 +17,11 @@ #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/hle_ipc.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/process.h" #include "core/hle/kernel/readable_event.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/server_session.h" #include "core/hle/kernel/thread.h" #include "core/hle/kernel/time_manager.h" diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp new file mode 100644 index 0000000000..7f7da610d2 --- /dev/null +++ b/src/core/hle/kernel/k_scheduler.cpp @@ -0,0 +1,873 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +// This file references various implementation details from Atmosphere, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX. 
+ +#include +#include +#include +#include +#include + +#include "common/assert.h" +#include "common/bit_util.h" +#include "common/fiber.h" +#include "common/logging/log.h" +#include "core/arm/arm_interface.h" +#include "core/core.h" +#include "core/core_timing.h" +#include "core/cpu_manager.h" +#include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/physical_core.h" +#include "core/hle/kernel/process.h" +#include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/thread.h" +#include "core/hle/kernel/time_manager.h" + +namespace Kernel { + +static void IncrementScheduledCount(Kernel::Thread* thread) { + if (auto process = thread->GetOwnerProcess(); process) { + process->IncrementScheduledCount(); + } +} + +GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel) + : kernel{kernel}, scheduler_lock{kernel} {} + +GlobalSchedulerContext::~GlobalSchedulerContext() = default; + +/*static*/ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule, + Core::EmuThreadHandle global_thread) { + u32 current_core = global_thread.host_handle; + bool must_context_switch = global_thread.guest_handle != InvalidHandle && + (current_core < Core::Hardware::NUM_CPU_CORES); + + while (cores_pending_reschedule != 0) { + u32 core = Common::CountTrailingZeroes64(cores_pending_reschedule); + ASSERT(core < Core::Hardware::NUM_CPU_CORES); + if (!must_context_switch || core != current_core) { + auto& phys_core = kernel.PhysicalCore(core); + phys_core.Interrupt(); + } else { + must_context_switch = true; + } + cores_pending_reschedule &= ~(1ULL << core); + } + if (must_context_switch) { + auto core_scheduler = kernel.CurrentScheduler(); + kernel.ExitSVCProfile(); + core_scheduler->RescheduleCurrentCore(); + kernel.EnterSVCProfile(); + } +} + +u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { + std::scoped_lock lock{guard}; + if (Thread* prev_highest_thread = this->state.highest_priority_thread; + prev_highest_thread != highest_thread) { + if (prev_highest_thread != nullptr) { + IncrementScheduledCount(prev_highest_thread); + prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks()); + } + if (this->state.should_count_idle) { + if (highest_thread != nullptr) { + // if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) { + // process->SetRunningThread(this->core_id, highest_thread, + // this->state.idle_count); + //} + } else { + this->state.idle_count++; + } + } + + this->state.highest_priority_thread = highest_thread; + this->state.needs_scheduling = true; + return (1ULL << this->core_id); + } else { + return 0; + } +} + +/*static*/ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + /* Clear that we need to update. */ + ClearSchedulerUpdateNeeded(kernel); + + u64 cores_needing_scheduling = 0, idle_cores = 0; + Thread* top_threads[Core::Hardware::NUM_CPU_CORES]; + auto& priority_queue = GetPriorityQueue(kernel); + + /* We want to go over all cores, finding the highest priority thread and determining if + * scheduling is needed for that core. */ + for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { + Thread* top_thread = priority_queue.GetScheduledFront((s32)core_id); + if (top_thread != nullptr) { + ///* If the thread has no waiters, we need to check if the process has a thread pinned. 
+ ///*/ + // if (top_thread->GetNumKernelWaiters() == 0) { + // if (Process* parent = top_thread->GetOwnerProcess(); parent != nullptr) { + // if (Thread* pinned = parent->GetPinnedThread(core_id); + // pinned != nullptr && pinned != top_thread) { + // /* We prefer our parent's pinned thread if possible. However, we also + // don't + // * want to schedule un-runnable threads. */ + // if (pinned->GetRawState() == Thread::ThreadState_Runnable) { + // top_thread = pinned; + // } else { + // top_thread = nullptr; + // } + // } + // } + //} + } else { + idle_cores |= (1ULL << core_id); + } + + top_threads[core_id] = top_thread; + cores_needing_scheduling |= + kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]); + } + + /* Idle cores are bad. We're going to try to migrate threads to each idle core in turn. */ + while (idle_cores != 0) { + u32 core_id = Common::CountTrailingZeroes64(idle_cores); + if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) { + s32 migration_candidates[Core::Hardware::NUM_CPU_CORES]; + size_t num_candidates = 0; + + /* While we have a suggested thread, try to migrate it! */ + while (suggested != nullptr) { + /* Check if the suggested thread is the top thread on its core. */ + const s32 suggested_core = suggested->GetActiveCore(); + if (Thread* top_thread = + (suggested_core >= 0) ? top_threads[suggested_core] : nullptr; + top_thread != suggested) { + /* Make sure we're not dealing with threads too high priority for migration. */ + if (top_thread != nullptr && + top_thread->GetPriority() < HighestCoreMigrationAllowedPriority) { + break; + } + + /* The suggested thread isn't bound to its core, so we can migrate it! */ + suggested->SetActiveCore(core_id); + priority_queue.ChangeCore(suggested_core, suggested); + + top_threads[core_id] = suggested; + cores_needing_scheduling |= + kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]); + break; + } + + /* Note this core as a candidate for migration. */ + ASSERT(num_candidates < Core::Hardware::NUM_CPU_CORES); + migration_candidates[num_candidates++] = suggested_core; + suggested = priority_queue.GetSuggestedNext(core_id, suggested); + } + + /* If suggested is nullptr, we failed to migrate a specific thread. So let's try all our + * candidate cores' top threads. */ + if (suggested == nullptr) { + for (size_t i = 0; i < num_candidates; i++) { + /* Check if there's some other thread that can run on the candidate core. */ + const s32 candidate_core = migration_candidates[i]; + suggested = top_threads[candidate_core]; + if (Thread* next_on_candidate_core = + priority_queue.GetScheduledNext(candidate_core, suggested); + next_on_candidate_core != nullptr) { + /* The candidate core can run some other thread! We'll migrate its current + * top thread to us. */ + top_threads[candidate_core] = next_on_candidate_core; + cores_needing_scheduling |= + kernel.Scheduler(candidate_core) + .UpdateHighestPriorityThread(top_threads[candidate_core]); + + /* Perform the migration. 
*/ + suggested->SetActiveCore(core_id); + priority_queue.ChangeCore(candidate_core, suggested); + + top_threads[core_id] = suggested; + cores_needing_scheduling |= + kernel.Scheduler(core_id).UpdateHighestPriorityThread( + top_threads[core_id]); + break; + } + } + } + } + + idle_cores &= ~(1ULL << core_id); + } + + return cores_needing_scheduling; +} + +void GlobalSchedulerContext::AddThread(std::shared_ptr thread) { + std::scoped_lock lock{global_list_guard}; + thread_list.push_back(std::move(thread)); +} + +void GlobalSchedulerContext::RemoveThread(std::shared_ptr thread) { + std::scoped_lock lock{global_list_guard}; + thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), + thread_list.end()); +} + +void GlobalSchedulerContext::PreemptThreads() { + // The priority levels at which the global scheduler preempts threads every 10 ms. They are + // ordered from Core 0 to Core 3. + std::array preemption_priorities = {59, 59, 59, 63}; + + ASSERT(IsLocked()); + for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { + const u32 priority = preemption_priorities[core_id]; + kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority); + } +} + +bool GlobalSchedulerContext::IsLocked() const { + return scheduler_lock.IsLockedByCurrentThread(); +} + +/*static*/ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, + u32 old_state) { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + /* Check if the state has changed, because if it hasn't there's nothing to do. */ + const auto cur_state = thread->scheduling_state; + if (cur_state == old_state) { + return; + } + + /* Update the priority queues. */ + if (old_state == static_cast(ThreadSchedStatus::Runnable)) { + /* If we were previously runnable, then we're not runnable now, and we should remove. */ + GetPriorityQueue(kernel).Remove(thread); + IncrementScheduledCount(thread); + SetSchedulerUpdateNeeded(kernel); + } else if (cur_state == static_cast(ThreadSchedStatus::Runnable)) { + /* If we're now runnable, then we weren't previously, and we should add. */ + GetPriorityQueue(kernel).PushBack(thread); + IncrementScheduledCount(thread); + SetSchedulerUpdateNeeded(kernel); + } +} + +/*static*/ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, + Thread* current_thread, u32 old_priority) { + + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + /* If the thread is runnable, we want to change its priority in the queue. */ + if (thread->scheduling_state == static_cast(ThreadSchedStatus::Runnable)) { + GetPriorityQueue(kernel).ChangePriority( + old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread); + IncrementScheduledCount(thread); + SetSchedulerUpdateNeeded(kernel); + } +} + +/*static*/ void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread, + const KAffinityMask& old_affinity, + s32 old_core) { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + /* If the thread is runnable, we want to change its affinity in the queue. */ + if (thread->scheduling_state == static_cast(ThreadSchedStatus::Runnable)) { + GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread); + IncrementScheduledCount(thread); + SetSchedulerUpdateNeeded(kernel); + } +} + +void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { + ASSERT(system.GlobalSchedulerContext().IsLocked()); + + /* Get a reference to the priority queue. 
*/ + auto& kernel = system.Kernel(); + auto& priority_queue = GetPriorityQueue(kernel); + + /* Rotate the front of the queue to the end. */ + Thread* top_thread = priority_queue.GetScheduledFront(core_id, priority); + Thread* next_thread = nullptr; + if (top_thread != nullptr) { + next_thread = priority_queue.MoveToScheduledBack(top_thread); + if (next_thread != top_thread) { + IncrementScheduledCount(top_thread); + IncrementScheduledCount(next_thread); + } + } + + /* While we have a suggested thread, try to migrate it! */ + { + Thread* suggested = priority_queue.GetSuggestedFront(core_id, priority); + while (suggested != nullptr) { + /* Check if the suggested thread is the top thread on its core. */ + const s32 suggested_core = suggested->GetActiveCore(); + if (Thread* top_on_suggested_core = + (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) + : nullptr; + top_on_suggested_core != suggested) { + /* If the next thread is a new thread that has been waiting longer than our + * suggestion, we prefer it to our suggestion. */ + if (top_thread != next_thread && next_thread != nullptr && + next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick()) { + suggested = nullptr; + break; + } + + /* If we're allowed to do a migration, do one. */ + /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion + * to the front of the queue. */ + if (top_on_suggested_core == nullptr || + top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) { + suggested->SetActiveCore(core_id); + priority_queue.ChangeCore(suggested_core, suggested, true); + IncrementScheduledCount(suggested); + break; + } + } + + /* Get the next suggestion. */ + suggested = priority_queue.GetSamePriorityNext(core_id, suggested); + } + } + + /* Now that we might have migrated a thread with the same priority, check if we can do better. + */ + { + Thread* best_thread = priority_queue.GetScheduledFront(core_id); + if (best_thread == GetCurrentThread()) { + best_thread = priority_queue.GetScheduledNext(core_id, best_thread); + } + + /* If the best thread we can choose has a priority the same or worse than ours, try to + * migrate a higher priority thread. */ + if (best_thread != nullptr && best_thread->GetPriority() >= static_cast(priority)) { + Thread* suggested = priority_queue.GetSuggestedFront(core_id); + while (suggested != nullptr) { + /* If the suggestion's priority is the same as ours, don't bother. */ + if (suggested->GetPriority() >= best_thread->GetPriority()) { + break; + } + + /* Check if the suggested thread is the top thread on its core. */ + const s32 suggested_core = suggested->GetActiveCore(); + if (Thread* top_on_suggested_core = + (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) + : nullptr; + top_on_suggested_core != suggested) { + /* If we're allowed to do a migration, do one. */ + /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the + * suggestion to the front of the queue. */ + if (top_on_suggested_core == nullptr || + top_on_suggested_core->GetPriority() >= + HighestCoreMigrationAllowedPriority) { + suggested->SetActiveCore(core_id); + priority_queue.ChangeCore(suggested_core, suggested, true); + IncrementScheduledCount(suggested); + break; + } + } + + /* Get the next suggestion. */ + suggested = priority_queue.GetSuggestedNext(core_id, suggested); + } + } + } + + /* After a rotation, we need a scheduler update. 
*/ + SetSchedulerUpdateNeeded(kernel); +} + +/*static*/ bool KScheduler::CanSchedule(KernelCore& kernel) { + return kernel.CurrentScheduler()->GetCurrentThread()->GetDisableDispatchCount() <= 1; +} + +/*static*/ bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) { + return kernel.GlobalSchedulerContext().scheduler_update_needed.load(std::memory_order_acquire); +} + +/*static*/ void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) { + kernel.GlobalSchedulerContext().scheduler_update_needed.store(true, std::memory_order_release); +} + +/*static*/ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) { + kernel.GlobalSchedulerContext().scheduler_update_needed.store(false, std::memory_order_release); +} + +/*static*/ void KScheduler::DisableScheduling(KernelCore& kernel) { + if (auto* scheduler = kernel.CurrentScheduler(); scheduler) { + ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0); + scheduler->GetCurrentThread()->DisableDispatch(); + } +} + +/*static*/ void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling, + Core::EmuThreadHandle global_thread) { + if (auto* scheduler = kernel.CurrentScheduler(); scheduler) { + scheduler->GetCurrentThread()->EnableDispatch(); + } + RescheduleCores(kernel, cores_needing_scheduling, global_thread); +} + +/*static*/ u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { + if (IsSchedulerUpdateNeeded(kernel)) { + return UpdateHighestPriorityThreadsImpl(kernel); + } else { + return 0; + } +} + +/*static*/ KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) { + return kernel.GlobalSchedulerContext().priority_queue; +} + +void KScheduler::YieldWithoutCoreMigration() { + auto& kernel = system.Kernel(); + + /* Validate preconditions. */ + ASSERT(CanSchedule(kernel)); + ASSERT(kernel.CurrentProcess() != nullptr); + + /* Get the current thread and process. */ + Thread& cur_thread = *GetCurrentThread(); + Process& cur_process = *kernel.CurrentProcess(); + + /* If the thread's yield count matches, there's nothing for us to do. */ + if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { + return; + } + + /* Get a reference to the priority queue. */ + auto& priority_queue = GetPriorityQueue(kernel); + + /* Perform the yield. */ + { + SchedulerLock lock(kernel); + + const auto cur_state = cur_thread.scheduling_state; + if (cur_state == static_cast(ThreadSchedStatus::Runnable)) { + /* Put the current thread at the back of the queue. */ + Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread)); + IncrementScheduledCount(std::addressof(cur_thread)); + + /* If the next thread is different, we have an update to perform. */ + if (next_thread != std::addressof(cur_thread)) { + SetSchedulerUpdateNeeded(kernel); + } else { + /* Otherwise, set the thread's yield count so that we won't waste work until the + * process is scheduled again. */ + cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount()); + } + } + } +} + +void KScheduler::YieldWithCoreMigration() { + auto& kernel = system.Kernel(); + + /* Validate preconditions. */ + ASSERT(CanSchedule(kernel)); + ASSERT(kernel.CurrentProcess() != nullptr); + + /* Get the current thread and process. */ + Thread& cur_thread = *GetCurrentThread(); + Process& cur_process = *kernel.CurrentProcess(); + + /* If the thread's yield count matches, there's nothing for us to do. 
*/ + if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { + return; + } + + /* Get a reference to the priority queue. */ + auto& priority_queue = GetPriorityQueue(kernel); + + /* Perform the yield. */ + { + SchedulerLock lock(kernel); + + const auto cur_state = cur_thread.scheduling_state; + if (cur_state == static_cast(ThreadSchedStatus::Runnable)) { + /* Get the current active core. */ + const s32 core_id = cur_thread.GetActiveCore(); + + /* Put the current thread at the back of the queue. */ + Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread)); + IncrementScheduledCount(std::addressof(cur_thread)); + + /* While we have a suggested thread, try to migrate it! */ + bool recheck = false; + Thread* suggested = priority_queue.GetSuggestedFront(core_id); + while (suggested != nullptr) { + /* Check if the suggested thread is the thread running on its core. */ + const s32 suggested_core = suggested->GetActiveCore(); + + if (Thread* running_on_suggested_core = + (suggested_core >= 0) + ? kernel.Scheduler(suggested_core).state.highest_priority_thread + : nullptr; + running_on_suggested_core != suggested) { + /* If the current thread's priority is higher than our suggestion's we prefer + * the next thread to the suggestion. */ + /* We also prefer the next thread when the current thread's priority is equal to + * the suggestions, but the next thread has been waiting longer. */ + if ((suggested->GetPriority() > cur_thread.GetPriority()) || + (suggested->GetPriority() == cur_thread.GetPriority() && + next_thread != std::addressof(cur_thread) && + next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick())) { + suggested = nullptr; + break; + } + + /* If we're allowed to do a migration, do one. */ + /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the + * suggestion to the front of the queue. */ + if (running_on_suggested_core == nullptr || + running_on_suggested_core->GetPriority() >= + HighestCoreMigrationAllowedPriority) { + suggested->SetActiveCore(core_id); + priority_queue.ChangeCore(suggested_core, suggested, true); + IncrementScheduledCount(suggested); + break; + } else { + /* We couldn't perform a migration, but we should check again on a future + * yield. */ + recheck = true; + } + } + + /* Get the next suggestion. */ + suggested = priority_queue.GetSuggestedNext(core_id, suggested); + } + + /* If we still have a suggestion or the next thread is different, we have an update to + * perform. */ + if (suggested != nullptr || next_thread != std::addressof(cur_thread)) { + SetSchedulerUpdateNeeded(kernel); + } else if (!recheck) { + /* Otherwise if we don't need to re-check, set the thread's yield count so that we + * won't waste work until the process is scheduled again. */ + cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount()); + } + } + } +} + +void KScheduler::YieldToAnyThread() { + auto& kernel = system.Kernel(); + + /* Validate preconditions. */ + ASSERT(CanSchedule(kernel)); + ASSERT(kernel.CurrentProcess() != nullptr); + + /* Get the current thread and process. */ + Thread& cur_thread = *GetCurrentThread(); + Process& cur_process = *kernel.CurrentProcess(); + + /* If the thread's yield count matches, there's nothing for us to do. */ + if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { + return; + } + + /* Get a reference to the priority queue. */ + auto& priority_queue = GetPriorityQueue(kernel); + + /* Perform the yield. 
*/
+    {
+        SchedulerLock lock(kernel);
+
+        const auto cur_state = cur_thread.scheduling_state;
+        if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+            /* Get the current active core. */
+            const s32 core_id = cur_thread.GetActiveCore();
+
+            /* Migrate the current thread to core -1. */
+            cur_thread.SetActiveCore(-1);
+            priority_queue.ChangeCore(core_id, std::addressof(cur_thread));
+            IncrementScheduledCount(std::addressof(cur_thread));
+
+            /* If there's nothing scheduled, we can try to perform a migration. */
+            if (priority_queue.GetScheduledFront(core_id) == nullptr) {
+                /* While we have a suggested thread, try to migrate it! */
+                Thread* suggested = priority_queue.GetSuggestedFront(core_id);
+                while (suggested != nullptr) {
+                    /* Check if the suggested thread is the top thread on its core. */
+                    const s32 suggested_core = suggested->GetActiveCore();
+                    if (Thread* top_on_suggested_core =
+                            (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
+                                                  : nullptr;
+                        top_on_suggested_core != suggested) {
+                        /* If we're allowed to do a migration, do one. */
+                        if (top_on_suggested_core == nullptr ||
+                            top_on_suggested_core->GetPriority() >=
+                                HighestCoreMigrationAllowedPriority) {
+                            suggested->SetActiveCore(core_id);
+                            priority_queue.ChangeCore(suggested_core, suggested);
+                            IncrementScheduledCount(suggested);
+                        }
+
+                        /* Regardless of whether we migrated, we had a candidate, so we're done. */
+                        break;
+                    }
+
+                    /* Get the next suggestion. */
+                    suggested = priority_queue.GetSuggestedNext(core_id, suggested);
+                }
+
+                /* If the suggestion is different from the current thread, we need to perform an
+                 * update. */
+                if (suggested != std::addressof(cur_thread)) {
+                    SetSchedulerUpdateNeeded(kernel);
+                } else {
+                    /* Otherwise, set the thread's yield count so that we won't waste work until the
+                     * process is scheduled again. */
+                    cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
+                }
+            } else {
+                /* Otherwise, we have an update to perform. 
*/
+                SetSchedulerUpdateNeeded(kernel);
+            }
+        }
+    }
+}
+
+void GlobalSchedulerContext::Lock() {
+    scheduler_lock.Lock();
+}
+
+void GlobalSchedulerContext::Unlock() {
+    scheduler_lock.Unlock();
+}
+
+KScheduler::KScheduler(Core::System& system, std::size_t core_id)
+    : system(system), core_id(core_id) {
+    switch_fiber = std::make_shared<Common::Fiber>(std::function<void(void*)>(OnSwitch), this);
+    this->state.needs_scheduling = true;
+    this->state.interrupt_task_thread_runnable = false;
+    this->state.should_count_idle = false;
+    this->state.idle_count = 0;
+    this->state.idle_thread_stack = nullptr;
+    this->state.highest_priority_thread = nullptr;
+}
+
+KScheduler::~KScheduler() = default;
+
+Thread* KScheduler::GetCurrentThread() const {
+    if (current_thread) {
+        return current_thread;
+    }
+    return idle_thread;
+}
+
+u64 KScheduler::GetLastContextSwitchTicks() const {
+    return last_context_switch_time;
+}
+
+void KScheduler::RescheduleCurrentCore() {
+    ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
+
+    auto& phys_core = system.Kernel().PhysicalCore(core_id);
+    if (phys_core.IsInterrupted()) {
+        phys_core.ClearInterrupt();
+    }
+    guard.lock();
+    if (this->state.needs_scheduling) {
+        Schedule();
+    } else {
+        guard.unlock();
+    }
+}
+
+void KScheduler::OnThreadStart() {
+    SwitchContextStep2();
+}
+
+void KScheduler::Unload(Thread* thread) {
+    if (thread) {
+        thread->SetIsRunning(false);
+        if (thread->IsContinuousOnSVC() && !thread->IsHLEThread()) {
+            system.ArmInterface(core_id).ExceptionalExit();
+            thread->SetContinuousOnSVC(false);
+        }
+        if (!thread->IsHLEThread() && !thread->HasExited()) {
+            Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
+            cpu_core.SaveContext(thread->GetContext32());
+            cpu_core.SaveContext(thread->GetContext64());
+            // Save the TPIDR_EL0 system register in case it was modified.
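+            // (TPIDR_EL0 is the EL0 read/write software thread ID register; guest code may
+            // write to it at any time, so the live value is captured here and handed back to
+            // the core by Reload() via SetTPIDR_EL0() before the thread next runs.)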
+            thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+            cpu_core.ClearExclusiveState();
+        }
+        thread->context_guard.unlock();
+    }
+}
+
+void KScheduler::Reload(Thread* thread) {
+    if (thread) {
+        ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
+                   "Thread must be runnable.");
+
+        // Cancel any outstanding wakeup events for this thread
+        thread->SetIsRunning(true);
+        thread->SetWasRunning(false);
+
+        auto* const thread_owner_process = thread->GetOwnerProcess();
+        if (thread_owner_process != nullptr) {
+            system.Kernel().MakeCurrentProcess(thread_owner_process);
+        }
+        if (!thread->IsHLEThread()) {
+            Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
+            cpu_core.LoadContext(thread->GetContext32());
+            cpu_core.LoadContext(thread->GetContext64());
+            cpu_core.SetTlsAddress(thread->GetTLSAddress());
+            cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
+            cpu_core.ClearExclusiveState();
+        }
+    }
+}
+
+void KScheduler::SwitchContextStep2() {
+    // Load context of new thread
+    Reload(current_thread);
+
+    RescheduleCurrentCore();
+}
+
+void KScheduler::ScheduleImpl() {
+    Thread* previous_thread = current_thread;
+    current_thread = state.highest_priority_thread;
+
+    this->state.needs_scheduling = false;
+
+    if (current_thread == previous_thread) {
+        guard.unlock();
+        return;
+    }
+
+    Process* const previous_process = system.Kernel().CurrentProcess();
+
+    UpdateLastContextSwitchTime(previous_thread, previous_process);
+
+    // Save context for previous thread
+    Unload(previous_thread);
+
+    std::shared_ptr<Common::Fiber>* old_context;
+    if (previous_thread != nullptr) {
+        old_context = &previous_thread->GetHostContext();
+    } else {
+        old_context = &idle_thread->GetHostContext();
+    }
+    guard.unlock();
+
+    Common::Fiber::YieldTo(*old_context, switch_fiber);
+    /// When a thread wakes up, the scheduler may have changed to a different one on another core.
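+    /// e.g. a thread unloaded from core 0 can be picked up by core 2's scheduler, so we
+    /// re-query CurrentScheduler() below instead of reusing `this`.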
+    auto& next_scheduler = *system.Kernel().CurrentScheduler();
+    next_scheduler.SwitchContextStep2();
+}
+
+void KScheduler::OnSwitch(void* this_scheduler) {
+    KScheduler* sched = static_cast<KScheduler*>(this_scheduler);
+    sched->SwitchToCurrent();
+}
+
+void KScheduler::SwitchToCurrent() {
+    while (true) {
+        {
+            std::scoped_lock lock{guard};
+            current_thread = state.highest_priority_thread;
+            this->state.needs_scheduling = false;
+        }
+        const auto is_switch_pending = [this] {
+            std::scoped_lock lock{guard};
+            return !!this->state.needs_scheduling;
+        };
+        do {
+            if (current_thread != nullptr && !current_thread->IsHLEThread()) {
+                current_thread->context_guard.lock();
+                if (!current_thread->IsRunnable()) {
+                    current_thread->context_guard.unlock();
+                    break;
+                }
+                if (static_cast<std::size_t>(current_thread->GetProcessorID()) != core_id) {
+                    current_thread->context_guard.unlock();
+                    break;
+                }
+            }
+            std::shared_ptr<Common::Fiber>* next_context;
+            if (current_thread != nullptr) {
+                next_context = &current_thread->GetHostContext();
+            } else {
+                next_context = &idle_thread->GetHostContext();
+            }
+            Common::Fiber::YieldTo(switch_fiber, *next_context);
+        } while (!is_switch_pending());
+    }
+}
+
+void KScheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
+    const u64 prev_switch_ticks = last_context_switch_time;
+    const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
+    const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
+
+    if (thread != nullptr) {
+        thread->UpdateCPUTimeTicks(update_ticks);
+    }
+
+    if (process != nullptr) {
+        process->UpdateCPUTimeTicks(update_ticks);
+    }
+
+    last_context_switch_time = most_recent_switch_ticks;
+}
+
+void KScheduler::Initialize() {
+    std::string name = "Idle Thread Id:" + std::to_string(core_id);
+    std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
+    void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
+    ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
+    auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<s32>(core_id), 0,
+                                     nullptr, std::move(init_func), init_func_parameter);
+    idle_thread = thread_res.Unwrap().get();
+
+    {
+        KScopedSchedulerLock lock{system.Kernel()};
+        idle_thread->SetStatus(ThreadStatus::Ready);
+    }
+}
+
+SchedulerLock::SchedulerLock(KernelCore& kernel) : kernel{kernel} {
+    kernel.GlobalSchedulerContext().Lock();
+}
+
+SchedulerLock::~SchedulerLock() {
+    kernel.GlobalSchedulerContext().Unlock();
+}
+
+SchedulerLockAndSleep::SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle,
+                                             Thread* time_task, s64 nanoseconds)
+    : SchedulerLock{kernel}, event_handle{event_handle}, time_task{time_task}, nanoseconds{
+                                                                                   nanoseconds} {
+    event_handle = InvalidHandle;
+}
+
+SchedulerLockAndSleep::~SchedulerLockAndSleep() {
+    if (sleep_cancelled) {
+        return;
+    }
+    auto& time_manager = kernel.TimeManager();
+    time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
+}
+
+void SchedulerLockAndSleep::Release() {
+    if (sleep_cancelled) {
+        return;
+    }
+    auto& time_manager = kernel.TimeManager();
+    time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
+    sleep_cancelled = true;
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/k_scheduler.h
similarity index 57%
rename from src/core/hle/kernel/scheduler.h
rename to src/core/hle/kernel/k_scheduler.h
index 68db4a5efe..535ee34b98 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -1,7 +1,10 @@
-// 
Copyright 2018 yuzu emulator team +// Copyright 2020 yuzu Emulator Project // Licensed under GPLv2 or any later version // Refer to the license.txt file included. +// This file references various implementation details from Atmosphere, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX. + #pragma once #include @@ -11,8 +14,12 @@ #include "common/common_types.h" #include "common/multi_level_queue.h" +#include "common/scope_exit.h" #include "common/spin_lock.h" +#include "core/core_timing.h" #include "core/hardware_properties.h" +#include "core/hle/kernel/k_priority_queue.h" +#include "core/hle/kernel/k_scheduler_lock.h" #include "core/hle/kernel/thread.h" namespace Common { @@ -30,10 +37,16 @@ class KernelCore; class Process; class SchedulerLock; -class GlobalScheduler final { +using KSchedulerPriorityQueue = + KPriorityQueue; +static constexpr s32 HighestCoreMigrationAllowedPriority = 2; + +class GlobalSchedulerContext final { + friend class KScheduler; + public: - explicit GlobalScheduler(KernelCore& kernel); - ~GlobalScheduler(); + explicit GlobalSchedulerContext(KernelCore& kernel); + ~GlobalSchedulerContext(); /// Adds a new thread to the scheduler void AddThread(std::shared_ptr thread); @@ -46,60 +59,6 @@ public: return thread_list; } - /// Notify the scheduler a thread's status has changed. - void AdjustSchedulingOnStatus(Thread* thread, u32 old_flags); - - /// Notify the scheduler a thread's priority has changed. - void AdjustSchedulingOnPriority(Thread* thread, u32 old_priority); - - /// Notify the scheduler a thread's core and/or affinity mask has changed. - void AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask, s32 old_core); - - /** - * Takes care of selecting the new scheduled threads in three steps: - * - * 1. First a thread is selected from the top of the priority queue. If no thread - * is obtained then we move to step two, else we are done. - * - * 2. Second we try to get a suggested thread that's not assigned to any core or - * that is not the top thread in that core. - * - * 3. Third is no suggested thread is found, we do a second pass and pick a running - * thread in another core and swap it with its current thread. - * - * returns the cores needing scheduling. - */ - u32 SelectThreads(); - - bool HaveReadyThreads(std::size_t core_id) const { - return !scheduled_queue[core_id].empty(); - } - - /** - * Takes a thread and moves it to the back of the it's priority list. - * - * @note This operation can be redundant and no scheduling is changed if marked as so. - */ - bool YieldThread(Thread* thread); - - /** - * Takes a thread and moves it to the back of the it's priority list. - * Afterwards, tries to pick a suggested thread from the suggested queue that has worse time or - * a better priority than the next thread in the core. - * - * @note This operation can be redundant and no scheduling is changed if marked as so. - */ - bool YieldThreadAndBalanceLoad(Thread* thread); - - /** - * Takes a thread and moves it out of the scheduling queue. - * and into the suggested queue. If no thread can be scheduled afterwards in that core, - * a suggested thread is obtained instead. - * - * @note This operation can be redundant and no scheduling is changed if marked as so. - */ - bool YieldThreadAndWaitForLoadBalancing(Thread* thread); - /** * Rotates the scheduling queues of threads at a preemption priority and then does * some core rebalancing. 
Preemption priorities can be found in the array @@ -113,15 +72,7 @@ public: return Core::Hardware::NUM_CPU_CORES; } - void SetReselectionPending() { - is_reselection_pending.store(true, std::memory_order_release); - } - - bool IsReselectionPending() const { - return is_reselection_pending.load(std::memory_order_acquire); - } - - void Shutdown(); + bool IsLocked() const; private: friend class SchedulerLock; @@ -133,109 +84,50 @@ private: /// and reschedules current core if needed. void Unlock(); - void EnableInterruptAndSchedule(u32 cores_pending_reschedule, - Core::EmuThreadHandle global_thread); + using LockType = KAbstractSchedulerLock; - /** - * Add a thread to the suggested queue of a cpu core. Suggested threads may be - * picked if no thread is scheduled to run on the core. - */ - void Suggest(u32 priority, std::size_t core, Thread* thread); + KernelCore& kernel; - /** - * Remove a thread to the suggested queue of a cpu core. Suggested threads may be - * picked if no thread is scheduled to run on the core. - */ - void Unsuggest(u32 priority, std::size_t core, Thread* thread); - - /** - * Add a thread to the scheduling queue of a cpu core. The thread is added at the - * back the queue in its priority level. - */ - void Schedule(u32 priority, std::size_t core, Thread* thread); - - /** - * Add a thread to the scheduling queue of a cpu core. The thread is added at the - * front the queue in its priority level. - */ - void SchedulePrepend(u32 priority, std::size_t core, Thread* thread); - - /// Reschedule an already scheduled thread based on a new priority - void Reschedule(u32 priority, std::size_t core, Thread* thread); - - /// Unschedules a thread. - void Unschedule(u32 priority, std::size_t core, Thread* thread); - - /** - * Transfers a thread into an specific core. If the destination_core is -1 - * it will be unscheduled from its source code and added into its suggested - * queue. - */ - void TransferToCore(u32 priority, s32 destination_core, Thread* thread); - - bool AskForReselectionOrMarkRedundant(Thread* current_thread, const Thread* winner); - - static constexpr u32 min_regular_priority = 2; - std::array, Core::Hardware::NUM_CPU_CORES> - scheduled_queue; - std::array, Core::Hardware::NUM_CPU_CORES> - suggested_queue; - std::atomic is_reselection_pending{false}; - - // The priority levels at which the global scheduler preempts threads every 10 ms. They are - // ordered from Core 0 to Core 3. - std::array preemption_priorities = {59, 59, 59, 62}; - - /// Scheduler lock mechanisms. - bool is_locked{}; - std::mutex inner_lock; - std::atomic scope_lock{}; - Core::EmuThreadHandle current_owner{Core::EmuThreadHandle::InvalidHandle()}; - - Common::SpinLock global_list_guard{}; + std::atomic_bool scheduler_update_needed{}; + KSchedulerPriorityQueue priority_queue; + LockType scheduler_lock; /// Lists all thread ids that aren't deleted/etc. std::vector> thread_list; - KernelCore& kernel; + Common::SpinLock global_list_guard{}; }; -class Scheduler final { +class KScheduler final { public: - explicit Scheduler(Core::System& system, std::size_t core_id); - ~Scheduler(); - - /// Returns whether there are any threads that are ready to run. - bool HaveReadyThreads() const; + explicit KScheduler(Core::System& system, std::size_t core_id); + ~KScheduler(); /// Reschedules to the next available thread (call after current thread is suspended) - void TryDoContextSwitch(); + void RescheduleCurrentCore(); + + /// Reschedules cores pending reschedule, to be called on EnableScheduling. 
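+    /// cores_pending_reschedule is a bitmask with one bit per core (bit N set means core N
+    /// must pick up its new highest-priority thread), as produced by
+    /// UpdateHighestPriorityThreads(). Roughly (an assumed sketch, mirroring the old
+    /// EnableInterruptAndSchedule): every core in the mask other than the caller's is
+    /// signalled via PhysicalCore::Interrupt(), e.g.
+    ///     while (cores != 0) {
+    ///         const u32 core = Common::CountTrailingZeroes64(cores);
+    ///         kernel.PhysicalCore(core).Interrupt();
+    ///         cores &= ~(1ULL << core);
+    ///     }
+    /// while the calling core performs its own context switch directly.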
+    static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
+                                Core::EmuThreadHandle global_thread);
 
     /// The next two are for SingleCore Only.
     /// Unload current thread before preempting core.
     void Unload(Thread* thread);
-    void Unload();
+
     /// Reload current thread after core preemption.
     void Reload(Thread* thread);
-    void Reload();
 
     /// Gets the current running thread
     Thread* GetCurrentThread() const;
 
-    /// Gets the currently selected thread from the top of the multilevel queue
-    Thread* GetSelectedThread() const;
-
     /// Gets the timestamp for the last context switch in ticks.
     u64 GetLastContextSwitchTicks() const;
 
     bool ContextSwitchPending() const {
-        return is_context_switch_pending;
+        return this->state.needs_scheduling;
     }
 
     void Initialize();
 
-    /// Shutdowns the scheduler.
-    void Shutdown();
-
     void OnThreadStart();
 
     std::shared_ptr<Common::Fiber>& ControlContext() {
@@ -246,11 +138,90 @@ public:
         return switch_fiber;
     }
 
+    std::size_t CurrentCoreId() const {
+        return core_id;
+    }
+
+    u64 UpdateHighestPriorityThread(Thread* highest_thread);
+
+    /**
+     * Takes a thread and moves it to the back of its priority list.
+     *
+     * @note This operation can be redundant; when it is, no scheduling change takes place.
+     */
+    void YieldWithoutCoreMigration();
+
+    /**
+     * Takes a thread and moves it to the back of its priority list.
+     * Afterwards, tries to pick a suggested thread from the suggested queue that has worse time or
+     * a better priority than the next thread in the core.
+     *
+     * @note This operation can be redundant; when it is, no scheduling change takes place.
+     */
+    void YieldWithCoreMigration();
+
+    /**
+     * Takes a thread and moves it out of the scheduling queue and into the suggested queue.
+     * If no thread can be scheduled afterwards in that core, a suggested thread is obtained
+     * instead.
+     *
+     * @note This operation can be redundant; when it is, no scheduling change takes place.
+     */
+    void YieldToAnyThread();
+
+    /// Notify the scheduler a thread's status has changed.
+    static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state);
+
+    /// Notify the scheduler a thread's priority has changed.
+    static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
+                                        u32 old_priority);
+
+    /// Notify the scheduler a thread's core and/or affinity mask has changed.
+    static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
+                                            const KAffinityMask& old_affinity, s32 old_core);
+
 private:
-    friend class GlobalScheduler;
+    /**
+     * Takes care of selecting the new scheduled threads in three steps:
+     *
+     * 1. First a thread is selected from the top of the priority queue. If no thread
+     * is obtained then we move to step two, else we are done.
+     *
+     * 2. Second we try to get a suggested thread that's not assigned to any core or
+     * that is not the top thread in that core.
+     *
+     * 3. Third, if no suggested thread is found, we do a second pass and pick a running
+     * thread in another core and swap it with its current thread.
+     *
+     * returns the cores needing scheduling. 
+     */
+    static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
+
+    void RotateScheduledQueue(s32 core_id, s32 priority);
+
+public:
+    static bool CanSchedule(KernelCore& kernel);
+    static bool IsSchedulerUpdateNeeded(const KernelCore& kernel);
+    static void SetSchedulerUpdateNeeded(KernelCore& kernel);
+    static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
+    static void DisableScheduling(KernelCore& kernel);
+    static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
+                                 Core::EmuThreadHandle global_thread);
+    static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
+
+private:
+    friend class GlobalSchedulerContext;
+
+    static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel);
+
+    void Schedule() {
+        ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
+        this->ScheduleImpl();
+    }
 
     /// Switches the CPU's active thread context to that of the specified thread
-    void SwitchContext();
+    void ScheduleImpl();
+    void SwitchThread(Thread* next_thread);
 
     /// When a thread wakes up, it must run this through its new scheduler
     void SwitchContextStep2();
@@ -271,22 +242,28 @@ private:
     static void OnSwitch(void* this_scheduler);
     void SwitchToCurrent();
 
-    std::shared_ptr<Thread> current_thread = nullptr;
-    std::shared_ptr<Thread> selected_thread = nullptr;
-    std::shared_ptr<Thread> current_thread_prev = nullptr;
-    std::shared_ptr<Thread> selected_thread_set = nullptr;
-    std::shared_ptr<Thread> idle_thread = nullptr;
+private:
+    Thread* current_thread{};
+    Thread* idle_thread{};
 
-    std::shared_ptr<Common::Fiber> switch_fiber = nullptr;
+    std::shared_ptr<Common::Fiber> switch_fiber{};
+
+    struct SchedulingState {
+        std::atomic<bool> needs_scheduling;
+        bool interrupt_task_thread_runnable{};
+        bool should_count_idle{};
+        u64 idle_count{};
+        Thread* highest_priority_thread{};
+        void* idle_thread_stack{};
+    };
+
+    SchedulingState state;
 
     Core::System& system;
-    u64 last_context_switch_time = 0;
-    u64 idle_selection_count = 0;
+    u64 last_context_switch_time{};
     const std::size_t core_id;
 
     Common::SpinLock guard{};
-
-    bool is_context_switch_pending = false;
 };
 
 class SchedulerLock {
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 929db696d3..b74e34c40d 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -27,6 +27,7 @@
 #include "core/hle/kernel/client_port.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/memory/memory_layout.h"
 #include "core/hle/kernel/memory/memory_manager.h"
@@ -34,7 +35,6 @@
 #include "core/hle/kernel/physical_core.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/resource_limit.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/shared_memory.h"
 #include "core/hle/kernel/synchronization.h"
 #include "core/hle/kernel/thread.h"
@@ -49,17 +49,18 @@ namespace Kernel {
 
 struct KernelCore::Impl {
     explicit Impl(Core::System& system, KernelCore& kernel)
-        : global_scheduler{kernel}, synchronization{system}, time_manager{system},
-          global_handle_table{kernel}, system{system} {}
+        : synchronization{system}, time_manager{system}, global_handle_table{kernel}, system{
+                                                                                          system} {}
 
     void SetMulticore(bool is_multicore) {
         this->is_multicore = is_multicore;
     }
 
     void Initialize(KernelCore& kernel) {
-        Shutdown();
         RegisterHostThread();
 
+        global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
+
         InitializePhysicalCores();
         InitializeSystemResourceLimit(kernel);
         InitializeMemoryLayout();
@@ -86,29 +87,20 @@ 
struct KernelCore::Impl { } } - for (std::size_t i = 0; i < cores.size(); i++) { - cores[i].Shutdown(); - schedulers[i].reset(); - } cores.clear(); process_list.clear(); + current_process = nullptr; system_resource_limit = nullptr; global_handle_table.Clear(); + preemption_event = nullptr; - global_scheduler.Shutdown(); - named_ports.clear(); - for (auto& core : cores) { - core.Shutdown(); - } - cores.clear(); - exclusive_monitor.reset(); num_host_threads = 0; @@ -121,7 +113,7 @@ struct KernelCore::Impl { exclusive_monitor = Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES); for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { - schedulers[i] = std::make_unique(system, i); + schedulers[i] = std::make_unique(system, i); cores.emplace_back(i, system, *schedulers[i], interrupts); } } @@ -155,7 +147,7 @@ struct KernelCore::Impl { "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) { { SchedulerLock lock(kernel); - global_scheduler.PreemptThreads(); + global_scheduler_context->PreemptThreads(); } const auto time_interval = std::chrono::nanoseconds{ Core::Timing::msToCycles(std::chrono::milliseconds(10))}; @@ -245,7 +237,7 @@ struct KernelCore::Impl { if (result.host_handle >= Core::Hardware::NUM_CPU_CORES) { return result; } - const Kernel::Scheduler& sched = cores[result.host_handle].Scheduler(); + const Kernel::KScheduler& sched = cores[result.host_handle].Scheduler(); const Kernel::Thread* current = sched.GetCurrentThread(); if (current != nullptr && !current->IsPhantomMode()) { result.guest_handle = current->GetGlobalHandle(); @@ -314,7 +306,7 @@ struct KernelCore::Impl { // Lists all processes that exist in the current session. std::vector> process_list; Process* current_process = nullptr; - Kernel::GlobalScheduler global_scheduler; + std::unique_ptr global_scheduler_context; Kernel::Synchronization synchronization; Kernel::TimeManager time_manager; @@ -355,7 +347,7 @@ struct KernelCore::Impl { std::array, Core::Hardware::NUM_CPU_CORES> suspend_threads{}; std::array interrupts{}; - std::array, Core::Hardware::NUM_CPU_CORES> schedulers{}; + std::array, Core::Hardware::NUM_CPU_CORES> schedulers{}; bool is_multicore{}; std::thread::id single_core_thread_id{}; @@ -415,19 +407,19 @@ const std::vector>& KernelCore::GetProcessList() const return impl->process_list; } -Kernel::GlobalScheduler& KernelCore::GlobalScheduler() { - return impl->global_scheduler; +Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() { + return *impl->global_scheduler_context; } -const Kernel::GlobalScheduler& KernelCore::GlobalScheduler() const { - return impl->global_scheduler; +const Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() const { + return *impl->global_scheduler_context; } -Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) { +Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) { return *impl->schedulers[id]; } -const Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) const { +const Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) const { return *impl->schedulers[id]; } @@ -451,16 +443,13 @@ const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const { return impl->cores[core_id]; } -Kernel::Scheduler& KernelCore::CurrentScheduler() { +Kernel::KScheduler* KernelCore::CurrentScheduler() { u32 core_id = impl->GetCurrentHostThreadID(); - ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); - return *impl->schedulers[core_id]; -} - -const Kernel::Scheduler& 
KernelCore::CurrentScheduler() const { - u32 core_id = impl->GetCurrentHostThreadID(); - ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); - return *impl->schedulers[core_id]; + if (core_id >= Core::Hardware::NUM_CPU_CORES) { + // This is expected when called from not a guest thread + return {}; + } + return impl->schedulers[core_id].get(); } std::array& KernelCore::Interrupts() { diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index a73a930396..5846c3f39c 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -35,12 +35,12 @@ class SlabHeap; class AddressArbiter; class ClientPort; -class GlobalScheduler; +class GlobalSchedulerContext; class HandleTable; class PhysicalCore; class Process; class ResourceLimit; -class Scheduler; +class KScheduler; class SharedMemory; class Synchronization; class Thread; @@ -102,16 +102,16 @@ public: const std::vector>& GetProcessList() const; /// Gets the sole instance of the global scheduler - Kernel::GlobalScheduler& GlobalScheduler(); + Kernel::GlobalSchedulerContext& GlobalSchedulerContext(); /// Gets the sole instance of the global scheduler - const Kernel::GlobalScheduler& GlobalScheduler() const; + const Kernel::GlobalSchedulerContext& GlobalSchedulerContext() const; /// Gets the sole instance of the Scheduler assoviated with cpu core 'id' - Kernel::Scheduler& Scheduler(std::size_t id); + Kernel::KScheduler& Scheduler(std::size_t id); /// Gets the sole instance of the Scheduler assoviated with cpu core 'id' - const Kernel::Scheduler& Scheduler(std::size_t id) const; + const Kernel::KScheduler& Scheduler(std::size_t id) const; /// Gets the an instance of the respective physical CPU core. Kernel::PhysicalCore& PhysicalCore(std::size_t id); @@ -120,10 +120,7 @@ public: const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const; /// Gets the sole instance of the Scheduler at the current running core. - Kernel::Scheduler& CurrentScheduler(); - - /// Gets the sole instance of the Scheduler at the current running core. - const Kernel::Scheduler& CurrentScheduler() const; + Kernel::KScheduler* CurrentScheduler(); /// Gets the an instance of the current physical CPU core. 
Kernel::PhysicalCore& CurrentPhysicalCore(); diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp index 8f6c944d17..6299b1342a 100644 --- a/src/core/hle/kernel/mutex.cpp +++ b/src/core/hle/kernel/mutex.cpp @@ -11,11 +11,11 @@ #include "core/core.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/mutex.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" #include "core/hle/result.h" #include "core/memory.h" @@ -73,7 +73,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle, auto& kernel = system.Kernel(); std::shared_ptr current_thread = - SharedFrom(kernel.CurrentScheduler().GetCurrentThread()); + SharedFrom(kernel.CurrentScheduler()->GetCurrentThread()); { SchedulerLock lock(kernel); // The mutex address must be 4-byte aligned @@ -156,7 +156,7 @@ ResultCode Mutex::Release(VAddr address) { SchedulerLock lock(kernel); std::shared_ptr current_thread = - SharedFrom(kernel.CurrentScheduler().GetCurrentThread()); + SharedFrom(kernel.CurrentScheduler()->GetCurrentThread()); auto [result, new_owner] = Unlock(current_thread, address); diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp index d6a5742bd3..7fea45f96c 100644 --- a/src/core/hle/kernel/physical_core.cpp +++ b/src/core/hle/kernel/physical_core.cpp @@ -7,14 +7,14 @@ #include "core/arm/dynarmic/arm_dynarmic_32.h" #include "core/arm/dynarmic/arm_dynarmic_64.h" #include "core/core.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/physical_core.h" -#include "core/hle/kernel/scheduler.h" namespace Kernel { PhysicalCore::PhysicalCore(std::size_t core_index, Core::System& system, - Kernel::Scheduler& scheduler, Core::CPUInterrupts& interrupts) + Kernel::KScheduler& scheduler, Core::CPUInterrupts& interrupts) : core_index{core_index}, system{system}, scheduler{scheduler}, interrupts{interrupts}, guard{std::make_unique()} {} @@ -37,17 +37,12 @@ void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) { void PhysicalCore::Run() { arm_interface->Run(); - arm_interface->ClearExclusiveState(); } void PhysicalCore::Idle() { interrupts[core_index].AwaitInterrupt(); } -void PhysicalCore::Shutdown() { - scheduler.Shutdown(); -} - bool PhysicalCore::IsInterrupted() const { return interrupts[core_index].IsInterrupted(); } diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h index 37513130a8..b4d3c15de9 100644 --- a/src/core/hle/kernel/physical_core.h +++ b/src/core/hle/kernel/physical_core.h @@ -15,7 +15,7 @@ class SpinLock; } namespace Kernel { -class Scheduler; +class KScheduler; } // namespace Kernel namespace Core { @@ -28,7 +28,7 @@ namespace Kernel { class PhysicalCore { public: - PhysicalCore(std::size_t core_index, Core::System& system, Kernel::Scheduler& scheduler, + PhysicalCore(std::size_t core_index, Core::System& system, Kernel::KScheduler& scheduler, Core::CPUInterrupts& interrupts); ~PhysicalCore(); @@ -55,9 +55,6 @@ public: /// Check if this core is interrupted bool IsInterrupted() const; - // Shutdown this physical core. 
- void Shutdown(); - bool IsInitialized() const { return arm_interface != nullptr; } @@ -82,18 +79,18 @@ public: return core_index; } - Kernel::Scheduler& Scheduler() { + Kernel::KScheduler& Scheduler() { return scheduler; } - const Kernel::Scheduler& Scheduler() const { + const Kernel::KScheduler& Scheduler() const { return scheduler; } private: const std::size_t core_index; Core::System& system; - Kernel::Scheduler& scheduler; + Kernel::KScheduler& scheduler; Core::CPUInterrupts& interrupts; std::unique_ptr guard; std::unique_ptr arm_interface; diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index b17529dee5..238c03a13f 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -15,13 +15,13 @@ #include "core/file_sys/program_metadata.h" #include "core/hle/kernel/code_set.h" #include "core/hle/kernel/errors.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/memory/memory_block_manager.h" #include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/memory/slab_heap.h" #include "core/hle/kernel/process.h" #include "core/hle/kernel/resource_limit.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" #include "core/hle/lock.h" #include "core/memory.h" @@ -314,7 +314,7 @@ void Process::PrepareForTermination() { if (thread->GetOwnerProcess() != this) continue; - if (thread.get() == system.CurrentScheduler().GetCurrentThread()) + if (thread.get() == kernel.CurrentScheduler()->GetCurrentThread()) continue; // TODO(Subv): When are the other running/ready threads terminated? @@ -325,7 +325,7 @@ void Process::PrepareForTermination() { } }; - stop_threads(system.GlobalScheduler().GetThreadList()); + stop_threads(system.GlobalSchedulerContext().GetThreadList()); FreeTLSRegion(tls_region_address); tls_region_address = 0; diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp index 6e286419e9..927f88fed9 100644 --- a/src/core/hle/kernel/readable_event.cpp +++ b/src/core/hle/kernel/readable_event.cpp @@ -6,10 +6,10 @@ #include "common/assert.h" #include "common/logging/log.h" #include "core/hle/kernel/errors.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/readable_event.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" namespace Kernel { diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp deleted file mode 100644 index 9a969fdb55..0000000000 --- a/src/core/hle/kernel/scheduler.cpp +++ /dev/null @@ -1,819 +0,0 @@ -// Copyright 2018 yuzu emulator team -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. -// -// SelectThreads, Yield functions originally by TuxSH. -// licensed under GPLv2 or later under exception provided by the author. 
- -#include -#include -#include -#include -#include - -#include "common/assert.h" -#include "common/bit_util.h" -#include "common/fiber.h" -#include "common/logging/log.h" -#include "core/arm/arm_interface.h" -#include "core/core.h" -#include "core/core_timing.h" -#include "core/cpu_manager.h" -#include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/physical_core.h" -#include "core/hle/kernel/process.h" -#include "core/hle/kernel/scheduler.h" -#include "core/hle/kernel/time_manager.h" - -namespace Kernel { - -GlobalScheduler::GlobalScheduler(KernelCore& kernel) : kernel{kernel} {} - -GlobalScheduler::~GlobalScheduler() = default; - -void GlobalScheduler::AddThread(std::shared_ptr thread) { - std::scoped_lock lock{global_list_guard}; - thread_list.push_back(std::move(thread)); -} - -void GlobalScheduler::RemoveThread(std::shared_ptr thread) { - std::scoped_lock lock{global_list_guard}; - thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), - thread_list.end()); -} - -u32 GlobalScheduler::SelectThreads() { - ASSERT(is_locked); - const auto update_thread = [](Thread* thread, Scheduler& sched) { - std::scoped_lock lock{sched.guard}; - if (thread != sched.selected_thread_set.get()) { - if (thread == nullptr) { - ++sched.idle_selection_count; - } - sched.selected_thread_set = SharedFrom(thread); - } - const bool reschedule_pending = - sched.is_context_switch_pending || (sched.selected_thread_set != sched.current_thread); - sched.is_context_switch_pending = reschedule_pending; - std::atomic_thread_fence(std::memory_order_seq_cst); - return reschedule_pending; - }; - if (!is_reselection_pending.load()) { - return 0; - } - std::array top_threads{}; - - u32 idle_cores{}; - - // Step 1: Get top thread in schedule queue. - for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { - Thread* top_thread = - scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front(); - if (top_thread != nullptr) { - // TODO(Blinkhawk): Implement Thread Pinning - } else { - idle_cores |= (1U << core); - } - top_threads[core] = top_thread; - } - - while (idle_cores != 0) { - u32 core_id = Common::CountTrailingZeroes32(idle_cores); - - if (!suggested_queue[core_id].empty()) { - std::array migration_candidates{}; - std::size_t num_candidates = 0; - auto iter = suggested_queue[core_id].begin(); - Thread* suggested = nullptr; - // Step 2: Try selecting a suggested thread. - while (iter != suggested_queue[core_id].end()) { - suggested = *iter; - iter++; - s32 suggested_core_id = suggested->GetProcessorID(); - Thread* top_thread = - suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr; - if (top_thread != suggested) { - if (top_thread != nullptr && - top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) { - suggested = nullptr; - break; - // There's a too high thread to do core migration, cancel - } - TransferToCore(suggested->GetPriority(), static_cast(core_id), suggested); - break; - } - suggested = nullptr; - migration_candidates[num_candidates++] = suggested_core_id; - } - // Step 3: Select a suggested thread from another core - if (suggested == nullptr) { - for (std::size_t i = 0; i < num_candidates; i++) { - s32 candidate_core = migration_candidates[i]; - suggested = top_threads[candidate_core]; - auto it = scheduled_queue[candidate_core].begin(); - it++; - Thread* next = it != scheduled_queue[candidate_core].end() ? 
*it : nullptr; - if (next != nullptr) { - TransferToCore(suggested->GetPriority(), static_cast(core_id), - suggested); - top_threads[candidate_core] = next; - break; - } else { - suggested = nullptr; - } - } - } - top_threads[core_id] = suggested; - } - - idle_cores &= ~(1U << core_id); - } - u32 cores_needing_context_switch{}; - for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { - Scheduler& sched = kernel.Scheduler(core); - ASSERT(top_threads[core] == nullptr || - static_cast(top_threads[core]->GetProcessorID()) == core); - if (update_thread(top_threads[core], sched)) { - cores_needing_context_switch |= (1U << core); - } - } - return cores_needing_context_switch; -} - -bool GlobalScheduler::YieldThread(Thread* yielding_thread) { - ASSERT(is_locked); - // Note: caller should use critical section, etc. - if (!yielding_thread->IsRunnable()) { - // Normally this case shouldn't happen except for SetThreadActivity. - is_reselection_pending.store(true, std::memory_order_release); - return false; - } - const u32 core_id = static_cast(yielding_thread->GetProcessorID()); - const u32 priority = yielding_thread->GetPriority(); - - // Yield the thread - Reschedule(priority, core_id, yielding_thread); - const Thread* const winner = scheduled_queue[core_id].front(); - if (kernel.GetCurrentHostThreadID() != core_id) { - is_reselection_pending.store(true, std::memory_order_release); - } - - return AskForReselectionOrMarkRedundant(yielding_thread, winner); -} - -bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) { - ASSERT(is_locked); - // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section, - // etc. - if (!yielding_thread->IsRunnable()) { - // Normally this case shouldn't happen except for SetThreadActivity. - is_reselection_pending.store(true, std::memory_order_release); - return false; - } - const u32 core_id = static_cast(yielding_thread->GetProcessorID()); - const u32 priority = yielding_thread->GetPriority(); - - // Yield the thread - Reschedule(priority, core_id, yielding_thread); - - std::array current_threads; - for (std::size_t i = 0; i < current_threads.size(); i++) { - current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front(); - } - - Thread* next_thread = scheduled_queue[core_id].front(priority); - Thread* winner = nullptr; - for (auto& thread : suggested_queue[core_id]) { - const s32 source_core = thread->GetProcessorID(); - if (source_core >= 0) { - if (current_threads[source_core] != nullptr) { - if (thread == current_threads[source_core] || - current_threads[source_core]->GetPriority() < min_regular_priority) { - continue; - } - } - } - if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() || - next_thread->GetPriority() < thread->GetPriority()) { - if (thread->GetPriority() <= priority) { - winner = thread; - break; - } - } - } - - if (winner != nullptr) { - if (winner != yielding_thread) { - TransferToCore(winner->GetPriority(), s32(core_id), winner); - } - } else { - winner = next_thread; - } - - if (kernel.GetCurrentHostThreadID() != core_id) { - is_reselection_pending.store(true, std::memory_order_release); - } - - return AskForReselectionOrMarkRedundant(yielding_thread, winner); -} - -bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) { - ASSERT(is_locked); - // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section, - // etc. 
- if (!yielding_thread->IsRunnable()) { - // Normally this case shouldn't happen except for SetThreadActivity. - is_reselection_pending.store(true, std::memory_order_release); - return false; - } - Thread* winner = nullptr; - const u32 core_id = static_cast(yielding_thread->GetProcessorID()); - - // Remove the thread from its scheduled mlq, put it on the corresponding "suggested" one instead - TransferToCore(yielding_thread->GetPriority(), -1, yielding_thread); - - // If the core is idle, perform load balancing, excluding the threads that have just used this - // function... - if (scheduled_queue[core_id].empty()) { - // Here, "current_threads" is calculated after the ""yield"", unlike yield -1 - std::array current_threads; - for (std::size_t i = 0; i < current_threads.size(); i++) { - current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front(); - } - for (auto& thread : suggested_queue[core_id]) { - const s32 source_core = thread->GetProcessorID(); - if (source_core < 0 || thread == current_threads[source_core]) { - continue; - } - if (current_threads[source_core] == nullptr || - current_threads[source_core]->GetPriority() >= min_regular_priority) { - winner = thread; - } - break; - } - if (winner != nullptr) { - if (winner != yielding_thread) { - TransferToCore(winner->GetPriority(), static_cast(core_id), winner); - } - } else { - winner = yielding_thread; - } - } else { - winner = scheduled_queue[core_id].front(); - } - - if (kernel.GetCurrentHostThreadID() != core_id) { - is_reselection_pending.store(true, std::memory_order_release); - } - - return AskForReselectionOrMarkRedundant(yielding_thread, winner); -} - -void GlobalScheduler::PreemptThreads() { - ASSERT(is_locked); - for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { - const u32 priority = preemption_priorities[core_id]; - - if (scheduled_queue[core_id].size(priority) > 0) { - if (scheduled_queue[core_id].size(priority) > 1) { - scheduled_queue[core_id].front(priority)->IncrementYieldCount(); - } - scheduled_queue[core_id].yield(priority); - if (scheduled_queue[core_id].size(priority) > 1) { - scheduled_queue[core_id].front(priority)->IncrementYieldCount(); - } - } - - Thread* current_thread = - scheduled_queue[core_id].empty() ? nullptr : scheduled_queue[core_id].front(); - Thread* winner = nullptr; - for (auto& thread : suggested_queue[core_id]) { - const s32 source_core = thread->GetProcessorID(); - if (thread->GetPriority() != priority) { - continue; - } - if (source_core >= 0) { - Thread* next_thread = scheduled_queue[source_core].empty() - ? nullptr - : scheduled_queue[source_core].front(); - if (next_thread != nullptr && next_thread->GetPriority() < 2) { - break; - } - if (next_thread == thread) { - continue; - } - } - if (current_thread != nullptr && - current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) { - winner = thread; - break; - } - } - - if (winner != nullptr) { - TransferToCore(winner->GetPriority(), s32(core_id), winner); - current_thread = - winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread; - } - - if (current_thread != nullptr && current_thread->GetPriority() > priority) { - for (auto& thread : suggested_queue[core_id]) { - const s32 source_core = thread->GetProcessorID(); - if (thread->GetPriority() < priority) { - continue; - } - if (source_core >= 0) { - Thread* next_thread = scheduled_queue[source_core].empty() - ? 
nullptr - : scheduled_queue[source_core].front(); - if (next_thread != nullptr && next_thread->GetPriority() < 2) { - break; - } - if (next_thread == thread) { - continue; - } - } - if (current_thread != nullptr && - current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) { - winner = thread; - break; - } - } - - if (winner != nullptr) { - TransferToCore(winner->GetPriority(), s32(core_id), winner); - current_thread = winner; - } - } - - is_reselection_pending.store(true, std::memory_order_release); - } -} - -void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule, - Core::EmuThreadHandle global_thread) { - u32 current_core = global_thread.host_handle; - bool must_context_switch = global_thread.guest_handle != InvalidHandle && - (current_core < Core::Hardware::NUM_CPU_CORES); - while (cores_pending_reschedule != 0) { - u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule); - ASSERT(core < Core::Hardware::NUM_CPU_CORES); - if (!must_context_switch || core != current_core) { - auto& phys_core = kernel.PhysicalCore(core); - phys_core.Interrupt(); - } else { - must_context_switch = true; - } - cores_pending_reschedule &= ~(1U << core); - } - if (must_context_switch) { - auto& core_scheduler = kernel.CurrentScheduler(); - kernel.ExitSVCProfile(); - core_scheduler.TryDoContextSwitch(); - kernel.EnterSVCProfile(); - } -} - -void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) { - ASSERT(is_locked); - suggested_queue[core].add(thread, priority); -} - -void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) { - ASSERT(is_locked); - suggested_queue[core].remove(thread, priority); -} - -void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) { - ASSERT(is_locked); - ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core."); - scheduled_queue[core].add(thread, priority); -} - -void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) { - ASSERT(is_locked); - ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core."); - scheduled_queue[core].add(thread, priority, false); -} - -void GlobalScheduler::Reschedule(u32 priority, std::size_t core, Thread* thread) { - ASSERT(is_locked); - scheduled_queue[core].remove(thread, priority); - scheduled_queue[core].add(thread, priority); -} - -void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) { - ASSERT(is_locked); - scheduled_queue[core].remove(thread, priority); -} - -void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) { - ASSERT(is_locked); - const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT; - const s32 source_core = thread->GetProcessorID(); - if (source_core == destination_core || !schedulable) { - return; - } - thread->SetProcessorID(destination_core); - if (source_core >= 0) { - Unschedule(priority, static_cast(source_core), thread); - } - if (destination_core >= 0) { - Unsuggest(priority, static_cast(destination_core), thread); - Schedule(priority, static_cast(destination_core), thread); - } - if (source_core >= 0) { - Suggest(priority, static_cast(source_core), thread); - } -} - -bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, - const Thread* winner) { - if (current_thread == winner) { - current_thread->IncrementYieldCount(); - return true; - } else { - is_reselection_pending.store(true, std::memory_order_release); - return false; 
- } -} - -void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) { - if (old_flags == thread->scheduling_state) { - return; - } - ASSERT(is_locked); - - if (old_flags == static_cast(ThreadSchedStatus::Runnable)) { - // In this case the thread was running, now it's pausing/exitting - if (thread->processor_id >= 0) { - Unschedule(thread->current_priority, static_cast(thread->processor_id), thread); - } - - for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { - if (core != static_cast(thread->processor_id) && - thread->affinity_mask.GetAffinity(core)) { - Unsuggest(thread->current_priority, core, thread); - } - } - } else if (thread->scheduling_state == static_cast(ThreadSchedStatus::Runnable)) { - // The thread is now set to running from being stopped - if (thread->processor_id >= 0) { - Schedule(thread->current_priority, static_cast(thread->processor_id), thread); - } - - for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { - if (core != static_cast(thread->processor_id) && - thread->affinity_mask.GetAffinity(core)) { - Suggest(thread->current_priority, core, thread); - } - } - } - - SetReselectionPending(); -} - -void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priority) { - if (thread->scheduling_state != static_cast(ThreadSchedStatus::Runnable)) { - return; - } - ASSERT(is_locked); - if (thread->processor_id >= 0) { - Unschedule(old_priority, static_cast(thread->processor_id), thread); - } - - for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { - if (core != static_cast(thread->processor_id) && - thread->affinity_mask.GetAffinity(core)) { - Unsuggest(old_priority, core, thread); - } - } - - if (thread->processor_id >= 0) { - if (thread == kernel.CurrentScheduler().GetCurrentThread()) { - SchedulePrepend(thread->current_priority, static_cast(thread->processor_id), - thread); - } else { - Schedule(thread->current_priority, static_cast(thread->processor_id), thread); - } - } - - for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { - if (core != static_cast(thread->processor_id) && - thread->affinity_mask.GetAffinity(core)) { - Suggest(thread->current_priority, core, thread); - } - } - thread->IncrementYieldCount(); - SetReselectionPending(); -} - -void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask, - s32 old_core) { - if (thread->scheduling_state != static_cast(ThreadSchedStatus::Runnable) || - thread->current_priority >= THREADPRIO_COUNT) { - return; - } - ASSERT(is_locked); - - for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { - if (((old_affinity_mask >> core) & 1) != 0) { - if (core == static_cast(old_core)) { - Unschedule(thread->current_priority, core, thread); - } else { - Unsuggest(thread->current_priority, core, thread); - } - } - } - - for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { - if (thread->affinity_mask.GetAffinity(core)) { - if (core == static_cast(thread->processor_id)) { - Schedule(thread->current_priority, core, thread); - } else { - Suggest(thread->current_priority, core, thread); - } - } - } - - thread->IncrementYieldCount(); - SetReselectionPending(); -} - -void GlobalScheduler::Shutdown() { - for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { - scheduled_queue[core].clear(); - suggested_queue[core].clear(); - } - thread_list.clear(); -} - -void GlobalScheduler::Lock() { - Core::EmuThreadHandle current_thread = kernel.GetCurrentEmuThreadID(); - 
ASSERT(!current_thread.IsInvalid()); - if (current_thread == current_owner) { - ++scope_lock; - } else { - inner_lock.lock(); - is_locked = true; - current_owner = current_thread; - ASSERT(current_owner != Core::EmuThreadHandle::InvalidHandle()); - scope_lock = 1; - } -} - -void GlobalScheduler::Unlock() { - if (--scope_lock != 0) { - ASSERT(scope_lock > 0); - return; - } - u32 cores_pending_reschedule = SelectThreads(); - Core::EmuThreadHandle leaving_thread = current_owner; - current_owner = Core::EmuThreadHandle::InvalidHandle(); - scope_lock = 1; - is_locked = false; - inner_lock.unlock(); - EnableInterruptAndSchedule(cores_pending_reschedule, leaving_thread); -} - -Scheduler::Scheduler(Core::System& system, std::size_t core_id) : system(system), core_id(core_id) { - switch_fiber = std::make_shared(std::function(OnSwitch), this); -} - -Scheduler::~Scheduler() = default; - -bool Scheduler::HaveReadyThreads() const { - return system.GlobalScheduler().HaveReadyThreads(core_id); -} - -Thread* Scheduler::GetCurrentThread() const { - if (current_thread) { - return current_thread.get(); - } - return idle_thread.get(); -} - -Thread* Scheduler::GetSelectedThread() const { - return selected_thread.get(); -} - -u64 Scheduler::GetLastContextSwitchTicks() const { - return last_context_switch_time; -} - -void Scheduler::TryDoContextSwitch() { - auto& phys_core = system.Kernel().CurrentPhysicalCore(); - if (phys_core.IsInterrupted()) { - phys_core.ClearInterrupt(); - } - guard.lock(); - if (is_context_switch_pending) { - SwitchContext(); - } else { - guard.unlock(); - } -} - -void Scheduler::OnThreadStart() { - SwitchContextStep2(); -} - -void Scheduler::Unload(Thread* thread) { - if (thread) { - thread->last_running_ticks = system.CoreTiming().GetCPUTicks(); - thread->SetIsRunning(false); - if (thread->IsContinuousOnSVC() && !thread->IsHLEThread()) { - system.ArmInterface(core_id).ExceptionalExit(); - thread->SetContinuousOnSVC(false); - } - if (!thread->IsHLEThread() && !thread->HasExited()) { - Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); - cpu_core.SaveContext(thread->GetContext32()); - cpu_core.SaveContext(thread->GetContext64()); - // Save the TPIDR_EL0 system register in case it was modified. 
- thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); - cpu_core.ClearExclusiveState(); - } - thread->context_guard.unlock(); - } -} - -void Scheduler::Unload() { - Unload(current_thread.get()); -} - -void Scheduler::Reload(Thread* thread) { - if (thread) { - ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable, - "Thread must be runnable."); - - // Cancel any outstanding wakeup events for this thread - thread->SetIsRunning(true); - thread->SetWasRunning(false); - thread->last_running_ticks = system.CoreTiming().GetCPUTicks(); - - auto* const thread_owner_process = thread->GetOwnerProcess(); - if (thread_owner_process != nullptr) { - system.Kernel().MakeCurrentProcess(thread_owner_process); - } - if (!thread->IsHLEThread()) { - Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); - cpu_core.LoadContext(thread->GetContext32()); - cpu_core.LoadContext(thread->GetContext64()); - cpu_core.SetTlsAddress(thread->GetTLSAddress()); - cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0()); - cpu_core.ClearExclusiveState(); - } - } -} - -void Scheduler::Reload() { - Reload(current_thread.get()); -} - -void Scheduler::SwitchContextStep2() { - // Load context of new thread - Reload(selected_thread.get()); - - TryDoContextSwitch(); -} - -void Scheduler::SwitchContext() { - current_thread_prev = current_thread; - selected_thread = selected_thread_set; - Thread* previous_thread = current_thread_prev.get(); - Thread* new_thread = selected_thread.get(); - current_thread = selected_thread; - - is_context_switch_pending = false; - - if (new_thread == previous_thread) { - guard.unlock(); - return; - } - - Process* const previous_process = system.Kernel().CurrentProcess(); - - UpdateLastContextSwitchTime(previous_thread, previous_process); - - // Save context for previous thread - Unload(previous_thread); - - std::shared_ptr* old_context; - if (previous_thread != nullptr) { - old_context = &previous_thread->GetHostContext(); - } else { - old_context = &idle_thread->GetHostContext(); - } - guard.unlock(); - - Common::Fiber::YieldTo(*old_context, switch_fiber); - /// When a thread wakes up, the scheduler may have changed to other in another core. 
-    auto& next_scheduler = system.Kernel().CurrentScheduler();
-    next_scheduler.SwitchContextStep2();
-}
-
-void Scheduler::OnSwitch(void* this_scheduler) {
-    Scheduler* sched = static_cast<Scheduler*>(this_scheduler);
-    sched->SwitchToCurrent();
-}
-
-void Scheduler::SwitchToCurrent() {
-    while (true) {
-        {
-            std::scoped_lock lock{guard};
-            selected_thread = selected_thread_set;
-            current_thread = selected_thread;
-            is_context_switch_pending = false;
-        }
-        const auto is_switch_pending = [this] {
-            std::scoped_lock lock{guard};
-            return is_context_switch_pending;
-        };
-        do {
-            if (current_thread != nullptr && !current_thread->IsHLEThread()) {
-                current_thread->context_guard.lock();
-                if (!current_thread->IsRunnable()) {
-                    current_thread->context_guard.unlock();
-                    break;
-                }
-                if (static_cast<std::size_t>(current_thread->GetProcessorID()) != core_id) {
-                    current_thread->context_guard.unlock();
-                    break;
-                }
-            }
-            std::shared_ptr<Common::Fiber>* next_context;
-            if (current_thread != nullptr) {
-                next_context = &current_thread->GetHostContext();
-            } else {
-                next_context = &idle_thread->GetHostContext();
-            }
-            Common::Fiber::YieldTo(switch_fiber, *next_context);
-        } while (!is_switch_pending());
-    }
-}
-
-void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
-    const u64 prev_switch_ticks = last_context_switch_time;
-    const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
-    const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
-
-    if (thread != nullptr) {
-        thread->UpdateCPUTimeTicks(update_ticks);
-    }
-
-    if (process != nullptr) {
-        process->UpdateCPUTimeTicks(update_ticks);
-    }
-
-    last_context_switch_time = most_recent_switch_ticks;
-}
-
-void Scheduler::Initialize() {
-    std::string name = "Idle Thread Id:" + std::to_string(core_id);
-    std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
-    void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
-    ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
-    auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<s32>(core_id), 0,
-                                     nullptr, std::move(init_func), init_func_parameter);
-    idle_thread = std::move(thread_res).Unwrap();
-}
-
-void Scheduler::Shutdown() {
-    current_thread = nullptr;
-    selected_thread = nullptr;
-}
-
-SchedulerLock::SchedulerLock(KernelCore& kernel) : kernel{kernel} {
-    kernel.GlobalScheduler().Lock();
-}
-
-SchedulerLock::~SchedulerLock() {
-    kernel.GlobalScheduler().Unlock();
-}
-
-SchedulerLockAndSleep::SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle,
-                                             Thread* time_task, s64 nanoseconds)
-    : SchedulerLock{kernel}, event_handle{event_handle}, time_task{time_task}, nanoseconds{
-                                                                                   nanoseconds} {
-    event_handle = InvalidHandle;
-}
-
-SchedulerLockAndSleep::~SchedulerLockAndSleep() {
-    if (sleep_cancelled) {
-        return;
-    }
-    auto& time_manager = kernel.TimeManager();
-    time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
-}
-
-void SchedulerLockAndSleep::Release() {
-    if (sleep_cancelled) {
-        return;
-    }
-    auto& time_manager = kernel.TimeManager();
-    time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
-    sleep_cancelled = true;
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index 8c19f25341..bf2c900280 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -14,9 +14,9 @@
 #include "core/hle/kernel/client_session.h"
 #include "core/hle/kernel/handle_table.h"
 #include
"core/hle/kernel/hle_ipc.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/server_session.h" #include "core/hle/kernel/session.h" #include "core/hle/kernel/thread.h" diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 9742aaf4c4..2612a6b0d7 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -24,6 +24,7 @@ #include "core/hle/kernel/client_session.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/memory/memory_block.h" #include "core/hle/kernel/memory/page_table.h" @@ -32,7 +33,6 @@ #include "core/hle/kernel/process.h" #include "core/hle/kernel/readable_event.h" #include "core/hle/kernel/resource_limit.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/shared_memory.h" #include "core/hle/kernel/svc.h" #include "core/hle/kernel/svc_types.h" @@ -332,7 +332,8 @@ static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle, /// Makes a blocking IPC call to an OS service. static ResultCode SendSyncRequest(Core::System& system, Handle handle) { - const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); + auto& kernel = system.Kernel(); + const auto& handle_table = kernel.CurrentProcess()->GetHandleTable(); std::shared_ptr session = handle_table.Get(handle); if (!session) { LOG_ERROR(Kernel_SVC, "called with invalid handle=0x{:08X}", handle); @@ -341,9 +342,9 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) { LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName()); - auto thread = system.CurrentScheduler().GetCurrentThread(); + auto thread = kernel.CurrentScheduler()->GetCurrentThread(); { - SchedulerLock lock(system.Kernel()); + SchedulerLock lock(kernel); thread->InvalidateHLECallback(); thread->SetStatus(ThreadStatus::WaitIPC); session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming()); @@ -352,12 +353,12 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) { if (thread->HasHLECallback()) { Handle event_handle = thread->GetHLETimeEvent(); if (event_handle != InvalidHandle) { - auto& time_manager = system.Kernel().TimeManager(); + auto& time_manager = kernel.TimeManager(); time_manager.UnscheduleTimeEvent(event_handle); } { - SchedulerLock lock(system.Kernel()); + SchedulerLock lock(kernel); auto* sync_object = thread->GetHLESyncObject(); sync_object->RemoveWaitingThread(SharedFrom(thread)); } @@ -665,7 +666,7 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) { handle_debug_buffer(info1, info2); - auto* const current_thread = system.CurrentScheduler().GetCurrentThread(); + auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread(); const auto thread_processor_id = current_thread->GetProcessorID(); system.ArmInterface(static_cast(thread_processor_id)).LogBacktrace(); } @@ -917,7 +918,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha } const auto& core_timing = system.CoreTiming(); - const auto& scheduler = system.CurrentScheduler(); + const auto& scheduler = *system.Kernel().CurrentScheduler(); const auto* const current_thread = scheduler.GetCurrentThread(); const bool same_thread = current_thread == thread.get(); @@ -1085,7 +1086,7 @@ static 
ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 act return ERR_INVALID_HANDLE; } - if (thread.get() == system.CurrentScheduler().GetCurrentThread()) { + if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) { LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread"); return ERR_BUSY; } @@ -1118,7 +1119,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, H return ERR_INVALID_HANDLE; } - if (thread.get() == system.CurrentScheduler().GetCurrentThread()) { + if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) { LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread"); return ERR_BUSY; } @@ -1475,7 +1476,7 @@ static void ExitProcess(Core::System& system) { current_process->PrepareForTermination(); // Kill the current thread - system.CurrentScheduler().GetCurrentThread()->Stop(); + system.Kernel().CurrentScheduler()->GetCurrentThread()->Stop(); } static void ExitProcess32(Core::System& system) { @@ -1576,8 +1577,8 @@ static ResultCode StartThread32(Core::System& system, Handle thread_handle) { static void ExitThread(Core::System& system) { LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC()); - auto* const current_thread = system.CurrentScheduler().GetCurrentThread(); - system.GlobalScheduler().RemoveThread(SharedFrom(current_thread)); + auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread(); + system.GlobalSchedulerContext().RemoveThread(SharedFrom(current_thread)); current_thread->Stop(); } @@ -1590,37 +1591,31 @@ static void SleepThread(Core::System& system, s64 nanoseconds) { LOG_DEBUG(Kernel_SVC, "called nanoseconds={}", nanoseconds); enum class SleepType : s64 { - YieldWithoutLoadBalancing = 0, - YieldWithLoadBalancing = -1, + YieldWithoutCoreMigration = 0, + YieldWithCoreMigration = -1, YieldAndWaitForLoadBalancing = -2, }; - auto& scheduler = system.CurrentScheduler(); - auto* const current_thread = scheduler.GetCurrentThread(); - bool is_redundant = false; - + auto& scheduler = *system.Kernel().CurrentScheduler(); if (nanoseconds <= 0) { switch (static_cast(nanoseconds)) { - case SleepType::YieldWithoutLoadBalancing: { - auto pair = current_thread->YieldSimple(); - is_redundant = pair.second; + case SleepType::YieldWithoutCoreMigration: { + scheduler.YieldWithoutCoreMigration(); break; } - case SleepType::YieldWithLoadBalancing: { - auto pair = current_thread->YieldAndBalanceLoad(); - is_redundant = pair.second; + case SleepType::YieldWithCoreMigration: { + scheduler.YieldWithCoreMigration(); break; } case SleepType::YieldAndWaitForLoadBalancing: { - auto pair = current_thread->YieldAndWaitForLoadBalancing(); - is_redundant = pair.second; + scheduler.YieldToAnyThread(); break; } default: UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds); } } else { - current_thread->Sleep(nanoseconds); + scheduler.GetCurrentThread()->Sleep(nanoseconds); } } @@ -1656,8 +1651,8 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4)); auto& kernel = system.Kernel(); Handle event_handle; - Thread* current_thread = system.CurrentScheduler().GetCurrentThread(); - auto* const current_process = system.Kernel().CurrentProcess(); + Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread(); + auto* const current_process = kernel.CurrentProcess(); { 
SchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds); const auto& handle_table = current_process->GetHandleTable(); @@ -2627,7 +2622,7 @@ void Call(Core::System& system, u32 immediate) { auto& kernel = system.Kernel(); kernel.EnterSVCProfile(); - auto* thread = system.CurrentScheduler().GetCurrentThread(); + auto* thread = kernel.CurrentScheduler()->GetCurrentThread(); thread->SetContinuousOnSVC(true); const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate) diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp index 8b875d853c..342fb4516a 100644 --- a/src/core/hle/kernel/synchronization.cpp +++ b/src/core/hle/kernel/synchronization.cpp @@ -5,8 +5,8 @@ #include "core/core.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/synchronization.h" #include "core/hle/kernel/synchronization_object.h" #include "core/hle/kernel/thread.h" @@ -37,7 +37,7 @@ void Synchronization::SignalObject(SynchronizationObject& obj) const { std::pair Synchronization::WaitFor( std::vector>& sync_objects, s64 nano_seconds) { auto& kernel = system.Kernel(); - auto* const thread = system.CurrentScheduler().GetCurrentThread(); + auto* const thread = kernel.CurrentScheduler()->GetCurrentThread(); Handle event_handle = InvalidHandle; { SchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds); diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 38b4a09876..804e07f2bd 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -17,10 +17,10 @@ #include "core/hardware_properties.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" #include "core/hle/kernel/time_manager.h" #include "core/hle/result.h" @@ -186,9 +186,11 @@ ResultVal> Thread::Create(Core::System& system, ThreadTy thread->status = ThreadStatus::Dormant; thread->entry_point = entry_point; thread->stack_top = stack_top; + thread->disable_count = 1; thread->tpidr_el0 = 0; thread->nominal_priority = thread->current_priority = priority; - thread->last_running_ticks = 0; + thread->schedule_count = -1; + thread->last_scheduled_tick = 0; thread->processor_id = processor_id; thread->ideal_core = processor_id; thread->affinity_mask.SetAffinity(processor_id, true); @@ -201,7 +203,7 @@ ResultVal> Thread::Create(Core::System& system, ThreadTy thread->owner_process = owner_process; thread->type = type_flags; if ((type_flags & THREADTYPE_IDLE) == 0) { - auto& scheduler = kernel.GlobalScheduler(); + auto& scheduler = kernel.GlobalSchedulerContext(); scheduler.AddThread(thread); } if (owner_process) { @@ -402,39 +404,12 @@ ResultCode Thread::Sleep(s64 nanoseconds) { return RESULT_SUCCESS; } -std::pair Thread::YieldSimple() { - bool is_redundant = false; - { - SchedulerLock lock(kernel); - is_redundant = kernel.GlobalScheduler().YieldThread(this); - } - return {RESULT_SUCCESS, is_redundant}; -} - -std::pair Thread::YieldAndBalanceLoad() { - bool is_redundant = false; - { - SchedulerLock lock(kernel); - is_redundant = kernel.GlobalScheduler().YieldThreadAndBalanceLoad(this); - } - return 
{RESULT_SUCCESS, is_redundant}; -} - -std::pair Thread::YieldAndWaitForLoadBalancing() { - bool is_redundant = false; - { - SchedulerLock lock(kernel); - is_redundant = kernel.GlobalScheduler().YieldThreadAndWaitForLoadBalancing(this); - } - return {RESULT_SUCCESS, is_redundant}; -} - void Thread::AddSchedulingFlag(ThreadSchedFlags flag) { const u32 old_state = scheduling_state; pausing_state |= static_cast(flag); const u32 base_scheduling = static_cast(GetSchedulingStatus()); scheduling_state = base_scheduling | pausing_state; - kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state); + KScheduler::OnThreadStateChanged(kernel, this, old_state); } void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) { @@ -442,19 +417,20 @@ void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) { pausing_state &= ~static_cast(flag); const u32 base_scheduling = static_cast(GetSchedulingStatus()); scheduling_state = base_scheduling | pausing_state; - kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state); + KScheduler::OnThreadStateChanged(kernel, this, old_state); } void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) { const u32 old_state = scheduling_state; scheduling_state = (scheduling_state & static_cast(ThreadSchedMasks::HighMask)) | static_cast(new_status); - kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state); + KScheduler::OnThreadStateChanged(kernel, this, old_state); } void Thread::SetCurrentPriority(u32 new_priority) { const u32 old_priority = std::exchange(current_priority, new_priority); - kernel.GlobalScheduler().AdjustSchedulingOnPriority(this, old_priority); + KScheduler::OnThreadPriorityChanged(kernel, this, kernel.CurrentScheduler()->GetCurrentThread(), + old_priority); } ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { @@ -480,10 +456,10 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { if (use_override) { ideal_core_override = new_core; } else { - const auto old_affinity_mask = affinity_mask.GetAffinityMask(); + const auto old_affinity_mask = affinity_mask; affinity_mask.SetAffinityMask(new_affinity_mask); ideal_core = new_core; - if (old_affinity_mask != new_affinity_mask) { + if (old_affinity_mask.GetAffinityMask() != new_affinity_mask) { const s32 old_core = processor_id; if (processor_id >= 0 && !affinity_mask.GetAffinity(processor_id)) { if (static_cast(ideal_core) < 0) { @@ -493,7 +469,7 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { processor_id = ideal_core; } } - kernel.GlobalScheduler().AdjustSchedulingOnAffinity(this, old_affinity_mask, old_core); + KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_affinity_mask, old_core); } } return RESULT_SUCCESS; diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index 5192ecff12..f1aa358a4e 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h @@ -28,10 +28,10 @@ class System; namespace Kernel { -class GlobalScheduler; +class GlobalSchedulerContext; class KernelCore; class Process; -class Scheduler; +class KScheduler; enum ThreadPriority : u32 { THREADPRIO_HIGHEST = 0, ///< Highest thread priority @@ -346,8 +346,11 @@ public: void SetStatus(ThreadStatus new_status); - u64 GetLastRunningTicks() const { - return last_running_ticks; + constexpr s64 GetLastScheduledTick() const { + return this->last_scheduled_tick; + } + constexpr void SetLastScheduledTick(s64 tick) { + this->last_scheduled_tick = tick; } u64 GetTotalCPUTimeTicks() 
const { @@ -362,10 +365,18 @@ public: return processor_id; } + s32 GetActiveCore() const { + return GetProcessorID(); + } + void SetProcessorID(s32 new_core) { processor_id = new_core; } + void SetActiveCore(s32 new_core) { + processor_id = new_core; + } + Process* GetOwnerProcess() { return owner_process; } @@ -479,21 +490,11 @@ public: /// Sleeps this thread for the given amount of nanoseconds. ResultCode Sleep(s64 nanoseconds); - /// Yields this thread without rebalancing loads. - std::pair YieldSimple(); - - /// Yields this thread and does a load rebalancing. - std::pair YieldAndBalanceLoad(); - - /// Yields this thread and if the core is left idle, loads are rebalanced - std::pair YieldAndWaitForLoadBalancing(); - - void IncrementYieldCount() { - yield_count++; + constexpr s64 GetYieldScheduleCount() const { + return this->schedule_count; } - - u64 GetYieldCount() const { - return yield_count; + constexpr void SetYieldScheduleCount(s64 count) { + this->schedule_count = count; } ThreadSchedStatus GetSchedulingStatus() const { @@ -569,9 +570,62 @@ public: return has_exited; } + struct QueueEntry { + private: + Thread* prev; + Thread* next; + + public: + constexpr QueueEntry() : prev(nullptr), next(nullptr) { /* ... */ + } + + constexpr void Initialize() { + this->prev = nullptr; + this->next = nullptr; + } + + constexpr Thread* GetPrev() const { + return this->prev; + } + constexpr Thread* GetNext() const { + return this->next; + } + constexpr void SetPrev(Thread* t) { + this->prev = t; + } + constexpr void SetNext(Thread* t) { + this->next = t; + } + }; + + constexpr QueueEntry& GetPriorityQueueEntry(s32 core) { + return this->per_core_priority_queue_entry[core]; + } + constexpr const QueueEntry& GetPriorityQueueEntry(s32 core) const { + return this->per_core_priority_queue_entry[core]; + } + + s32 GetDisableDispatchCount() const { + return disable_count; + } + + void DisableDispatch() { + ASSERT(GetDisableDispatchCount() >= 0); + disable_count++; + } + + void EnableDispatch() { + ASSERT(GetDisableDispatchCount() > 0); + disable_count--; + } + + ThreadStatus status = ThreadStatus::Dormant; + u32 scheduling_state = 0; + private: - friend class GlobalScheduler; - friend class Scheduler; + friend class GlobalSchedulerContext; + friend class KScheduler; + friend class Process; void SetSchedulingStatus(ThreadSchedStatus new_status); void AddSchedulingFlag(ThreadSchedFlags flag); @@ -586,10 +640,9 @@ private: u64 thread_id = 0; - ThreadStatus status = ThreadStatus::Dormant; - VAddr entry_point = 0; VAddr stack_top = 0; + std::atomic_int disable_count = 0; ThreadType type; @@ -603,9 +656,8 @@ private: u32 current_priority = 0; u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks. - u64 last_running_ticks = 0; ///< CPU tick when thread was last running - u64 yield_count = 0; ///< Number of redundant yields carried by this thread. 
- ///< a redundant yield is one where no scheduling is changed + s64 schedule_count{}; + s64 last_scheduled_tick{}; s32 processor_id = 0; @@ -647,7 +699,9 @@ private: Handle hle_time_event; SynchronizationObject* hle_object; - Scheduler* scheduler = nullptr; + KScheduler* scheduler = nullptr; + + QueueEntry per_core_priority_queue_entry[Core::Hardware::NUM_CPU_CORES]{}; u32 ideal_core{0xFFFFFFFF}; KAffinityMask affinity_mask{}; @@ -655,7 +709,6 @@ private: s32 ideal_core_override = -1; u32 affinity_override_count = 0; - u32 scheduling_state = 0; u32 pausing_state = 0; bool is_running = false; bool is_waiting_on_sync = false; diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp index caf329bfb3..8e47696946 100644 --- a/src/core/hle/kernel/time_manager.cpp +++ b/src/core/hle/kernel/time_manager.cpp @@ -7,8 +7,8 @@ #include "core/core_timing.h" #include "core/core_timing_util.h" #include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" #include "core/hle/kernel/time_manager.h" diff --git a/src/core/hle/service/time/time.cpp b/src/core/hle/service/time/time.cpp index 7b7ac282dd..abc753d5d1 100644 --- a/src/core/hle/service/time/time.cpp +++ b/src/core/hle/service/time/time.cpp @@ -10,8 +10,8 @@ #include "core/hle/ipc_helpers.h" #include "core/hle/kernel/client_port.h" #include "core/hle/kernel/client_session.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/service/time/interface.h" #include "core/hle/service/time/time.h" #include "core/hle/service/time/time_sharedmemory.h" diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp index c4ae1d61fd..546a2cd4d6 100644 --- a/src/yuzu/debugger/wait_tree.cpp +++ b/src/yuzu/debugger/wait_tree.cpp @@ -13,10 +13,10 @@ #include "core/arm/arm_interface.h" #include "core/core.h" #include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/mutex.h" #include "core/hle/kernel/process.h" #include "core/hle/kernel/readable_event.h" -#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/synchronization_object.h" #include "core/hle/kernel/thread.h" #include "core/memory.h" @@ -101,7 +101,7 @@ std::vector> WaitTreeItem::MakeThreadItemList() }; const auto& system = Core::System::GetInstance(); - add_threads(system.GlobalScheduler().GetThreadList()); + add_threads(system.GlobalSchedulerContext().GetThreadList()); return item_list; } @@ -356,7 +356,7 @@ std::vector> WaitTreeThread::GetChildren() const { .arg(thread.GetPriority()) .arg(thread.GetNominalPriority()))); list.push_back(std::make_unique( - tr("last running ticks = %1").arg(thread.GetLastRunningTicks()))); + tr("last running ticks = %1").arg(thread.GetLastScheduledTick()))); const VAddr mutex_wait_address = thread.GetMutexWaitAddress(); if (mutex_wait_address != 0) { From 8d3e06349e12e7de17c334619f1f986792d1de4b Mon Sep 17 00:00:00 2001 From: bunnei Date: Thu, 3 Dec 2020 16:43:18 -0800 Subject: [PATCH 10/26] hle: kernel: Separate KScheduler from GlobalSchedulerContext class. 
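The split leaves GlobalSchedulerContext owning only the cross-core state
(the scheduler lock, the shared priority queue, and the list of live
threads), while each KScheduler instance owns the run state of a single
core. As a rough sketch of the intended ownership split (stand-in types
only; Thread, the lock, and the container choices below are simplified
placeholders, not the real yuzu interfaces):

    #include <cstddef>
    #include <memory>
    #include <mutex>
    #include <utility>
    #include <vector>

    struct Thread {};

    // Shared by all cores: the global thread list behind its own guard.
    class GlobalSchedulerContext {
    public:
        void AddThread(std::shared_ptr<Thread> thread) {
            std::scoped_lock lock{global_list_guard};
            thread_list.push_back(std::move(thread));
        }

    private:
        std::mutex global_list_guard;
        std::vector<std::shared_ptr<Thread>> thread_list;
    };

    // Owned per core: each scheduler only touches its own core's state.
    class KScheduler {
    public:
        explicit KScheduler(std::size_t core_id) : core_id{core_id} {}

    private:
        std::size_t core_id;
        Thread* current_thread{};
    };

With the scheduling queues now handled by the KPriorityQueue port,
Common::MultiLevelQueue and its unit test are removed.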
--- src/common/CMakeLists.txt | 1 - src/common/multi_level_queue.h | 345 ------------------ src/core/CMakeLists.txt | 2 + .../hle/kernel/global_scheduler_context.cpp | 55 +++ .../hle/kernel/global_scheduler_context.h | 79 ++++ src/core/hle/kernel/k_scheduler.cpp | 48 +-- src/core/hle/kernel/k_scheduler.h | 74 +--- src/tests/CMakeLists.txt | 1 - src/tests/common/multi_level_queue.cpp | 55 --- 9 files changed, 140 insertions(+), 520 deletions(-) delete mode 100644 src/common/multi_level_queue.h create mode 100644 src/core/hle/kernel/global_scheduler_context.cpp create mode 100644 src/core/hle/kernel/global_scheduler_context.h delete mode 100644 src/tests/common/multi_level_queue.cpp diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index fc2ed99990..8e51104a17 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt @@ -141,7 +141,6 @@ add_library(common STATIC microprofile.h microprofileui.h misc.cpp - multi_level_queue.h page_table.cpp page_table.h param_package.cpp diff --git a/src/common/multi_level_queue.h b/src/common/multi_level_queue.h deleted file mode 100644 index 4b305bf409..0000000000 --- a/src/common/multi_level_queue.h +++ /dev/null @@ -1,345 +0,0 @@ -// Copyright 2019 TuxSH -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#pragma once - -#include -#include -#include -#include - -#include "common/bit_util.h" -#include "common/common_types.h" - -namespace Common { - -/** - * A MultiLevelQueue is a type of priority queue which has the following characteristics: - * - iteratable through each of its elements. - * - back can be obtained. - * - O(1) add, lookup (both front and back) - * - discrete priorities and a max of 64 priorities (limited domain) - * This type of priority queue is normaly used for managing threads within an scheduler - */ -template -class MultiLevelQueue { -public: - using value_type = T; - using reference = value_type&; - using const_reference = const value_type&; - using pointer = value_type*; - using const_pointer = const value_type*; - - using difference_type = typename std::pointer_traits::difference_type; - using size_type = std::size_t; - - template - class iterator_impl { - public: - using iterator_category = std::bidirectional_iterator_tag; - using value_type = T; - using pointer = std::conditional_t; - using reference = std::conditional_t; - using difference_type = typename std::pointer_traits::difference_type; - - friend bool operator==(const iterator_impl& lhs, const iterator_impl& rhs) { - if (lhs.IsEnd() && rhs.IsEnd()) - return true; - return std::tie(lhs.current_priority, lhs.it) == std::tie(rhs.current_priority, rhs.it); - } - - friend bool operator!=(const iterator_impl& lhs, const iterator_impl& rhs) { - return !operator==(lhs, rhs); - } - - reference operator*() const { - return *it; - } - - pointer operator->() const { - return it.operator->(); - } - - iterator_impl& operator++() { - if (IsEnd()) { - return *this; - } - - ++it; - - if (it == GetEndItForPrio()) { - u64 prios = mlq.used_priorities; - prios &= ~((1ULL << (current_priority + 1)) - 1); - if (prios == 0) { - current_priority = static_cast(mlq.depth()); - } else { - current_priority = CountTrailingZeroes64(prios); - it = GetBeginItForPrio(); - } - } - return *this; - } - - iterator_impl& operator--() { - if (IsEnd()) { - if (mlq.used_priorities != 0) { - current_priority = 63 - CountLeadingZeroes64(mlq.used_priorities); - it = GetEndItForPrio(); - --it; - } - } else if (it == GetBeginItForPrio()) { - u64 
prios = mlq.used_priorities; - prios &= (1ULL << current_priority) - 1; - if (prios != 0) { - current_priority = CountTrailingZeroes64(prios); - it = GetEndItForPrio(); - --it; - } - } else { - --it; - } - return *this; - } - - iterator_impl operator++(int) { - const iterator_impl v{*this}; - ++(*this); - return v; - } - - iterator_impl operator--(int) { - const iterator_impl v{*this}; - --(*this); - return v; - } - - // allow implicit const->non-const - iterator_impl(const iterator_impl& other) - : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {} - - iterator_impl(const iterator_impl& other) - : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {} - - iterator_impl& operator=(const iterator_impl& other) { - mlq = other.mlq; - it = other.it; - current_priority = other.current_priority; - return *this; - } - - friend class iterator_impl; - iterator_impl() = default; - - private: - friend class MultiLevelQueue; - using container_ref = - std::conditional_t; - using list_iterator = std::conditional_t::const_iterator, - typename std::list::iterator>; - - explicit iterator_impl(container_ref mlq, list_iterator it, u32 current_priority) - : mlq(mlq), it(it), current_priority(current_priority) {} - explicit iterator_impl(container_ref mlq, u32 current_priority) - : mlq(mlq), it(), current_priority(current_priority) {} - - bool IsEnd() const { - return current_priority == mlq.depth(); - } - - list_iterator GetBeginItForPrio() const { - return mlq.levels[current_priority].begin(); - } - - list_iterator GetEndItForPrio() const { - return mlq.levels[current_priority].end(); - } - - container_ref mlq; - list_iterator it; - u32 current_priority; - }; - - using iterator = iterator_impl; - using const_iterator = iterator_impl; - - void add(const T& element, u32 priority, bool send_back = true) { - if (send_back) - levels[priority].push_back(element); - else - levels[priority].push_front(element); - used_priorities |= 1ULL << priority; - } - - void remove(const T& element, u32 priority) { - auto it = ListIterateTo(levels[priority], element); - if (it == levels[priority].end()) - return; - levels[priority].erase(it); - if (levels[priority].empty()) { - used_priorities &= ~(1ULL << priority); - } - } - - void adjust(const T& element, u32 old_priority, u32 new_priority, bool adjust_front = false) { - remove(element, old_priority); - add(element, new_priority, !adjust_front); - } - void adjust(const_iterator it, u32 old_priority, u32 new_priority, bool adjust_front = false) { - adjust(*it, old_priority, new_priority, adjust_front); - } - - void transfer_to_front(const T& element, u32 priority, MultiLevelQueue& other) { - ListSplice(other.levels[priority], other.levels[priority].begin(), levels[priority], - ListIterateTo(levels[priority], element)); - - other.used_priorities |= 1ULL << priority; - - if (levels[priority].empty()) { - used_priorities &= ~(1ULL << priority); - } - } - - void transfer_to_front(const_iterator it, u32 priority, MultiLevelQueue& other) { - transfer_to_front(*it, priority, other); - } - - void transfer_to_back(const T& element, u32 priority, MultiLevelQueue& other) { - ListSplice(other.levels[priority], other.levels[priority].end(), levels[priority], - ListIterateTo(levels[priority], element)); - - other.used_priorities |= 1ULL << priority; - - if (levels[priority].empty()) { - used_priorities &= ~(1ULL << priority); - } - } - - void transfer_to_back(const_iterator it, u32 priority, MultiLevelQueue& other) { - transfer_to_back(*it, 
priority, other); - } - - void yield(u32 priority, std::size_t n = 1) { - ListShiftForward(levels[priority], n); - } - - [[nodiscard]] std::size_t depth() const { - return Depth; - } - - [[nodiscard]] std::size_t size(u32 priority) const { - return levels[priority].size(); - } - - [[nodiscard]] std::size_t size() const { - u64 priorities = used_priorities; - std::size_t size = 0; - while (priorities != 0) { - const u64 current_priority = CountTrailingZeroes64(priorities); - size += levels[current_priority].size(); - priorities &= ~(1ULL << current_priority); - } - return size; - } - - [[nodiscard]] bool empty() const { - return used_priorities == 0; - } - - [[nodiscard]] bool empty(u32 priority) const { - return (used_priorities & (1ULL << priority)) == 0; - } - - [[nodiscard]] u32 highest_priority_set(u32 max_priority = 0) const { - const u64 priorities = - max_priority == 0 ? used_priorities : (used_priorities & ~((1ULL << max_priority) - 1)); - return priorities == 0 ? Depth : static_cast(CountTrailingZeroes64(priorities)); - } - - [[nodiscard]] u32 lowest_priority_set(u32 min_priority = Depth - 1) const { - const u64 priorities = min_priority >= Depth - 1 - ? used_priorities - : (used_priorities & ((1ULL << (min_priority + 1)) - 1)); - return priorities == 0 ? Depth : 63 - CountLeadingZeroes64(priorities); - } - - [[nodiscard]] const_iterator cbegin(u32 max_prio = 0) const { - const u32 priority = highest_priority_set(max_prio); - return priority == Depth ? cend() - : const_iterator{*this, levels[priority].cbegin(), priority}; - } - [[nodiscard]] const_iterator begin(u32 max_prio = 0) const { - return cbegin(max_prio); - } - [[nodiscard]] iterator begin(u32 max_prio = 0) { - const u32 priority = highest_priority_set(max_prio); - return priority == Depth ? end() : iterator{*this, levels[priority].begin(), priority}; - } - - [[nodiscard]] const_iterator cend(u32 min_prio = Depth - 1) const { - return min_prio == Depth - 1 ? const_iterator{*this, Depth} : cbegin(min_prio + 1); - } - [[nodiscard]] const_iterator end(u32 min_prio = Depth - 1) const { - return cend(min_prio); - } - [[nodiscard]] iterator end(u32 min_prio = Depth - 1) { - return min_prio == Depth - 1 ? iterator{*this, Depth} : begin(min_prio + 1); - } - - [[nodiscard]] T& front(u32 max_priority = 0) { - const u32 priority = highest_priority_set(max_priority); - return levels[priority == Depth ? 0 : priority].front(); - } - [[nodiscard]] const T& front(u32 max_priority = 0) const { - const u32 priority = highest_priority_set(max_priority); - return levels[priority == Depth ? 0 : priority].front(); - } - - [[nodiscard]] T& back(u32 min_priority = Depth - 1) { - const u32 priority = lowest_priority_set(min_priority); // intended - return levels[priority == Depth ? 63 : priority].back(); - } - [[nodiscard]] const T& back(u32 min_priority = Depth - 1) const { - const u32 priority = lowest_priority_set(min_priority); // intended - return levels[priority == Depth ? 
63 : priority].back(); - } - - void clear() { - used_priorities = 0; - for (std::size_t i = 0; i < Depth; i++) { - levels[i].clear(); - } - } - -private: - using const_list_iterator = typename std::list::const_iterator; - - static void ListShiftForward(std::list& list, const std::size_t shift = 1) { - if (shift >= list.size()) { - return; - } - - const auto begin_range = list.begin(); - const auto end_range = std::next(begin_range, shift); - list.splice(list.end(), list, begin_range, end_range); - } - - static void ListSplice(std::list& in_list, const_list_iterator position, - std::list& out_list, const_list_iterator element) { - in_list.splice(position, out_list, element); - } - - [[nodiscard]] static const_list_iterator ListIterateTo(const std::list& list, - const T& element) { - auto it = list.cbegin(); - while (it != list.cend() && *it != element) { - ++it; - } - return it; - } - - std::array, Depth> levels; - u64 used_priorities = 0; -}; - -} // namespace Common diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 662839ff87..ee61f22c05 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -148,6 +148,8 @@ add_library(core STATIC hle/kernel/code_set.cpp hle/kernel/code_set.h hle/kernel/errors.h + hle/kernel/global_scheduler_context.cpp + hle/kernel/global_scheduler_context.h hle/kernel/handle_table.cpp hle/kernel/handle_table.h hle/kernel/hle_ipc.cpp diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp new file mode 100644 index 0000000000..40e9adf478 --- /dev/null +++ b/src/core/hle/kernel/global_scheduler_context.cpp @@ -0,0 +1,55 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include + +#include "common/assert.h" +#include "core/core.h" +#include "core/hle/kernel/global_scheduler_context.h" +#include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/kernel.h" + +namespace Kernel { + +GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel) + : kernel{kernel}, scheduler_lock{kernel} {} + +GlobalSchedulerContext::~GlobalSchedulerContext() = default; + +void GlobalSchedulerContext::AddThread(std::shared_ptr thread) { + std::scoped_lock lock{global_list_guard}; + thread_list.push_back(std::move(thread)); +} + +void GlobalSchedulerContext::RemoveThread(std::shared_ptr thread) { + std::scoped_lock lock{global_list_guard}; + thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), + thread_list.end()); +} + +void GlobalSchedulerContext::PreemptThreads() { + // The priority levels at which the global scheduler preempts threads every 10 ms. They are + // ordered from Core 0 to Core 3. 
+    std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 63};
+
+    ASSERT(IsLocked());
+    for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+        const u32 priority = preemption_priorities[core_id];
+        kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority);
+    }
+}
+
+bool GlobalSchedulerContext::IsLocked() const {
+    return scheduler_lock.IsLockedByCurrentThread();
+}
+
+void GlobalSchedulerContext::Lock() {
+    scheduler_lock.Lock();
+}
+
+void GlobalSchedulerContext::Unlock() {
+    scheduler_lock.Unlock();
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
new file mode 100644
index 0000000000..40fe44cc0e
--- /dev/null
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -0,0 +1,79 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <atomic>
+#include <vector>
+
+#include "common/common_types.h"
+#include "common/spin_lock.h"
+#include "core/hardware_properties.h"
+#include "core/hle/kernel/k_priority_queue.h"
+#include "core/hle/kernel/k_scheduler_lock.h"
+#include "core/hle/kernel/thread.h"
+
+namespace Kernel {
+
+class KernelCore;
+class SchedulerLock;
+
+using KSchedulerPriorityQueue =
+    KPriorityQueue<Thread, Core::Hardware::NUM_CPU_CORES, THREADPRIO_LOWEST, THREADPRIO_HIGHEST>;
+static constexpr s32 HighestCoreMigrationAllowedPriority = 2;
+
+class GlobalSchedulerContext final {
+    friend class KScheduler;
+
+public:
+    explicit GlobalSchedulerContext(KernelCore& kernel);
+    ~GlobalSchedulerContext();
+
+    /// Adds a new thread to the scheduler
+    void AddThread(std::shared_ptr<Thread> thread);
+
+    /// Removes a thread from the scheduler
+    void RemoveThread(std::shared_ptr<Thread> thread);
+
+    /// Returns a list of all threads managed by the scheduler
+    const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
+        return thread_list;
+    }
+
+    /**
+     * Rotates the scheduling queues of threads at a preemption priority and then does
+     * some core rebalancing. Preemption priorities can be found in the array
+     * 'preemption_priorities'.
+     *
+     * @note This operation happens every 10ms.
+     */
+    void PreemptThreads();
+
+    /// Returns true if the global scheduler lock is acquired
+    bool IsLocked() const;
+
+private:
+    friend class SchedulerLock;
+
+    /// Lock the scheduler to the current thread.
+    void Lock();
+
+    /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling
+    /// and reschedules current core if needed.
+    void Unlock();
+
+    using LockType = KAbstractSchedulerLock<KScheduler>;
+
+    KernelCore& kernel;
+
+    std::atomic_bool scheduler_update_needed{};
+    KSchedulerPriorityQueue priority_queue;
+    LockType scheduler_lock;
+
+    /// Lists all thread ids that aren't deleted/etc.
+    std::vector<std::shared_ptr<Thread>> thread_list;
+    Common::SpinLock global_list_guard{};
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 7f7da610d2..c7e2eabd43 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -5,12 +5,6 @@
 // This file references various implementation details from Atmosphere, an open-source firmware for
 // the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
-#include -#include -#include -#include -#include - #include "common/assert.h" #include "common/bit_util.h" #include "common/fiber.h" @@ -19,10 +13,10 @@ #include "core/core.h" #include "core/core_timing.h" #include "core/cpu_manager.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/physical_core.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/thread.h" #include "core/hle/kernel/time_manager.h" @@ -34,11 +28,6 @@ static void IncrementScheduledCount(Kernel::Thread* thread) { } } -GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel) - : kernel{kernel}, scheduler_lock{kernel} {} - -GlobalSchedulerContext::~GlobalSchedulerContext() = default; - /*static*/ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule, Core::EmuThreadHandle global_thread) { u32 current_core = global_thread.host_handle; @@ -205,33 +194,6 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { return cores_needing_scheduling; } -void GlobalSchedulerContext::AddThread(std::shared_ptr thread) { - std::scoped_lock lock{global_list_guard}; - thread_list.push_back(std::move(thread)); -} - -void GlobalSchedulerContext::RemoveThread(std::shared_ptr thread) { - std::scoped_lock lock{global_list_guard}; - thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), - thread_list.end()); -} - -void GlobalSchedulerContext::PreemptThreads() { - // The priority levels at which the global scheduler preempts threads every 10 ms. They are - // ordered from Core 0 to Core 3. - std::array preemption_priorities = {59, 59, 59, 63}; - - ASSERT(IsLocked()); - for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { - const u32 priority = preemption_priorities[core_id]; - kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority); - } -} - -bool GlobalSchedulerContext::IsLocked() const { - return scheduler_lock.IsLockedByCurrentThread(); -} - /*static*/ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state) { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); @@ -635,14 +597,6 @@ void KScheduler::YieldToAnyThread() { } } -void GlobalSchedulerContext::Lock() { - scheduler_lock.Lock(); -} - -void GlobalSchedulerContext::Unlock() { - scheduler_lock.Unlock(); -} - KScheduler::KScheduler(Core::System& system, std::size_t core_id) : system(system), core_id(core_id) { switch_fiber = std::make_shared(std::function(OnSwitch), this); diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h index 535ee34b98..7f020d96e2 100644 --- a/src/core/hle/kernel/k_scheduler.h +++ b/src/core/hle/kernel/k_scheduler.h @@ -8,94 +8,27 @@ #pragma once #include -#include -#include -#include #include "common/common_types.h" -#include "common/multi_level_queue.h" -#include "common/scope_exit.h" #include "common/spin_lock.h" -#include "core/core_timing.h" -#include "core/hardware_properties.h" +#include "core/hle/kernel/global_scheduler_context.h" #include "core/hle/kernel/k_priority_queue.h" #include "core/hle/kernel/k_scheduler_lock.h" -#include "core/hle/kernel/thread.h" namespace Common { class Fiber; } namespace Core { -class ARM_Interface; class System; -} // namespace Core +} namespace Kernel { class KernelCore; class Process; class SchedulerLock; - -using KSchedulerPriorityQueue = - KPriorityQueue; -static constexpr s32 HighestCoreMigrationAllowedPriority = 2; - -class GlobalSchedulerContext 
final { - friend class KScheduler; - -public: - explicit GlobalSchedulerContext(KernelCore& kernel); - ~GlobalSchedulerContext(); - - /// Adds a new thread to the scheduler - void AddThread(std::shared_ptr thread); - - /// Removes a thread from the scheduler - void RemoveThread(std::shared_ptr thread); - - /// Returns a list of all threads managed by the scheduler - const std::vector>& GetThreadList() const { - return thread_list; - } - - /** - * Rotates the scheduling queues of threads at a preemption priority and then does - * some core rebalancing. Preemption priorities can be found in the array - * 'preemption_priorities'. - * - * @note This operation happens every 10ms. - */ - void PreemptThreads(); - - u32 CpuCoresCount() const { - return Core::Hardware::NUM_CPU_CORES; - } - - bool IsLocked() const; - -private: - friend class SchedulerLock; - - /// Lock the scheduler to the current thread. - void Lock(); - - /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling - /// and reschedules current core if needed. - void Unlock(); - - using LockType = KAbstractSchedulerLock; - - KernelCore& kernel; - - std::atomic_bool scheduler_update_needed{}; - KSchedulerPriorityQueue priority_queue; - LockType scheduler_lock; - - /// Lists all thread ids that aren't deleted/etc. - std::vector> thread_list; - Common::SpinLock global_list_guard{}; -}; +class Thread; class KScheduler final { public: @@ -221,7 +154,6 @@ private: /// Switches the CPU's active thread context to that of the specified thread void ScheduleImpl(); - void SwitchThread(Thread* next_thread); /// When a thread wakes up, it must run this through it's new scheduler void SwitchContextStep2(); diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt index 47ef30aa91..d80b0b688f 100644 --- a/src/tests/CMakeLists.txt +++ b/src/tests/CMakeLists.txt @@ -2,7 +2,6 @@ add_executable(tests common/bit_field.cpp common/bit_utils.cpp common/fibers.cpp - common/multi_level_queue.cpp common/param_package.cpp common/ring_buffer.cpp core/arm/arm_test_common.cpp diff --git a/src/tests/common/multi_level_queue.cpp b/src/tests/common/multi_level_queue.cpp deleted file mode 100644 index cca7ec7dae..0000000000 --- a/src/tests/common/multi_level_queue.cpp +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2019 Yuzu Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. 
- -#include -#include -#include "common/common_types.h" -#include "common/multi_level_queue.h" - -namespace Common { - -TEST_CASE("MultiLevelQueue", "[common]") { - std::array values = {0.0, 5.0, 1.0, 9.0, 8.0, 2.0, 6.0, 7.0}; - Common::MultiLevelQueue mlq; - REQUIRE(mlq.empty()); - mlq.add(values[2], 2); - mlq.add(values[7], 7); - mlq.add(values[3], 3); - mlq.add(values[4], 4); - mlq.add(values[0], 0); - mlq.add(values[5], 5); - mlq.add(values[6], 6); - mlq.add(values[1], 1); - u32 index = 0; - bool all_set = true; - for (auto& f : mlq) { - all_set &= (f == values[index]); - index++; - } - REQUIRE(all_set); - REQUIRE(!mlq.empty()); - f32 v = 8.0; - mlq.add(v, 2); - v = -7.0; - mlq.add(v, 2, false); - REQUIRE(mlq.front(2) == -7.0); - mlq.yield(2); - REQUIRE(mlq.front(2) == values[2]); - REQUIRE(mlq.back(2) == -7.0); - REQUIRE(mlq.empty(8)); - v = 10.0; - mlq.add(v, 8); - mlq.adjust(v, 8, 9); - REQUIRE(mlq.front(9) == v); - REQUIRE(mlq.empty(8)); - REQUIRE(!mlq.empty(9)); - mlq.adjust(values[0], 0, 9); - REQUIRE(mlq.highest_priority_set() == 1); - REQUIRE(mlq.lowest_priority_set() == 9); - mlq.remove(values[1], 1); - REQUIRE(mlq.highest_priority_set() == 2); - REQUIRE(mlq.empty(1)); -} - -} // namespace Common From 4756cb203e8ef09377988eb1b49ca20ef45f4492 Mon Sep 17 00:00:00 2001 From: bunnei Date: Thu, 3 Dec 2020 21:56:02 -0800 Subject: [PATCH 11/26] hle: kernel: Separate KScopedSchedulerLockAndSleep from k_scheduler. --- src/core/CMakeLists.txt | 1 + src/core/hle/kernel/address_arbiter.cpp | 5 +- .../hle/kernel/global_scheduler_context.cpp | 8 --- .../hle/kernel/global_scheduler_context.h | 10 +--- src/core/hle/kernel/hle_ipc.cpp | 8 +-- src/core/hle/kernel/k_scheduler.cpp | 25 +-------- src/core/hle/kernel/k_scheduler.h | 19 ------- .../k_scoped_scheduler_lock_and_sleep.h | 56 +++++++++++++++++++ src/core/hle/kernel/svc.cpp | 3 +- src/core/hle/kernel/synchronization.cpp | 3 +- src/core/hle/kernel/thread.cpp | 3 +- 11 files changed, 72 insertions(+), 69 deletions(-) create mode 100644 src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index ee61f22c05..5f6dce52a7 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -159,6 +159,7 @@ add_library(core STATIC hle/kernel/k_scheduler.cpp hle/kernel/k_scheduler.h hle/kernel/k_scheduler_lock.h + hle/kernel/k_scoped_scheduler_lock_and_sleep.h hle/kernel/kernel.cpp hle/kernel/kernel.h hle/kernel/memory/address_space_info.cpp diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp index bc32be18b6..ac49131735 100644 --- a/src/core/hle/kernel/address_arbiter.cpp +++ b/src/core/hle/kernel/address_arbiter.cpp @@ -13,6 +13,7 @@ #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/thread.h" #include "core/hle/kernel/time_manager.h" @@ -157,7 +158,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6 Handle event_handle = InvalidHandle; { - SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout); + KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout); if (current_thread->IsPendingTermination()) { lock.CancelSleep(); @@ -227,7 +228,7 @@ ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 t Handle event_handle = InvalidHandle; { - 
SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout); + KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout); if (current_thread->IsPendingTermination()) { lock.CancelSleep(); diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp index 40e9adf478..a9cb48b38e 100644 --- a/src/core/hle/kernel/global_scheduler_context.cpp +++ b/src/core/hle/kernel/global_scheduler_context.cpp @@ -44,12 +44,4 @@ bool GlobalSchedulerContext::IsLocked() const { return scheduler_lock.IsLockedByCurrentThread(); } -void GlobalSchedulerContext::Lock() { - scheduler_lock.Lock(); -} - -void GlobalSchedulerContext::Unlock() { - scheduler_lock.Unlock(); -} - } // namespace Kernel diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h index 40fe44cc0e..39c383746a 100644 --- a/src/core/hle/kernel/global_scheduler_context.h +++ b/src/core/hle/kernel/global_scheduler_context.h @@ -55,15 +55,7 @@ public: private: friend class SchedulerLock; - - /// Lock the scheduler to the current thread. - void Lock(); - - /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling - /// and reschedules current core if needed. - void Unlock(); - - using LockType = KAbstractSchedulerLock; + friend class KScopedSchedulerLockAndSleep; KernelCore& kernel; diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp index 7eda89786e..e75e80ad00 100644 --- a/src/core/hle/kernel/hle_ipc.cpp +++ b/src/core/hle/kernel/hle_ipc.cpp @@ -18,6 +18,7 @@ #include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/hle_ipc.h" #include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/process.h" @@ -56,9 +57,9 @@ std::shared_ptr HLERequestContext::SleepClientThread( writable_event = pair.writable; } + Handle event_handle = InvalidHandle; { - Handle event_handle = InvalidHandle; - SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout); + KScopedSchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout); thread->SetHLECallback( [context = *this, callback](std::shared_ptr thread) mutable -> bool { ThreadWakeupReason reason = thread->GetSignalingResult() == RESULT_TIMEOUT @@ -74,9 +75,8 @@ std::shared_ptr HLERequestContext::SleepClientThread( thread->SetStatus(ThreadStatus::WaitHLEEvent); thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT); readable_event->AddWaitingThread(thread); - lock.Release(); - thread->SetHLETimeEvent(event_handle); } + thread->SetHLETimeEvent(event_handle); is_thread_waiting = true; diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index c7e2eabd43..4661474989 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp @@ -14,6 +14,7 @@ #include "core/core_timing.h" #include "core/cpu_manager.h" #include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/physical_core.h" #include "core/hle/kernel/process.h" @@ -800,28 +801,4 @@ SchedulerLock::~SchedulerLock() { kernel.GlobalSchedulerContext().Unlock(); } -SchedulerLockAndSleep::SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, - Thread* time_task, s64 nanoseconds) - : SchedulerLock{kernel}, event_handle{event_handle}, 
time_task{time_task}, nanoseconds{ - nanoseconds} { - event_handle = InvalidHandle; -} - -SchedulerLockAndSleep::~SchedulerLockAndSleep() { - if (sleep_cancelled) { - return; - } - auto& time_manager = kernel.TimeManager(); - time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds); -} - -void SchedulerLockAndSleep::Release() { - if (sleep_cancelled) { - return; - } - auto& time_manager = kernel.TimeManager(); - time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds); - sleep_cancelled = true; -} - } // namespace Kernel diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h index 7f020d96e2..5ba0f3c323 100644 --- a/src/core/hle/kernel/k_scheduler.h +++ b/src/core/hle/kernel/k_scheduler.h @@ -207,23 +207,4 @@ protected: KernelCore& kernel; }; -class SchedulerLockAndSleep : public SchedulerLock { -public: - explicit SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* time_task, - s64 nanoseconds); - ~SchedulerLockAndSleep(); - - void CancelSleep() { - sleep_cancelled = true; - } - - void Release(); - -private: - Handle& event_handle; - Thread* time_task; - s64 nanoseconds; - bool sleep_cancelled{}; -}; - } // namespace Kernel diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h new file mode 100644 index 0000000000..f11a62216a --- /dev/null +++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h @@ -0,0 +1,56 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +// This file references various implementation details from Atmosphere, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX. + +#pragma once + +#include "common/common_types.h" +#include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/thread.h" +#include "core/hle/kernel/time_manager.h" + +namespace Kernel { + +class KScopedSchedulerLockAndSleep { +private: + KernelCore& kernel; + s64 timeout_tick{}; + Thread* thread{}; + Handle* event_handle{}; + +public: + explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Thread* t, s64 timeout) + : kernel(kernel), timeout_tick(timeout), thread(t) { + /* Lock the scheduler. */ + kernel.GlobalSchedulerContext().scheduler_lock.Lock(); + } + + explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* t, + s64 timeout) + : kernel(kernel), event_handle(&event_handle), timeout_tick(timeout), thread(t) { + /* Lock the scheduler. */ + kernel.GlobalSchedulerContext().scheduler_lock.Lock(); + } + + ~KScopedSchedulerLockAndSleep() { + /* Register the sleep. */ + if (this->timeout_tick > 0) { + auto& time_manager = kernel.TimeManager(); + Handle handle{}; + time_manager.ScheduleTimeEvent(event_handle ? *event_handle : handle, this->thread, + this->timeout_tick); + } + + /* Unlock the scheduler. 
*/
+        kernel.GlobalSchedulerContext().scheduler_lock.Unlock();
+    }
+
+    void CancelSleep() {
+        this->timeout_tick = 0;
+    }
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 2612a6b0d7..2760a307c4 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -25,6 +25,7 @@
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
 #include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/memory/memory_block.h"
 #include "core/hle/kernel/memory/page_table.h"
@@ -1654,7 +1655,7 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
     Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
     auto* const current_process = kernel.CurrentProcess();
     {
-        SchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds);
+        KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds);
         const auto& handle_table = current_process->GetHandleTable();
         std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
         ASSERT(thread);
diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp
index 342fb4516a..6651ad90cd 100644
--- a/src/core/hle/kernel/synchronization.cpp
+++ b/src/core/hle/kernel/synchronization.cpp
@@ -6,6 +6,7 @@
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
 #include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/synchronization.h"
 #include "core/hle/kernel/synchronization_object.h"
@@ -40,7 +41,7 @@ std::pair<ResultCode, Handle> Synchronization::WaitFor(
     auto* const thread = kernel.CurrentScheduler()->GetCurrentThread();
     Handle event_handle = InvalidHandle;
     {
-        SchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds);
+        KScopedSchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds);
         const auto itr =
             std::find_if(sync_objects.begin(), sync_objects.end(),
                          [thread](const std::shared_ptr<SynchronizationObject>& object) {
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 804e07f2bd..6f89238caa 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -18,6 +18,7 @@
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
 #include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/process.h"
@@ -393,7 +394,7 @@ ResultCode Thread::SetActivity(ThreadActivity value) {
 ResultCode Thread::Sleep(s64 nanoseconds) {
     Handle event_handle{};
     {
-        SchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
+        KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
         SetStatus(ThreadStatus::WaitSleep);
     }

From ccce6cb3be062fc7ae162bed32202538ebc2e3d9 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Thu, 3 Dec 2020 22:26:42 -0800
Subject: [PATCH 12/26] hle: kernel: Migrate to KScopedSchedulerLock.
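SchedulerLock was a hand-rolled guard that locked the global scheduler in its
constructor and unlocked it in its destructor. This change reroutes it through
the generic KScopedLock wrapper introduced below, constrained by the KLockable
concept, so any type exposing Lock()/Unlock() can be scoped the same way.
Call sites keep their shape; a minimal sketch of the pattern, with the kernel
reference and thread assumed from the surrounding call site:

    {
        KScopedSchedulerLock lock(kernel); // Lock() runs on construction
        thread->SetStatus(ThreadStatus::Ready);
    } // destruction runs Unlock(), which reselects threads and interrupts
      // any cores that now need rescheduling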
--- src/core/CMakeLists.txt | 1 + src/core/hle/kernel/address_arbiter.cpp | 10 ++--- .../hle/kernel/global_scheduler_context.h | 12 +++++- src/core/hle/kernel/k_scheduler.cpp | 15 +++---- src/core/hle/kernel/k_scheduler.h | 10 ++--- src/core/hle/kernel/k_scoped_lock.h | 39 +++++++++++++++++++ src/core/hle/kernel/kernel.cpp | 4 +- src/core/hle/kernel/mutex.cpp | 6 +-- src/core/hle/kernel/process.cpp | 8 ++-- src/core/hle/kernel/readable_event.cpp | 2 +- src/core/hle/kernel/server_session.cpp | 2 +- src/core/hle/kernel/svc.cpp | 8 ++-- src/core/hle/kernel/synchronization.cpp | 4 +- src/core/hle/kernel/thread.cpp | 17 ++++---- src/core/hle/kernel/time_manager.cpp | 2 +- 15 files changed, 92 insertions(+), 48 deletions(-) create mode 100644 src/core/hle/kernel/k_scoped_lock.h diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 5f6dce52a7..eb1fbcb61b 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -159,6 +159,7 @@ add_library(core STATIC hle/kernel/k_scheduler.cpp hle/kernel/k_scheduler.h hle/kernel/k_scheduler_lock.h + hle/kernel/k_scoped_lock.h hle/kernel/k_scoped_scheduler_lock_and_sleep.h hle/kernel/kernel.cpp hle/kernel/kernel.h diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp index ac49131735..20ffa7d476 100644 --- a/src/core/hle/kernel/address_arbiter.cpp +++ b/src/core/hle/kernel/address_arbiter.cpp @@ -59,7 +59,7 @@ ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 v } ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) { - SchedulerLock lock(system.Kernel()); + KScopedSchedulerLock lock(system.Kernel()); const std::vector> waiting_threads = GetThreadsWaitingOnAddress(address); WakeThreads(waiting_threads, num_to_wake); @@ -68,7 +68,7 @@ ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) { ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake) { - SchedulerLock lock(system.Kernel()); + KScopedSchedulerLock lock(system.Kernel()); auto& memory = system.Memory(); // Ensure that we can write to the address. @@ -93,7 +93,7 @@ ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake) { - SchedulerLock lock(system.Kernel()); + KScopedSchedulerLock lock(system.Kernel()); auto& memory = system.Memory(); // Ensure that we can write to the address. 
@@ -211,7 +211,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
     }

     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         if (current_thread->IsWaitingForArbitration()) {
             RemoveThread(SharedFrom(current_thread));
             current_thread->WaitForArbitration(false);
@@ -266,7 +266,7 @@ ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 t
     }

     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         if (current_thread->IsWaitingForArbitration()) {
             RemoveThread(SharedFrom(current_thread));
             current_thread->WaitForArbitration(false);
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
index 39c383746a..c4bc23eedd 100644
--- a/src/core/hle/kernel/global_scheduler_context.h
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -27,6 +27,8 @@ class GlobalSchedulerContext final {
     friend class KScheduler;

 public:
+    using LockType = KAbstractSchedulerLock<KScheduler>;
+
     explicit GlobalSchedulerContext(KernelCore& kernel);
     ~GlobalSchedulerContext();

@@ -53,8 +55,16 @@ public:
     /// Returns true if the global scheduler lock is acquired
     bool IsLocked() const;

+    LockType& SchedulerLock() {
+        return scheduler_lock;
+    }
+
+    const LockType& SchedulerLock() const {
+        return scheduler_lock;
+    }
+
 private:
-    friend class SchedulerLock;
+    friend class KScopedSchedulerLock;
     friend class KScopedSchedulerLockAndSleep;

     KernelCore& kernel;
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 4661474989..9645fee226 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -410,7 +410,7 @@ void KScheduler::YieldWithoutCoreMigration() {

     /* Perform the yield. */
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);

         const auto cur_state = cur_thread.scheduling_state;
         if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
@@ -451,7 +451,7 @@ void KScheduler::YieldWithCoreMigration() {

     /* Perform the yield. */
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);

         const auto cur_state = cur_thread.scheduling_state;
         if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
@@ -541,7 +541,7 @@ void KScheduler::YieldToAnyThread() {

     /* Perform the yield.
*/
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);

         const auto cur_state = cur_thread.scheduling_state;
         if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
@@ -793,12 +793,9 @@ void KScheduler::Initialize() {
     }
 }

-SchedulerLock::SchedulerLock(KernelCore& kernel) : kernel{kernel} {
-    kernel.GlobalSchedulerContext().Lock();
-}
+KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)
+    : KScopedLock(kernel.GlobalSchedulerContext().SchedulerLock()) {}

-SchedulerLock::~SchedulerLock() {
-    kernel.GlobalSchedulerContext().Unlock();
-}
+KScopedSchedulerLock::~KScopedSchedulerLock() = default;

 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 5ba0f3c323..d52ecc0db4 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -14,6 +14,7 @@
 #include "core/hle/kernel/global_scheduler_context.h"
 #include "core/hle/kernel/k_priority_queue.h"
 #include "core/hle/kernel/k_scheduler_lock.h"
+#include "core/hle/kernel/k_scoped_lock.h"

 namespace Common {
 class Fiber;
@@ -198,13 +199,10 @@ private:
     Common::SpinLock guard{};
 };

-class SchedulerLock {
+class KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> {
 public:
-    [[nodiscard]] explicit SchedulerLock(KernelCore& kernel);
-    ~SchedulerLock();
-
-protected:
-    KernelCore& kernel;
+    explicit KScopedSchedulerLock(KernelCore& kernel);
+    ~KScopedSchedulerLock();
 };

 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_scoped_lock.h b/src/core/hle/kernel/k_scoped_lock.h
new file mode 100644
index 0000000000..03320859fd
--- /dev/null
+++ b/src/core/hle/kernel/k_scoped_lock.h
@@ -0,0 +1,39 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace Kernel {
+
+template <typename T>
+concept KLockable = !std::is_reference<T>::value && requires(T & t) {
+    { t.Lock() }
+    ->std::same_as<void>;
+    { t.Unlock() }
+    ->std::same_as<void>;
+};
+
+template <typename T>
+requires KLockable<T> class KScopedLock : NonCopyable {
+
+private:
+    T* lock_ptr;
+
+public:
+    explicit KScopedLock(T* l) : lock_ptr(l) {
+        this->lock_ptr->Lock();
+    }
+    explicit KScopedLock(T& l) : KScopedLock(std::addressof(l)) { /* ... */
+    }
+    ~KScopedLock() {
+        this->lock_ptr->Unlock();
+    }
+};
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index b74e34c40d..04cae3a433 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -146,7 +146,7 @@ struct KernelCore::Impl {
         preemption_event = Core::Timing::CreateEvent(
             "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) {
                 {
-                    SchedulerLock lock(kernel);
+                    KScopedSchedulerLock lock(kernel);
                     global_scheduler_context->PreemptThreads();
                 }
                 const auto time_interval = std::chrono::nanoseconds{
@@ -612,7 +612,7 @@ const Kernel::SharedMemory& KernelCore::GetTimeSharedMem() const {
 void KernelCore::Suspend(bool in_suspention) {
     const bool should_suspend = exception_exited || in_suspention;
     {
-        SchedulerLock lock(*this);
+        KScopedSchedulerLock lock(*this);
         ThreadStatus status = should_suspend ?
ThreadStatus::Ready : ThreadStatus::WaitSleep; for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { impl->suspend_threads[i]->SetStatus(status); diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp index 6299b1342a..4f8075e0e9 100644 --- a/src/core/hle/kernel/mutex.cpp +++ b/src/core/hle/kernel/mutex.cpp @@ -75,7 +75,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle, std::shared_ptr current_thread = SharedFrom(kernel.CurrentScheduler()->GetCurrentThread()); { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); // The mutex address must be 4-byte aligned if ((address % sizeof(u32)) != 0) { return ERR_INVALID_ADDRESS; @@ -114,7 +114,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle, } { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); auto* owner = current_thread->GetLockOwner(); if (owner != nullptr) { owner->RemoveMutexWaiter(current_thread); @@ -153,7 +153,7 @@ std::pair> Mutex::Unlock(std::shared_ptr current_thread = SharedFrom(kernel.CurrentScheduler()->GetCurrentThread()); diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index 238c03a13f..b905b486a7 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -54,7 +54,7 @@ void SetupMainThread(Core::System& system, Process& owner_process, u32 priority, auto& kernel = system.Kernel(); // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires { - SchedulerLock lock{kernel}; + KScopedSchedulerLock lock{kernel}; thread->SetStatus(ThreadStatus::Ready); } } @@ -213,7 +213,7 @@ void Process::UnregisterThread(const Thread* thread) { } ResultCode Process::ClearSignalState() { - SchedulerLock lock(system.Kernel()); + KScopedSchedulerLock lock(system.Kernel()); if (status == ProcessStatus::Exited) { LOG_ERROR(Kernel, "called on a terminated process instance."); return ERR_INVALID_STATE; @@ -347,7 +347,7 @@ static auto FindTLSPageWithAvailableSlots(std::vector& tls_pages) { } VAddr Process::CreateTLSRegion() { - SchedulerLock lock(system.Kernel()); + KScopedSchedulerLock lock(system.Kernel()); if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)}; tls_page_iter != tls_pages.cend()) { return *tls_page_iter->ReserveSlot(); @@ -378,7 +378,7 @@ VAddr Process::CreateTLSRegion() { } void Process::FreeTLSRegion(VAddr tls_address) { - SchedulerLock lock(system.Kernel()); + KScopedSchedulerLock lock(system.Kernel()); const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE); auto iter = std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) { diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp index 927f88fed9..cea262ce02 100644 --- a/src/core/hle/kernel/readable_event.cpp +++ b/src/core/hle/kernel/readable_event.cpp @@ -39,7 +39,7 @@ void ReadableEvent::Clear() { } ResultCode ReadableEvent::Reset() { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); if (!is_signaled) { LOG_TRACE(Kernel, "Handle is not signaled! 
object_id={}, object_type={}, object_name={}", GetObjectId(), GetTypeName(), GetName()); diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp index bf2c900280..78e41b13ed 100644 --- a/src/core/hle/kernel/server_session.cpp +++ b/src/core/hle/kernel/server_session.cpp @@ -171,7 +171,7 @@ ResultCode ServerSession::CompleteSyncRequest() { // Some service requests require the thread to block { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); if (!context.IsThreadWaiting()) { context.GetThread().ResumeFromWait(); context.GetThread().SetSynchronizationResults(nullptr, result); diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 2760a307c4..30d60aeb62 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -345,7 +345,7 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) { auto thread = kernel.CurrentScheduler()->GetCurrentThread(); { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); thread->InvalidateHLECallback(); thread->SetStatus(ThreadStatus::WaitIPC); session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming()); @@ -359,7 +359,7 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) { } { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); auto* sync_object = thread->GetHLESyncObject(); sync_object->RemoveWaitingThread(SharedFrom(thread)); } @@ -1691,7 +1691,7 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add } { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); auto* owner = current_thread->GetLockOwner(); if (owner != nullptr) { @@ -1724,7 +1724,7 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_ // Retrieve a list of all threads that are waiting for this condition variable. 
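     // Gathering the list and waking each waiter both happen under the
     // scheduler lock acquired on the next line, so the set of waiting
     // threads cannot change part-way through the signal.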
auto& kernel = system.Kernel(); - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); auto* const current_process = kernel.CurrentProcess(); std::vector> waiting_threads = current_process->GetConditionVariableThreads(condition_variable_addr); diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp index 6651ad90cd..d3f520ea2d 100644 --- a/src/core/hle/kernel/synchronization.cpp +++ b/src/core/hle/kernel/synchronization.cpp @@ -19,7 +19,7 @@ Synchronization::Synchronization(Core::System& system) : system{system} {} void Synchronization::SignalObject(SynchronizationObject& obj) const { auto& kernel = system.Kernel(); - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); if (obj.IsSignaled()) { for (auto thread : obj.GetWaitingThreads()) { if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) { @@ -90,7 +90,7 @@ std::pair Synchronization::WaitFor( } { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); ResultCode signaling_result = thread->GetSignalingResult(); SynchronizationObject* signaling_object = thread->GetSignalingObject(); thread->SetSynchronizationObjects(nullptr); diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 6f89238caa..a4f9e0d97a 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -51,7 +51,7 @@ Thread::~Thread() = default; void Thread::Stop() { { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); SetStatus(ThreadStatus::Dead); Signal(); kernel.GlobalHandleTable().Close(global_handle); @@ -68,7 +68,7 @@ void Thread::Stop() { } void Thread::ResumeFromWait() { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); switch (status) { case ThreadStatus::Paused: case ThreadStatus::WaitSynch: @@ -100,19 +100,18 @@ void Thread::ResumeFromWait() { } void Thread::OnWakeUp() { - SchedulerLock lock(kernel); - + KScopedSchedulerLock lock(kernel); SetStatus(ThreadStatus::Ready); } ResultCode Thread::Start() { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); SetStatus(ThreadStatus::Ready); return RESULT_SUCCESS; } void Thread::CancelWait() { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) { is_sync_cancelled = true; return; @@ -228,7 +227,7 @@ ResultVal> Thread::Create(Core::System& system, ThreadTy } void Thread::SetPriority(u32 priority) { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST, "Invalid priority value."); nominal_priority = priority; @@ -365,7 +364,7 @@ bool Thread::InvokeHLECallback(std::shared_ptr thread) { } ResultCode Thread::SetActivity(ThreadActivity value) { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); auto sched_status = GetSchedulingStatus(); @@ -435,7 +434,7 @@ void Thread::SetCurrentPriority(u32 new_priority) { } ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { - SchedulerLock lock(kernel); + KScopedSchedulerLock lock(kernel); const auto HighestSetCore = [](u64 mask, u32 max_cores) { for (s32 core = static_cast(max_cores - 1); core >= 0; core--) { if (((mask >> core) & 1) != 0) { diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp index 8e47696946..aea53cdb0a 100644 --- a/src/core/hle/kernel/time_manager.cpp +++ b/src/core/hle/kernel/time_manager.cpp @@ -18,7 +18,7 @@ 
TimeManager::TimeManager(Core::System& system_) : system{system_} {
     time_manager_event_type = Core::Timing::CreateEvent(
         "Kernel::TimeManagerCallback",
         [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
-            const SchedulerLock lock(system.Kernel());
+            const KScopedSchedulerLock lock(system.Kernel());
             const auto proper_handle = static_cast<Handle>(thread_handle);
             if (cancelled_events[proper_handle]) {
                 return;

From b9b7e4f915ffc47836342a8741fe8e46a3bd619c Mon Sep 17 00:00:00 2001
From: bunnei
Date: Thu, 3 Dec 2020 22:43:35 -0800
Subject: [PATCH 13/26] kernel: time_manager: Add missing lock guards.

---
 src/core/hle/kernel/time_manager.cpp | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index aea53cdb0a..79628e2b42 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -20,10 +20,16 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
         [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
             const KScopedSchedulerLock lock(system.Kernel());
             const auto proper_handle = static_cast<Handle>(thread_handle);
-            if (cancelled_events[proper_handle]) {
-                return;
+
+            std::shared_ptr<Thread> thread;
+            {
+                std::lock_guard lock{mutex};
+                if (cancelled_events[proper_handle]) {
+                    return;
+                }
+                thread = system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
             }
-            auto thread = this->system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
+
             if (thread) {
                 // Thread can be null if process has exited
                 thread->OnWakeUp();
@@ -56,6 +62,7 @@ void TimeManager::UnscheduleTimeEvent(Handle event_handle) {
 }

 void TimeManager::CancelTimeEvent(Thread* time_task) {
+    std::lock_guard lock{mutex};
     const Handle event_handle = time_task->GetGlobalHandle();
     UnscheduleTimeEvent(event_handle);
 }

From bc59ca92b6540efbcf45baab79d0f498eb103f30 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Thu, 3 Dec 2020 22:44:06 -0800
Subject: [PATCH 14/26] kernel: KScopedSchedulerLockAndSleep: Remove unused ctor.

---
 .../k_scoped_scheduler_lock_and_sleep.h | 20 +++++++-------------
 1 file changed, 7 insertions(+), 13 deletions(-)

diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
index f11a62216a..16f470923e 100644
--- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
+++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
@@ -8,6 +8,7 @@
 #pragma once

 #include "common/common_types.h"
+#include "core/hle/kernel/handle_table.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/thread.h"
 #include "core/hle/kernel/time_manager.h"
@@ -17,20 +18,16 @@ namespace Kernel {
 class KScopedSchedulerLockAndSleep {
 private:
     KernelCore& kernel;
-    s64 timeout_tick{};
+    Handle& event_handle;
     Thread* thread{};
-    Handle* event_handle{};
+    s64 timeout_tick{};

 public:
-    explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Thread* t, s64 timeout)
-        : kernel(kernel), timeout_tick(timeout), thread(t) {
-        /* Lock the scheduler. */
-        kernel.GlobalSchedulerContext().scheduler_lock.Lock();
-    }
-
     explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* t,
                                           s64 timeout)
-        : kernel(kernel), event_handle(&event_handle), timeout_tick(timeout), thread(t) {
+        : kernel(kernel), event_handle(event_handle), thread(t), timeout_tick(timeout) {
+        event_handle = InvalidHandle;
+
         /* Lock the scheduler.
*/ kernel.GlobalSchedulerContext().scheduler_lock.Lock(); } @@ -38,10 +35,7 @@ public: ~KScopedSchedulerLockAndSleep() { /* Register the sleep. */ if (this->timeout_tick > 0) { - auto& time_manager = kernel.TimeManager(); - Handle handle{}; - time_manager.ScheduleTimeEvent(event_handle ? *event_handle : handle, this->thread, - this->timeout_tick); + kernel.TimeManager().ScheduleTimeEvent(event_handle, this->thread, this->timeout_tick); } /* Unlock the scheduler. */ From b1326d9230f7c1f33960d2816042b1a41d3b839f Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 4 Dec 2020 23:37:35 -0800 Subject: [PATCH 15/26] hle: kernel: Use C++ style comments in KScheduler, etc. --- src/core/hle/kernel/k_priority_queue.h | 63 +++--- src/core/hle/kernel/k_scheduler.cpp | 203 ++++++++---------- src/core/hle/kernel/k_scheduler_lock.h | 16 +- .../k_scoped_scheduler_lock_and_sleep.h | 6 +- 4 files changed, 136 insertions(+), 152 deletions(-) diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h index 88e4e1ed4a..d374f5c004 100644 --- a/src/core/hle/kernel/k_priority_queue.h +++ b/src/core/hle/kernel/k_priority_queue.h @@ -87,15 +87,15 @@ public: } constexpr bool PushBack(s32 core, Member* member) { - /* Get the entry associated with the member. */ + // Get the entry associated with the member. Entry& member_entry = member->GetPriorityQueueEntry(core); - /* Get the entry associated with the end of the queue. */ + // Get the entry associated with the end of the queue. Member* tail = this->root[core].GetPrev(); Entry& tail_entry = (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : this->root[core]; - /* Link the entries. */ + // Link the entries. member_entry.SetPrev(tail); member_entry.SetNext(nullptr); tail_entry.SetNext(member); @@ -105,15 +105,15 @@ public: } constexpr bool PushFront(s32 core, Member* member) { - /* Get the entry associated with the member. */ + // Get the entry associated with the member. Entry& member_entry = member->GetPriorityQueueEntry(core); - /* Get the entry associated with the front of the queue. */ + // Get the entry associated with the front of the queue. Member* head = this->root[core].GetNext(); Entry& head_entry = (head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core]; - /* Link the entries. */ + // Link the entries. member_entry.SetPrev(nullptr); member_entry.SetNext(head); head_entry.SetPrev(member); @@ -123,10 +123,10 @@ public: } constexpr bool Remove(s32 core, Member* member) { - /* Get the entry associated with the member. */ + // Get the entry associated with the member. Entry& member_entry = member->GetPriorityQueueEntry(core); - /* Get the entries associated with next and prev. */ + // Get the entries associated with next and prev. Member* prev = member_entry.GetPrev(); Member* next = member_entry.GetNext(); Entry& prev_entry = @@ -134,7 +134,7 @@ public: Entry& next_entry = (next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core]; - /* Unlink. */ + // Unlink. prev_entry.SetNext(next); next_entry.SetPrev(prev); @@ -152,8 +152,7 @@ public: Common::BitSet64 available_priorities[NumCores]; public: - constexpr KPriorityQueueImpl() : queues(), available_priorities() { /* ... 
*/ - } + constexpr KPriorityQueueImpl() : queues(), available_priorities() {} constexpr void PushBack(s32 priority, s32 core, Member* member) { ASSERT(IsValidCore(core)); @@ -267,14 +266,14 @@ private: constexpr void PushBack(s32 priority, Member* member) { ASSERT(IsValidPriority(priority)); - /* Push onto the scheduled queue for its core, if we can. */ + // Push onto the scheduled queue for its core, if we can. u64 affinity = member->GetAffinityMask().GetAffinityMask(); if (const s32 core = member->GetActiveCore(); core >= 0) { this->scheduled_queue.PushBack(priority, core, member); ClearAffinityBit(affinity, core); } - /* And suggest the thread for all other cores. */ + // And suggest the thread for all other cores. while (affinity) { this->suggested_queue.PushBack(priority, GetNextCore(affinity), member); } @@ -283,15 +282,15 @@ private: constexpr void PushFront(s32 priority, Member* member) { ASSERT(IsValidPriority(priority)); - /* Push onto the scheduled queue for its core, if we can. */ + // Push onto the scheduled queue for its core, if we can. u64 affinity = member->GetAffinityMask().GetAffinityMask(); if (const s32 core = member->GetActiveCore(); core >= 0) { this->scheduled_queue.PushFront(priority, core, member); ClearAffinityBit(affinity, core); } - /* And suggest the thread for all other cores. */ - /* Note: Nintendo pushes onto the back of the suggested queue, not the front. */ + // And suggest the thread for all other cores. + // Note: Nintendo pushes onto the back of the suggested queue, not the front. while (affinity) { this->suggested_queue.PushBack(priority, GetNextCore(affinity), member); } @@ -300,24 +299,24 @@ private: constexpr void Remove(s32 priority, Member* member) { ASSERT(IsValidPriority(priority)); - /* Remove from the scheduled queue for its core. */ + // Remove from the scheduled queue for its core. u64 affinity = member->GetAffinityMask().GetAffinityMask(); if (const s32 core = member->GetActiveCore(); core >= 0) { this->scheduled_queue.Remove(priority, core, member); ClearAffinityBit(affinity, core); } - /* Remove from the suggested queue for all other cores. */ + // Remove from the suggested queue for all other cores. while (affinity) { this->suggested_queue.Remove(priority, GetNextCore(affinity), member); } } public: - constexpr KPriorityQueue() : scheduled_queue(), suggested_queue() { /* ... */ + constexpr KPriorityQueue() : scheduled_queue(), suggested_queue() { // ... } - /* Getters. */ + // Getters. constexpr Member* GetScheduledFront(s32 core) const { return this->scheduled_queue.GetFront(core); } @@ -346,7 +345,7 @@ public: return member->GetPriorityQueueEntry(core).GetNext(); } - /* Mutators. */ + // Mutators. constexpr void PushBack(Member* member) { this->PushBack(member->GetPriority(), member); } @@ -364,15 +363,15 @@ public: member); } - /* First class fancy operations. */ + // First class fancy operations. constexpr void ChangePriority(s32 prev_priority, bool is_running, Member* member) { ASSERT(IsValidPriority(prev_priority)); - /* Remove the member from the queues. */ + // Remove the member from the queues. const s32 new_priority = member->GetPriority(); this->Remove(prev_priority, member); - /* And enqueue. If the member is running, we want to keep it running. */ + // And enqueue. If the member is running, we want to keep it running. 
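+        // Pushing to the front keeps a running member first in line on its
+        // core, so the priority change does not force it off the CPU; a
+        // non-running member waits its turn at the back.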
if (is_running) { this->PushFront(new_priority, member); } else { @@ -382,12 +381,12 @@ public: constexpr void ChangeAffinityMask(s32 prev_core, const AffinityMaskType& prev_affinity, Member* member) { - /* Get the new information. */ + // Get the new information. const s32 priority = member->GetPriority(); const AffinityMaskType& new_affinity = member->GetAffinityMask(); const s32 new_core = member->GetActiveCore(); - /* Remove the member from all queues it was in before. */ + // Remove the member from all queues it was in before. for (s32 core = 0; core < static_cast(NumCores); core++) { if (prev_affinity.GetAffinity(core)) { if (core == prev_core) { @@ -398,7 +397,7 @@ public: } } - /* And add the member to all queues it should be in now. */ + // And add the member to all queues it should be in now. for (s32 core = 0; core < static_cast(NumCores); core++) { if (new_affinity.GetAffinity(core)) { if (core == new_core) { @@ -411,18 +410,18 @@ public: } constexpr void ChangeCore(s32 prev_core, Member* member, bool to_front = false) { - /* Get the new information. */ + // Get the new information. const s32 new_core = member->GetActiveCore(); const s32 priority = member->GetPriority(); - /* We don't need to do anything if the core is the same. */ + // We don't need to do anything if the core is the same. if (prev_core != new_core) { - /* Remove from the scheduled queue for the previous core. */ + // Remove from the scheduled queue for the previous core. if (prev_core >= 0) { this->scheduled_queue.Remove(priority, prev_core, member); } - /* Remove from the suggested queue and add to the scheduled queue for the new core. */ + // Remove from the suggested queue and add to the scheduled queue for the new core. if (new_core >= 0) { this->suggested_queue.Remove(priority, new_core, member); if (to_front) { @@ -432,7 +431,7 @@ public: } } - /* Add to the suggested queue for the previous core. */ + // Add to the suggested queue for the previous core. if (prev_core >= 0) { this->suggested_queue.PushBack(priority, prev_core, member); } diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index 9645fee226..cc2f8ef0eb 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp @@ -84,35 +84,20 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { /*static*/ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); - /* Clear that we need to update. */ + // Clear that we need to update. ClearSchedulerUpdateNeeded(kernel); u64 cores_needing_scheduling = 0, idle_cores = 0; Thread* top_threads[Core::Hardware::NUM_CPU_CORES]; auto& priority_queue = GetPriorityQueue(kernel); - /* We want to go over all cores, finding the highest priority thread and determining if - * scheduling is needed for that core. */ + /// We want to go over all cores, finding the highest priority thread and determining if + /// scheduling is needed for that core. for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { Thread* top_thread = priority_queue.GetScheduledFront((s32)core_id); if (top_thread != nullptr) { - ///* If the thread has no waiters, we need to check if the process has a thread pinned. 
- ///*/ - // if (top_thread->GetNumKernelWaiters() == 0) { - // if (Process* parent = top_thread->GetOwnerProcess(); parent != nullptr) { - // if (Thread* pinned = parent->GetPinnedThread(core_id); - // pinned != nullptr && pinned != top_thread) { - // /* We prefer our parent's pinned thread if possible. However, we also - // don't - // * want to schedule un-runnable threads. */ - // if (pinned->GetRawState() == Thread::ThreadState_Runnable) { - // top_thread = pinned; - // } else { - // top_thread = nullptr; - // } - // } - // } - //} + // If the thread has no waiters, we need to check if the process has a thread pinned. + // TODO(bunnei): Implement thread pinning } else { idle_cores |= (1ULL << core_id); } @@ -122,27 +107,27 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]); } - /* Idle cores are bad. We're going to try to migrate threads to each idle core in turn. */ + // Idle cores are bad. We're going to try to migrate threads to each idle core in turn. while (idle_cores != 0) { u32 core_id = Common::CountTrailingZeroes64(idle_cores); if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) { s32 migration_candidates[Core::Hardware::NUM_CPU_CORES]; size_t num_candidates = 0; - /* While we have a suggested thread, try to migrate it! */ + // While we have a suggested thread, try to migrate it! while (suggested != nullptr) { - /* Check if the suggested thread is the top thread on its core. */ + // Check if the suggested thread is the top thread on its core. const s32 suggested_core = suggested->GetActiveCore(); if (Thread* top_thread = (suggested_core >= 0) ? top_threads[suggested_core] : nullptr; top_thread != suggested) { - /* Make sure we're not dealing with threads too high priority for migration. */ + // Make sure we're not dealing with threads too high priority for migration. if (top_thread != nullptr && top_thread->GetPriority() < HighestCoreMigrationAllowedPriority) { break; } - /* The suggested thread isn't bound to its core, so we can migrate it! */ + // The suggested thread isn't bound to its core, so we can migrate it! suggested->SetActiveCore(core_id); priority_queue.ChangeCore(suggested_core, suggested); @@ -152,30 +137,30 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { break; } - /* Note this core as a candidate for migration. */ + // Note this core as a candidate for migration. ASSERT(num_candidates < Core::Hardware::NUM_CPU_CORES); migration_candidates[num_candidates++] = suggested_core; suggested = priority_queue.GetSuggestedNext(core_id, suggested); } - /* If suggested is nullptr, we failed to migrate a specific thread. So let's try all our - * candidate cores' top threads. */ + // If suggested is nullptr, we failed to migrate a specific thread. So let's try all our + // candidate cores' top threads. if (suggested == nullptr) { for (size_t i = 0; i < num_candidates; i++) { - /* Check if there's some other thread that can run on the candidate core. */ + // Check if there's some other thread that can run on the candidate core. const s32 candidate_core = migration_candidates[i]; suggested = top_threads[candidate_core]; if (Thread* next_on_candidate_core = priority_queue.GetScheduledNext(candidate_core, suggested); next_on_candidate_core != nullptr) { - /* The candidate core can run some other thread! We'll migrate its current - * top thread to us. */ + // The candidate core can run some other thread! 
We'll migrate its current + // top thread to us. top_threads[candidate_core] = next_on_candidate_core; cores_needing_scheduling |= kernel.Scheduler(candidate_core) .UpdateHighestPriorityThread(top_threads[candidate_core]); - /* Perform the migration. */ + // Perform the migration. suggested->SetActiveCore(core_id); priority_queue.ChangeCore(candidate_core, suggested); @@ -199,20 +184,20 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { u32 old_state) { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); - /* Check if the state has changed, because if it hasn't there's nothing to do. */ + // Check if the state has changed, because if it hasn't there's nothing to do. const auto cur_state = thread->scheduling_state; if (cur_state == old_state) { return; } - /* Update the priority queues. */ + // Update the priority queues. if (old_state == static_cast(ThreadSchedStatus::Runnable)) { - /* If we were previously runnable, then we're not runnable now, and we should remove. */ + // If we were previously runnable, then we're not runnable now, and we should remove. GetPriorityQueue(kernel).Remove(thread); IncrementScheduledCount(thread); SetSchedulerUpdateNeeded(kernel); } else if (cur_state == static_cast(ThreadSchedStatus::Runnable)) { - /* If we're now runnable, then we weren't previously, and we should add. */ + // If we're now runnable, then we weren't previously, and we should add. GetPriorityQueue(kernel).PushBack(thread); IncrementScheduledCount(thread); SetSchedulerUpdateNeeded(kernel); @@ -224,7 +209,7 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); - /* If the thread is runnable, we want to change its priority in the queue. */ + // If the thread is runnable, we want to change its priority in the queue. if (thread->scheduling_state == static_cast(ThreadSchedStatus::Runnable)) { GetPriorityQueue(kernel).ChangePriority( old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread); @@ -238,7 +223,7 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { s32 old_core) { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); - /* If the thread is runnable, we want to change its affinity in the queue. */ + // If the thread is runnable, we want to change its affinity in the queue. if (thread->scheduling_state == static_cast(ThreadSchedStatus::Runnable)) { GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread); IncrementScheduledCount(thread); @@ -249,11 +234,11 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { ASSERT(system.GlobalSchedulerContext().IsLocked()); - /* Get a reference to the priority queue. */ + // Get a reference to the priority queue. auto& kernel = system.Kernel(); auto& priority_queue = GetPriorityQueue(kernel); - /* Rotate the front of the queue to the end. */ + // Rotate the front of the queue to the end. Thread* top_thread = priority_queue.GetScheduledFront(core_id, priority); Thread* next_thread = nullptr; if (top_thread != nullptr) { @@ -264,27 +249,27 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { } } - /* While we have a suggested thread, try to migrate it! */ + // While we have a suggested thread, try to migrate it! { Thread* suggested = priority_queue.GetSuggestedFront(core_id, priority); while (suggested != nullptr) { - /* Check if the suggested thread is the top thread on its core. 
*/ + // Check if the suggested thread is the top thread on its core. const s32 suggested_core = suggested->GetActiveCore(); if (Thread* top_on_suggested_core = (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) : nullptr; top_on_suggested_core != suggested) { - /* If the next thread is a new thread that has been waiting longer than our - * suggestion, we prefer it to our suggestion. */ + // If the next thread is a new thread that has been waiting longer than our + // suggestion, we prefer it to our suggestion. if (top_thread != next_thread && next_thread != nullptr && next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick()) { suggested = nullptr; break; } - /* If we're allowed to do a migration, do one. */ - /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion - * to the front of the queue. */ + // If we're allowed to do a migration, do one. + // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion + // to the front of the queue. if (top_on_suggested_core == nullptr || top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) { suggested->SetActiveCore(core_id); @@ -294,38 +279,38 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { } } - /* Get the next suggestion. */ + // Get the next suggestion. suggested = priority_queue.GetSamePriorityNext(core_id, suggested); } } - /* Now that we might have migrated a thread with the same priority, check if we can do better. - */ + // Now that we might have migrated a thread with the same priority, check if we can do better. + { Thread* best_thread = priority_queue.GetScheduledFront(core_id); if (best_thread == GetCurrentThread()) { best_thread = priority_queue.GetScheduledNext(core_id, best_thread); } - /* If the best thread we can choose has a priority the same or worse than ours, try to - * migrate a higher priority thread. */ + // If the best thread we can choose has a priority the same or worse than ours, try to + // migrate a higher priority thread. if (best_thread != nullptr && best_thread->GetPriority() >= static_cast(priority)) { Thread* suggested = priority_queue.GetSuggestedFront(core_id); while (suggested != nullptr) { - /* If the suggestion's priority is the same as ours, don't bother. */ + // If the suggestion's priority is the same as ours, don't bother. if (suggested->GetPriority() >= best_thread->GetPriority()) { break; } - /* Check if the suggested thread is the top thread on its core. */ + // Check if the suggested thread is the top thread on its core. const s32 suggested_core = suggested->GetActiveCore(); if (Thread* top_on_suggested_core = (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) : nullptr; top_on_suggested_core != suggested) { - /* If we're allowed to do a migration, do one. */ - /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the - * suggestion to the front of the queue. */ + // If we're allowed to do a migration, do one. + // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the + // suggestion to the front of the queue. if (top_on_suggested_core == nullptr || top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) { @@ -336,13 +321,13 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { } } - /* Get the next suggestion. */ + // Get the next suggestion. suggested = priority_queue.GetSuggestedNext(core_id, suggested); } } } - /* After a rotation, we need a scheduler update. 
*/ + // After a rotation, we need a scheduler update. SetSchedulerUpdateNeeded(kernel); } @@ -392,38 +377,38 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { void KScheduler::YieldWithoutCoreMigration() { auto& kernel = system.Kernel(); - /* Validate preconditions. */ + // Validate preconditions. ASSERT(CanSchedule(kernel)); ASSERT(kernel.CurrentProcess() != nullptr); - /* Get the current thread and process. */ + // Get the current thread and process. Thread& cur_thread = *GetCurrentThread(); Process& cur_process = *kernel.CurrentProcess(); - /* If the thread's yield count matches, there's nothing for us to do. */ + // If the thread's yield count matches, there's nothing for us to do. if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { return; } - /* Get a reference to the priority queue. */ + // Get a reference to the priority queue. auto& priority_queue = GetPriorityQueue(kernel); - /* Perform the yield. */ + // Perform the yield. { KScopedSchedulerLock lock(kernel); const auto cur_state = cur_thread.scheduling_state; if (cur_state == static_cast(ThreadSchedStatus::Runnable)) { - /* Put the current thread at the back of the queue. */ + // Put the current thread at the back of the queue. Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread)); IncrementScheduledCount(std::addressof(cur_thread)); - /* If the next thread is different, we have an update to perform. */ + // If the next thread is different, we have an update to perform. if (next_thread != std::addressof(cur_thread)) { SetSchedulerUpdateNeeded(kernel); } else { - /* Otherwise, set the thread's yield count so that we won't waste work until the - * process is scheduled again. */ + // Otherwise, set the thread's yield count so that we won't waste work until the + // process is scheduled again. cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount()); } } @@ -433,40 +418,40 @@ void KScheduler::YieldWithoutCoreMigration() { void KScheduler::YieldWithCoreMigration() { auto& kernel = system.Kernel(); - /* Validate preconditions. */ + // Validate preconditions. ASSERT(CanSchedule(kernel)); ASSERT(kernel.CurrentProcess() != nullptr); - /* Get the current thread and process. */ + // Get the current thread and process. Thread& cur_thread = *GetCurrentThread(); Process& cur_process = *kernel.CurrentProcess(); - /* If the thread's yield count matches, there's nothing for us to do. */ + // If the thread's yield count matches, there's nothing for us to do. if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { return; } - /* Get a reference to the priority queue. */ + // Get a reference to the priority queue. auto& priority_queue = GetPriorityQueue(kernel); - /* Perform the yield. */ + // Perform the yield. { KScopedSchedulerLock lock(kernel); const auto cur_state = cur_thread.scheduling_state; if (cur_state == static_cast(ThreadSchedStatus::Runnable)) { - /* Get the current active core. */ + // Get the current active core. const s32 core_id = cur_thread.GetActiveCore(); - /* Put the current thread at the back of the queue. */ + // Put the current thread at the back of the queue. Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread)); IncrementScheduledCount(std::addressof(cur_thread)); - /* While we have a suggested thread, try to migrate it! */ + // While we have a suggested thread, try to migrate it! 
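+            // A suggestion is only pulled over when the thread currently
+            // running on its core can be displaced (no thread, or one at a
+            // priority no better than the migration threshold); otherwise we
+            // note that a later yield should recheck.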
bool recheck = false; Thread* suggested = priority_queue.GetSuggestedFront(core_id); while (suggested != nullptr) { - /* Check if the suggested thread is the thread running on its core. */ + // Check if the suggested thread is the thread running on its core. const s32 suggested_core = suggested->GetActiveCore(); if (Thread* running_on_suggested_core = @@ -474,10 +459,10 @@ void KScheduler::YieldWithCoreMigration() { ? kernel.Scheduler(suggested_core).state.highest_priority_thread : nullptr; running_on_suggested_core != suggested) { - /* If the current thread's priority is higher than our suggestion's we prefer - * the next thread to the suggestion. */ - /* We also prefer the next thread when the current thread's priority is equal to - * the suggestions, but the next thread has been waiting longer. */ + // If the current thread's priority is higher than our suggestion's we prefer + // the next thread to the suggestion. We also prefer the next thread when the + // current thread's priority is equal to the suggestions, but the next thread + // has been waiting longer. if ((suggested->GetPriority() > cur_thread.GetPriority()) || (suggested->GetPriority() == cur_thread.GetPriority() && next_thread != std::addressof(cur_thread) && @@ -486,9 +471,9 @@ void KScheduler::YieldWithCoreMigration() { break; } - /* If we're allowed to do a migration, do one. */ - /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the - * suggestion to the front of the queue. */ + // If we're allowed to do a migration, do one. + // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the + // suggestion to the front of the queue. if (running_on_suggested_core == nullptr || running_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) { @@ -497,23 +482,23 @@ void KScheduler::YieldWithCoreMigration() { IncrementScheduledCount(suggested); break; } else { - /* We couldn't perform a migration, but we should check again on a future - * yield. */ + // We couldn't perform a migration, but we should check again on a future + // yield. recheck = true; } } - /* Get the next suggestion. */ + // Get the next suggestion. suggested = priority_queue.GetSuggestedNext(core_id, suggested); } - /* If we still have a suggestion or the next thread is different, we have an update to - * perform. */ + // If we still have a suggestion or the next thread is different, we have an update to + // perform. if (suggested != nullptr || next_thread != std::addressof(cur_thread)) { SetSchedulerUpdateNeeded(kernel); } else if (!recheck) { - /* Otherwise if we don't need to re-check, set the thread's yield count so that we - * won't waste work until the process is scheduled again. */ + // Otherwise if we don't need to re-check, set the thread's yield count so that we + // won't waste work until the process is scheduled again. cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount()); } } @@ -523,48 +508,48 @@ void KScheduler::YieldWithCoreMigration() { void KScheduler::YieldToAnyThread() { auto& kernel = system.Kernel(); - /* Validate preconditions. */ + // Validate preconditions. ASSERT(CanSchedule(kernel)); ASSERT(kernel.CurrentProcess() != nullptr); - /* Get the current thread and process. */ + // Get the current thread and process. Thread& cur_thread = *GetCurrentThread(); Process& cur_process = *kernel.CurrentProcess(); - /* If the thread's yield count matches, there's nothing for us to do. */ + // If the thread's yield count matches, there's nothing for us to do. 
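+    // The yield count is stamped with the process's scheduled count when a
+    // yield ends up a no-op, so equality here means nothing has been
+    // rescheduled since and the yield can be skipped outright.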
if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { return; } - /* Get a reference to the priority queue. */ + // Get a reference to the priority queue. auto& priority_queue = GetPriorityQueue(kernel); - /* Perform the yield. */ + // Perform the yield. { KScopedSchedulerLock lock(kernel); const auto cur_state = cur_thread.scheduling_state; if (cur_state == static_cast(ThreadSchedStatus::Runnable)) { - /* Get the current active core. */ + // Get the current active core. const s32 core_id = cur_thread.GetActiveCore(); - /* Migrate the current thread to core -1. */ + // Migrate the current thread to core -1. cur_thread.SetActiveCore(-1); priority_queue.ChangeCore(core_id, std::addressof(cur_thread)); IncrementScheduledCount(std::addressof(cur_thread)); - /* If there's nothing scheduled, we can try to perform a migration. */ + // If there's nothing scheduled, we can try to perform a migration. if (priority_queue.GetScheduledFront(core_id) == nullptr) { - /* While we have a suggested thread, try to migrate it! */ + // While we have a suggested thread, try to migrate it! Thread* suggested = priority_queue.GetSuggestedFront(core_id); while (suggested != nullptr) { - /* Check if the suggested thread is the top thread on its core. */ + // Check if the suggested thread is the top thread on its core. const s32 suggested_core = suggested->GetActiveCore(); if (Thread* top_on_suggested_core = (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) : nullptr; top_on_suggested_core != suggested) { - /* If we're allowed to do a migration, do one. */ + // If we're allowed to do a migration, do one. if (top_on_suggested_core == nullptr || top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) { @@ -573,25 +558,25 @@ void KScheduler::YieldToAnyThread() { IncrementScheduledCount(suggested); } - /* Regardless of whether we migrated, we had a candidate, so we're done. */ + // Regardless of whether we migrated, we had a candidate, so we're done. break; } - /* Get the next suggestion. */ + // Get the next suggestion. suggested = priority_queue.GetSuggestedNext(core_id, suggested); } - /* If the suggestion is different from the current thread, we need to perform an - * update. */ + // If the suggestion is different from the current thread, we need to perform an + // update. if (suggested != std::addressof(cur_thread)) { SetSchedulerUpdateNeeded(kernel); } else { - /* Otherwise, set the thread's yield count so that we won't waste work until the - * process is scheduled again. */ + // Otherwise, set the thread's yield count so that we won't waste work until the + // process is scheduled again. cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount()); } } else { - /* Otherwise, we have an update to perform. */ + // Otherwise, we have an update to perform. SetSchedulerUpdateNeeded(kernel); } } diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h index d46f5fc040..39a02af2ae 100644 --- a/src/core/hle/kernel/k_scheduler_lock.h +++ b/src/core/hle/kernel/k_scheduler_lock.h @@ -34,19 +34,19 @@ public: void Lock() { if (this->IsLockedByCurrentThread()) { - /* If we already own the lock, we can just increment the count. */ + // If we already own the lock, we can just increment the count. ASSERT(this->lock_count > 0); this->lock_count++; } else { - /* Otherwise, we want to disable scheduling and acquire the spinlock. */ + // Otherwise, we want to disable scheduling and acquire the spinlock. 
SchedulerType::DisableScheduling(kernel); this->spin_lock.lock(); - /* For debug, ensure that our state is valid. */ + // For debug, ensure that our state is valid. ASSERT(this->lock_count == 0); ASSERT(this->owner_thread == Core::EmuThreadHandle::InvalidHandle()); - /* Increment count, take ownership. */ + // Increment count, take ownership. this->lock_count = 1; this->owner_thread = kernel.GetCurrentEmuThreadID(); } @@ -56,18 +56,18 @@ public: ASSERT(this->IsLockedByCurrentThread()); ASSERT(this->lock_count > 0); - /* Release an instance of the lock. */ + // Release an instance of the lock. if ((--this->lock_count) == 0) { - /* We're no longer going to hold the lock. Take note of what cores need scheduling. */ + // We're no longer going to hold the lock. Take note of what cores need scheduling. const u64 cores_needing_scheduling = SchedulerType::UpdateHighestPriorityThreads(kernel); Core::EmuThreadHandle leaving_thread = owner_thread; - /* Note that we no longer hold the lock, and unlock the spinlock. */ + // Note that we no longer hold the lock, and unlock the spinlock. this->owner_thread = Core::EmuThreadHandle::InvalidHandle(); this->spin_lock.unlock(); - /* Enable scheduling, and perform a rescheduling operation. */ + // Enable scheduling, and perform a rescheduling operation. SchedulerType::EnableScheduling(kernel, cores_needing_scheduling, leaving_thread); } } diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h index 16f470923e..d1cc5dda78 100644 --- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h +++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h @@ -28,17 +28,17 @@ public: : kernel(kernel), event_handle(event_handle), thread(t), timeout_tick(timeout) { event_handle = InvalidHandle; - /* Lock the scheduler. */ + // Lock the scheduler. kernel.GlobalSchedulerContext().scheduler_lock.Lock(); } ~KScopedSchedulerLockAndSleep() { - /* Register the sleep. */ + // Register the sleep. if (this->timeout_tick > 0) { kernel.TimeManager().ScheduleTimeEvent(event_handle, this->thread, this->timeout_tick); } - /* Unlock the scheduler. */ + // Unlock the scheduler. kernel.GlobalSchedulerContext().scheduler_lock.Unlock(); } From d2c0c94f0b76fbae9dd5a7b7cb81b9b53db8be0e Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 4 Dec 2020 23:46:37 -0800 Subject: [PATCH 16/26] common: BitSet: Various style fixes based on code review feedback. 
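Replaces the BITSIZEOF macro with BitSize(), switches CountLeadingZeroes64 to C++20's std::countl_zero, moves the private details below the public interface, and stores the flag words in a zero-initialized std::array. A minimal sketch of the leading-zero arithmetic, assuming a hypothetical 8-bit storage word:

    // Sketch (C++20 <bit>): countl_zero on the widened value counts the 56
    // padding bits above a u8, so subtract them to recover the index of the
    // first set flag within the narrow word.
    #include <bit>
    #include <cstdint>

    constexpr int FirstSetFlag(std::uint8_t word) {
        if (word == 0) {
            return -1; // no flags set
        }
        return std::countl_zero(static_cast<std::uint64_t>(word)) - 56;
    }
    static_assert(FirstSetFlag(0b1000'0000) == 0);
    static_assert(FirstSetFlag(0b0000'0001) == 7);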
--- src/common/bit_set.h | 45 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 23 deletions(-) diff --git a/src/common/bit_set.h b/src/common/bit_set.h index 4f966de40e..9235ad4128 100644 --- a/src/common/bit_set.h +++ b/src/common/bit_set.h @@ -16,6 +16,9 @@ #pragma once +#include +#include + #include "common/alignment.h" #include "common/bit_util.h" #include "common/common_types.h" @@ -24,33 +27,11 @@ namespace Common { namespace impl { -#define BITSIZEOF(x) (sizeof(x) * CHAR_BIT) - template class BitSet { -private: - static_assert(std::is_integral::value); - static_assert(std::is_unsigned::value); - static_assert(sizeof(Storage) <= sizeof(u64)); - - static constexpr size_t FlagsPerWord = BITSIZEOF(Storage); - static constexpr size_t NumWords = AlignUp(N, FlagsPerWord) / FlagsPerWord; - - static constexpr auto CountLeadingZeroImpl(Storage word) { - return CountLeadingZeroes64(static_cast(word)) - - (BITSIZEOF(unsigned long long) - FlagsPerWord); - } - - static constexpr Storage GetBitMask(size_t bit) { - return Storage(1) << (FlagsPerWord - 1 - bit); - } - -private: - Storage words[NumWords]; public: - constexpr BitSet() : words() { /* ... */ - } + constexpr BitSet() = default; constexpr void SetBit(size_t i) { this->words[i / FlagsPerWord] |= GetBitMask(i % FlagsPerWord); @@ -81,6 +62,24 @@ public: } return FlagsPerWord * NumWords; } + +private: + static_assert(std::is_unsigned_v); + static_assert(sizeof(Storage) <= sizeof(u64)); + + static constexpr size_t FlagsPerWord = BitSize(); + static constexpr size_t NumWords = AlignUp(N, FlagsPerWord) / FlagsPerWord; + + static constexpr auto CountLeadingZeroImpl(Storage word) { + return std::countl_zero(static_cast(word)) - + (BitSize() - FlagsPerWord); + } + + static constexpr Storage GetBitMask(size_t bit) { + return Storage(1) << (FlagsPerWord - 1 - bit); + } + + std::array words{}; }; } // namespace impl From 357d79fb6e6ec0d373d8768f1ee32eebe02e55da Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 4 Dec 2020 23:47:59 -0800 Subject: [PATCH 17/26] hle: kernel: GlobalSchedulerContext: Various style fixes based on code review feedback. --- src/core/hle/kernel/global_scheduler_context.cpp | 7 ++++++- src/core/hle/kernel/global_scheduler_context.h | 8 ++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp index a9cb48b38e..a133e8ed0b 100644 --- a/src/core/hle/kernel/global_scheduler_context.cpp +++ b/src/core/hle/kernel/global_scheduler_context.cpp @@ -31,7 +31,12 @@ void GlobalSchedulerContext::RemoveThread(std::shared_ptr thread) { void GlobalSchedulerContext::PreemptThreads() { // The priority levels at which the global scheduler preempts threads every 10 ms. They are // ordered from Core 0 to Core 3. 
- std::array preemption_priorities = {59, 59, 59, 63}; + static constexpr std::array preemption_priorities{ + 59, + 59, + 59, + 63, + }; ASSERT(IsLocked()); for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h index c4bc23eedd..5c7b89290e 100644 --- a/src/core/hle/kernel/global_scheduler_context.h +++ b/src/core/hle/kernel/global_scheduler_context.h @@ -21,7 +21,7 @@ class SchedulerLock; using KSchedulerPriorityQueue = KPriorityQueue; -static constexpr s32 HighestCoreMigrationAllowedPriority = 2; +constexpr s32 HighestCoreMigrationAllowedPriority = 2; class GlobalSchedulerContext final { friend class KScheduler; @@ -39,7 +39,7 @@ public: void RemoveThread(std::shared_ptr thread); /// Returns a list of all threads managed by the scheduler - const std::vector>& GetThreadList() const { + [[nodiscard]] const std::vector>& GetThreadList() const { return thread_list; } @@ -55,11 +55,11 @@ public: /// Returns true if the global scheduler lock is acquired bool IsLocked() const; - LockType& SchedulerLock() { + [[nodiscard]] LockType& SchedulerLock() { return scheduler_lock; } - const LockType& SchedulerLock() const { + [[nodiscard]] const LockType& SchedulerLock() const { return scheduler_lock; } From 4d3be1816c9f1ac5c49906c8466fa2618ea15e73 Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 4 Dec 2020 23:50:32 -0800 Subject: [PATCH 18/26] hle: kernel: KAffinityMask: Various style fixes based on code review feedback. --- src/core/hle/kernel/k_affinity_mask.h | 30 ++++++++++++--------------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/src/core/hle/kernel/k_affinity_mask.h b/src/core/hle/kernel/k_affinity_mask.h index fa2a720a4e..dd73781cd8 100644 --- a/src/core/hle/kernel/k_affinity_mask.h +++ b/src/core/hle/kernel/k_affinity_mask.h @@ -14,24 +14,10 @@ namespace Kernel { class KAffinityMask { -private: - static constexpr u64 AllowedAffinityMask = (1ul << Core::Hardware::NUM_CPU_CORES) - 1; - -private: - u64 mask; - -private: - static constexpr u64 GetCoreBit(s32 core) { - ASSERT(0 <= core && core < static_cast(Core::Hardware::NUM_CPU_CORES)); - return (1ull << core); - } - public: - constexpr KAffinityMask() : mask(0) { - ASSERT(this); - } + constexpr KAffinityMask() = default; - constexpr u64 GetAffinityMask() const { + [[nodiscard]] constexpr u64 GetAffinityMask() const { return this->mask; } @@ -40,7 +26,7 @@ public: this->mask = new_mask; } - constexpr bool GetAffinity(s32 core) const { + [[nodiscard]] constexpr bool GetAffinity(s32 core) const { return this->mask & GetCoreBit(core); } @@ -57,6 +43,16 @@ public: constexpr void SetAll() { this->mask = AllowedAffinityMask; } + +private: + [[nodiscard]] static constexpr u64 GetCoreBit(s32 core) { + ASSERT(0 <= core && core < static_cast(Core::Hardware::NUM_CPU_CORES)); + return (1ULL << core); + } + + static constexpr u64 AllowedAffinityMask = (1ULL << Core::Hardware::NUM_CPU_CORES) - 1; + + u64 mask{}; }; } // namespace Kernel From 8fd921557fc8e830ee8330fc1dcf6dcc9963c4c2 Mon Sep 17 00:00:00 2001 From: bunnei Date: Fri, 4 Dec 2020 23:57:18 -0800 Subject: [PATCH 19/26] hle: kernel: KPriorityQueue: Various style fixes based on code review feedback. 
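Uses std::is_reference_v/std::is_unsigned_v in place of the ::value spellings, replaces the raw member arrays with std::array, and converts the nested priority checks into early returns. A self-contained sketch of the strip-reference-then-cv computation behind AffinityMaskType, where Example is a hypothetical stand-in for the member type:

    // Sketch: strip the reference, then the cv-qualifiers, from the type a
    // member call returns -- the computation AffinityMaskType performs on
    // GetAffinityMask().
    #include <type_traits>
    #include <utility>

    struct Example {
        const unsigned long long& GetAffinityMask() const;
    };

    using MaskType = std::remove_cv_t<
        std::remove_reference_t<decltype(std::declval<Example>().GetAffinityMask())>>;
    static_assert(std::is_same_v<MaskType, unsigned long long>);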
--- src/core/hle/kernel/k_priority_queue.h | 65 ++++++++++++++------------ 1 file changed, 36 insertions(+), 29 deletions(-) diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h index d374f5c004..01a577d0c3 100644 --- a/src/core/hle/kernel/k_priority_queue.h +++ b/src/core/hle/kernel/k_priority_queue.h @@ -7,6 +7,8 @@ #pragma once +#include + #include "common/assert.h" #include "common/bit_set.h" #include "common/bit_util.h" @@ -17,7 +19,7 @@ namespace Kernel { class Thread; template -concept KPriorityQueueAffinityMask = !std::is_reference::value && requires(T & t) { +concept KPriorityQueueAffinityMask = !std::is_reference_v && requires(T & t) { { t.GetAffinityMask() } ->std::convertible_to; {t.SetAffinityMask(std::declval())}; @@ -29,7 +31,7 @@ concept KPriorityQueueAffinityMask = !std::is_reference::value && requires(T }; template -concept KPriorityQueueMember = !std::is_reference::value && requires(T & t) { +concept KPriorityQueueMember = !std::is_reference_v && requires(T & t) { {typename T::QueueEntry()}; {(typename T::QueueEntry()).Initialize()}; {(typename T::QueueEntry()).SetPrev(std::addressof(t))}; @@ -54,8 +56,8 @@ concept KPriorityQueueMember = !std::is_reference::value && requires(T & t) { template requires KPriorityQueueMember class KPriorityQueue { public: - using AffinityMaskType = typename std::remove_cv().GetAffinityMask())>::type>::type; + using AffinityMaskType = typename std::remove_cv_t< + typename std::remove_reference().GetAffinityMask())>::type>; static_assert(LowestPriority >= 0); static_assert(HighestPriority >= 0); @@ -77,12 +79,12 @@ private: public: class KPerCoreQueue { private: - Entry root[NumCores]; + std::array root{}; public: - constexpr KPerCoreQueue() : root() { - for (size_t i = 0; i < NumCores; i++) { - this->root[i].Initialize(); + constexpr KPerCoreQueue() { + for (auto& per_core_root : root) { + per_core_root.Initialize(); } } @@ -101,7 +103,7 @@ public: tail_entry.SetNext(member); this->root[core].SetPrev(member); - return (tail == nullptr); + return tail == nullptr; } constexpr bool PushFront(s32 core, Member* member) { @@ -147,21 +149,19 @@ public: }; class KPriorityQueueImpl { - private: - KPerCoreQueue queues[NumPriority]; - Common::BitSet64 available_priorities[NumCores]; - public: - constexpr KPriorityQueueImpl() : queues(), available_priorities() {} + constexpr KPriorityQueueImpl() = default; constexpr void PushBack(s32 priority, s32 core, Member* member) { ASSERT(IsValidCore(core)); ASSERT(IsValidPriority(priority)); - if (priority <= LowestPriority) { - if (this->queues[priority].PushBack(core, member)) { - this->available_priorities[core].SetBit(priority); - } + if (priority > LowestPriority) { + return; + } + + if (this->queues[priority].PushBack(core, member)) { + this->available_priorities[core].SetBit(priority); } } @@ -169,10 +169,12 @@ public: ASSERT(IsValidCore(core)); ASSERT(IsValidPriority(priority)); - if (priority <= LowestPriority) { - if (this->queues[priority].PushFront(core, member)) { - this->available_priorities[core].SetBit(priority); - } + if (priority > LowestPriority) { + return; + } + + if (this->queues[priority].PushFront(core, member)) { + this->available_priorities[core].SetBit(priority); } } @@ -180,10 +182,12 @@ public: ASSERT(IsValidCore(core)); ASSERT(IsValidPriority(priority)); - if (priority <= LowestPriority) { - if (this->queues[priority].Remove(core, member)) { - this->available_priorities[core].ClearBit(priority); - } + if (priority > LowestPriority) { + return; 
+ } + + if (this->queues[priority].Remove(core, member)) { + this->available_priorities[core].ClearBit(priority); } } @@ -246,6 +250,10 @@ public: return nullptr; } } + + private: + std::array queues{}; + std::array, NumCores> available_priorities{}; }; private: @@ -254,7 +262,7 @@ private: private: constexpr void ClearAffinityBit(u64& affinity, s32 core) { - affinity &= ~(u64(1ul) << core); + affinity &= ~(u64(1) << core); } constexpr s32 GetNextCore(u64& affinity) { @@ -313,8 +321,7 @@ private: } public: - constexpr KPriorityQueue() : scheduled_queue(), suggested_queue() { // ... - } + constexpr KPriorityQueue() = default; // Getters. constexpr Member* GetScheduledFront(s32 core) const { From 960500cfd2558c52597fff69c1bb0ea38d922b6a Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 5 Dec 2020 00:02:30 -0800 Subject: [PATCH 20/26] hle: kernel: KScheduler: Various style fixes based on code review feedback. --- src/core/hle/kernel/k_scheduler.cpp | 42 ++++++++++++------------- src/core/hle/kernel/k_scheduler.h | 49 +++++++++++++---------------- 2 files changed, 41 insertions(+), 50 deletions(-) diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index cc2f8ef0eb..c5fd82a6b9 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp @@ -29,8 +29,8 @@ static void IncrementScheduledCount(Kernel::Thread* thread) { } } -/*static*/ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule, - Core::EmuThreadHandle global_thread) { +void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule, + Core::EmuThreadHandle global_thread) { u32 current_core = global_thread.host_handle; bool must_context_switch = global_thread.guest_handle != InvalidHandle && (current_core < Core::Hardware::NUM_CPU_CORES); @@ -81,7 +81,7 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { } } -/*static*/ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { +u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); // Clear that we need to update. @@ -94,7 +94,7 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { /// We want to go over all cores, finding the highest priority thread and determining if /// scheduling is needed for that core. for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { - Thread* top_thread = priority_queue.GetScheduledFront((s32)core_id); + Thread* top_thread = priority_queue.GetScheduledFront(static_cast(core_id)); if (top_thread != nullptr) { // If the thread has no waiters, we need to check if the process has a thread pinned. // TODO(bunnei): Implement thread pinning @@ -180,8 +180,7 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { return cores_needing_scheduling; } -/*static*/ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, - u32 old_state) { +void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state) { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); // Check if the state has changed, because if it hasn't there's nothing to do. 
@@ -204,8 +203,8 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { } } -/*static*/ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, - Thread* current_thread, u32 old_priority) { +void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread, + u32 old_priority) { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); @@ -218,9 +217,8 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { } } -/*static*/ void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread, - const KAffinityMask& old_affinity, - s32 old_core) { +void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread, + const KAffinityMask& old_affinity, s32 old_core) { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); // If the thread is runnable, we want to change its affinity in the queue. @@ -331,38 +329,38 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { SetSchedulerUpdateNeeded(kernel); } -/*static*/ bool KScheduler::CanSchedule(KernelCore& kernel) { +bool KScheduler::CanSchedule(KernelCore& kernel) { return kernel.CurrentScheduler()->GetCurrentThread()->GetDisableDispatchCount() <= 1; } -/*static*/ bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) { +bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) { return kernel.GlobalSchedulerContext().scheduler_update_needed.load(std::memory_order_acquire); } -/*static*/ void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) { +void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) { kernel.GlobalSchedulerContext().scheduler_update_needed.store(true, std::memory_order_release); } -/*static*/ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) { +void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) { kernel.GlobalSchedulerContext().scheduler_update_needed.store(false, std::memory_order_release); } -/*static*/ void KScheduler::DisableScheduling(KernelCore& kernel) { +void KScheduler::DisableScheduling(KernelCore& kernel) { if (auto* scheduler = kernel.CurrentScheduler(); scheduler) { ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0); scheduler->GetCurrentThread()->DisableDispatch(); } } -/*static*/ void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling, - Core::EmuThreadHandle global_thread) { +void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling, + Core::EmuThreadHandle global_thread) { if (auto* scheduler = kernel.CurrentScheduler(); scheduler) { scheduler->GetCurrentThread()->EnableDispatch(); } RescheduleCores(kernel, cores_needing_scheduling, global_thread); } -/*static*/ u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { +u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { if (IsSchedulerUpdateNeeded(kernel)) { return UpdateHighestPriorityThreadsImpl(kernel); } else { @@ -370,7 +368,7 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { } } -/*static*/ KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) { +KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) { return kernel.GlobalSchedulerContext().priority_queue; } @@ -585,7 +583,7 @@ void KScheduler::YieldToAnyThread() { KScheduler::KScheduler(Core::System& system, std::size_t core_id) : system(system), core_id(core_id) { - switch_fiber = std::make_shared(std::function(OnSwitch), this); + switch_fiber = std::make_shared(OnSwitch, 
this); this->state.needs_scheduling = true; this->state.interrupt_task_thread_runnable = false; this->state.should_count_idle = false; @@ -722,7 +720,7 @@ void KScheduler::SwitchToCurrent() { } const auto is_switch_pending = [this] { std::scoped_lock lock{guard}; - return !!this->state.needs_scheduling; + return state.needs_scheduling.load(std::memory_order_relaxed); }; do { if (current_thread != nullptr && !current_thread->IsHLEThread()) { diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h index d52ecc0db4..e84abc84c5 100644 --- a/src/core/hle/kernel/k_scheduler.h +++ b/src/core/hle/kernel/k_scheduler.h @@ -51,32 +51,28 @@ public: void Reload(Thread* thread); /// Gets the current running thread - Thread* GetCurrentThread() const; + [[nodiscard]] Thread* GetCurrentThread() const; /// Gets the timestamp for the last context switch in ticks. - u64 GetLastContextSwitchTicks() const; + [[nodiscard]] u64 GetLastContextSwitchTicks() const; - bool ContextSwitchPending() const { - return this->state.needs_scheduling; + [[nodiscard]] bool ContextSwitchPending() const { + return state.needs_scheduling.load(std::memory_order_relaxed); } void Initialize(); void OnThreadStart(); - std::shared_ptr& ControlContext() { + [[nodiscard]] std::shared_ptr& ControlContext() { return switch_fiber; } - const std::shared_ptr& ControlContext() const { + [[nodiscard]] const std::shared_ptr& ControlContext() const { return switch_fiber; } - std::size_t CurrentCoreId() const { - return core_id; - } - - u64 UpdateHighestPriorityThread(Thread* highest_thread); + [[nodiscard]] u64 UpdateHighestPriorityThread(Thread* highest_thread); /** * Takes a thread and moves it to the back of the it's priority list. @@ -114,7 +110,18 @@ public: static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread, const KAffinityMask& old_affinity, s32 old_core); + static bool CanSchedule(KernelCore& kernel); + static bool IsSchedulerUpdateNeeded(const KernelCore& kernel); + static void SetSchedulerUpdateNeeded(KernelCore& kernel); + static void ClearSchedulerUpdateNeeded(KernelCore& kernel); + static void DisableScheduling(KernelCore& kernel); + static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling, + Core::EmuThreadHandle global_thread); + [[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel); + private: + friend class GlobalSchedulerContext; + /** * Takes care of selecting the new scheduled threads in three steps: * @@ -129,25 +136,12 @@ private: * * returns the cores needing scheduling. 
*/ - static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel); + [[nodiscard]] static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel); + + [[nodiscard]] static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel); void RotateScheduledQueue(s32 core_id, s32 priority); -public: - static bool CanSchedule(KernelCore& kernel); - static bool IsSchedulerUpdateNeeded(const KernelCore& kernel); - static void SetSchedulerUpdateNeeded(KernelCore& kernel); - static void ClearSchedulerUpdateNeeded(KernelCore& kernel); - static void DisableScheduling(KernelCore& kernel); - static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling, - Core::EmuThreadHandle global_thread); - static u64 UpdateHighestPriorityThreads(KernelCore& kernel); - -private: - friend class GlobalSchedulerContext; - - static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel); - void Schedule() { ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1); this->ScheduleImpl(); @@ -175,7 +169,6 @@ private: static void OnSwitch(void* this_scheduler); void SwitchToCurrent(); -private: Thread* current_thread{}; Thread* idle_thread{}; From 165d8485f05569d47751e2f2f730fd3f417831bb Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 5 Dec 2020 00:04:24 -0800 Subject: [PATCH 21/26] hle: kernel: KAbstractSchedulerLock: Various style fixes based on code review feedback. --- src/core/hle/kernel/k_scheduler_lock.h | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h index 39a02af2ae..2d675b39e1 100644 --- a/src/core/hle/kernel/k_scheduler_lock.h +++ b/src/core/hle/kernel/k_scheduler_lock.h @@ -17,16 +17,8 @@ class KernelCore; template class KAbstractSchedulerLock { -private: - KernelCore& kernel; - Common::SpinLock spin_lock; - s32 lock_count; - Core::EmuThreadHandle owner_thread; - public: - KAbstractSchedulerLock(KernelCore& kernel) - : kernel{kernel}, spin_lock(), lock_count(0), - owner_thread(Core::EmuThreadHandle::InvalidHandle()) {} + explicit KAbstractSchedulerLock(KernelCore& kernel) : kernel{kernel} {} bool IsLockedByCurrentThread() const { return this->owner_thread == kernel.GetCurrentEmuThreadID(); @@ -71,6 +63,12 @@ public: SchedulerType::EnableScheduling(kernel, cores_needing_scheduling, leaving_thread); } } + +private: + KernelCore& kernel; + Common::SpinLock spin_lock{}; + s32 lock_count{}; + Core::EmuThreadHandle owner_thread{Core::EmuThreadHandle::InvalidHandle()}; }; } // namespace Kernel From b1b4f2337e286e2c079634906224fc67ae3ff7c3 Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 5 Dec 2020 00:07:00 -0800 Subject: [PATCH 22/26] hle: kernel: KScopedLock: Various style fixes based on code review feedback. 
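Drops the NonCopyable base in favor of explicitly deleted copy and move constructors and moves lock_ptr below the public interface; duplicating a guard would otherwise let two destructors Unlock() the same lock. A reduced sketch of the RAII shape, with hypothetical names:

    // Sketch: lock on construction, unlock on destruction, and forbid
    // copies/moves so ownership of the unlock cannot be duplicated.
    template <typename T>
    class ScopedGuard {
    public:
        explicit ScopedGuard(T* l) : lock_ptr(l) {
            lock_ptr->Lock();
        }
        ~ScopedGuard() {
            lock_ptr->Unlock(); // exactly one Unlock per successful Lock
        }
        ScopedGuard(const ScopedGuard&) = delete;
        ScopedGuard(ScopedGuard&&) = delete;

    private:
        T* lock_ptr;
    };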
--- src/core/hle/kernel/k_scoped_lock.h | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/core/hle/kernel/k_scoped_lock.h b/src/core/hle/kernel/k_scoped_lock.h index 03320859fd..d7cc557b24 100644 --- a/src/core/hle/kernel/k_scoped_lock.h +++ b/src/core/hle/kernel/k_scoped_lock.h @@ -12,7 +12,7 @@ namespace Kernel { template -concept KLockable = !std::is_reference::value && requires(T & t) { +concept KLockable = !std::is_reference_v && requires(T & t) { { t.Lock() } ->std::same_as; { t.Unlock() } @@ -20,11 +20,7 @@ concept KLockable = !std::is_reference::value && requires(T & t) { }; template -requires KLockable class KScopedLock : NonCopyable { - -private: - T* lock_ptr; - +requires KLockable class KScopedLock { public: explicit KScopedLock(T* l) : lock_ptr(l) { this->lock_ptr->Lock(); @@ -34,6 +30,12 @@ public: ~KScopedLock() { this->lock_ptr->Unlock(); } + + KScopedLock(const KScopedLock&) = delete; + KScopedLock(KScopedLock&&) = delete; + +private: + T* lock_ptr; }; } // namespace Kernel From ed4d1e2adefa4775dc1a950d7db84c7935ae7324 Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 5 Dec 2020 00:07:44 -0800 Subject: [PATCH 23/26] hle: kernel: KScopedSchedulerLockAndSleep: Various style fixes based on code review feedback. --- .../hle/kernel/k_scoped_scheduler_lock_and_sleep.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h index d1cc5dda78..2bb3817fa7 100644 --- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h +++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h @@ -16,12 +16,6 @@ namespace Kernel { class KScopedSchedulerLockAndSleep { -private: - KernelCore& kernel; - Handle& event_handle; - Thread* thread{}; - s64 timeout_tick{}; - public: explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* t, s64 timeout) @@ -45,6 +39,12 @@ public: void CancelSleep() { this->timeout_tick = 0; } + +private: + KernelCore& kernel; + Handle& event_handle; + Thread* thread{}; + s64 timeout_tick{}; }; } // namespace Kernel From 9b492430bb349b98ac4768ddde044a63976fe146 Mon Sep 17 00:00:00 2001 From: bunnei Date: Sat, 5 Dec 2020 00:17:18 -0800 Subject: [PATCH 24/26] hle: kernel: Thread: Various style fixes based on code review feedback. --- src/core/hle/kernel/thread.h | 47 +++++++++++++++++++----------------- 1 file changed, 25 insertions(+), 22 deletions(-) diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index f1aa358a4e..11ef298883 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h @@ -4,6 +4,7 @@ #pragma once +#include #include #include #include @@ -346,10 +347,11 @@ public: void SetStatus(ThreadStatus new_status); - constexpr s64 GetLastScheduledTick() const { + s64 GetLastScheduledTick() const { return this->last_scheduled_tick; } - constexpr void SetLastScheduledTick(s64 tick) { + + void SetLastScheduledTick(s64 tick) { this->last_scheduled_tick = tick; } @@ -481,7 +483,7 @@ public: return ideal_core; } - constexpr const KAffinityMask& GetAffinityMask() const { + const KAffinityMask& GetAffinityMask() const { return affinity_mask; } @@ -490,10 +492,11 @@ public: /// Sleeps this thread for the given amount of nanoseconds. 
ResultCode Sleep(s64 nanoseconds); - constexpr s64 GetYieldScheduleCount() const { + s64 GetYieldScheduleCount() const { return this->schedule_count; } - constexpr void SetYieldScheduleCount(s64 count) { + + void SetYieldScheduleCount(s64 count) { this->schedule_count = count; } @@ -570,14 +573,9 @@ public: return has_exited; } - struct QueueEntry { - private: - Thread* prev; - Thread* next; - + class QueueEntry { public: - constexpr QueueEntry() : prev(nullptr), next(nullptr) { /* ... */ - } + constexpr QueueEntry() = default; constexpr void Initialize() { this->prev = nullptr; @@ -590,18 +588,23 @@ public: constexpr Thread* GetNext() const { return this->next; } - constexpr void SetPrev(Thread* t) { - this->prev = t; + constexpr void SetPrev(Thread* thread) { + this->prev = thread; } - constexpr void SetNext(Thread* t) { - this->next = t; + constexpr void SetNext(Thread* thread) { + this->next = thread; } + + private: + Thread* prev{}; + Thread* next{}; }; - constexpr QueueEntry& GetPriorityQueueEntry(s32 core) { + QueueEntry& GetPriorityQueueEntry(s32 core) { return this->per_core_priority_queue_entry[core]; } - constexpr const QueueEntry& GetPriorityQueueEntry(s32 core) const { + + const QueueEntry& GetPriorityQueueEntry(s32 core) const { return this->per_core_priority_queue_entry[core]; } @@ -619,9 +622,6 @@ public: disable_count--; } - ThreadStatus status = ThreadStatus::Dormant; - u32 scheduling_state = 0; - private: friend class GlobalSchedulerContext; friend class KScheduler; @@ -638,6 +638,9 @@ private: ThreadContext64 context_64{}; std::shared_ptr host_context{}; + ThreadStatus status = ThreadStatus::Dormant; + u32 scheduling_state = 0; + u64 thread_id = 0; VAddr entry_point = 0; @@ -701,7 +704,7 @@ private: KScheduler* scheduler = nullptr; - QueueEntry per_core_priority_queue_entry[Core::Hardware::NUM_CPU_CORES]{}; + std::array per_core_priority_queue_entry{}; u32 ideal_core{0xFFFFFFFF}; KAffinityMask affinity_mask{}; From d4ae0ae0e9077c41499bb6a1d4bed2e047312748 Mon Sep 17 00:00:00 2001 From: bunnei Date: Sun, 6 Dec 2020 00:16:39 -0800 Subject: [PATCH 25/26] core: cpu_manager: Fix a typo in PreemptSingleCore, which broke many games. - We were reload'ing the old current scheduler, which may have changed. 
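- The scheduler reference was bound before the fiber yield, but current_core
  can advance across that yield, so the reload must re-fetch the scheduler
  afterwards. A reduced sketch of the hazard, with hypothetical types:

    #include <array>
    #include <cstddef>

    struct Sched {
        int reloads = 0;
    };

    std::array<Sched, 4> schedulers{};
    std::size_t current_core = 0;

    void Preempt() {
        Sched& before = schedulers[current_core];               // bound before the yield
        current_core = (current_core + 1) % schedulers.size();  // the yield moves cores
        // Old code: before.reloads++;  // stale -- names the wrong core now
        static_cast<void>(before);
        schedulers[current_core].reloads++;  // fix: re-fetch after the yield
    }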
--- src/core/cpu_manager.cpp | 47 ++++++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 21 deletions(-) diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp index 1791543483..3733950475 100644 --- a/src/core/cpu_manager.cpp +++ b/src/core/cpu_manager.cpp @@ -254,29 +254,34 @@ void CpuManager::SingleCoreRunSuspendThread() { } void CpuManager::PreemptSingleCore(bool from_running_enviroment) { - std::size_t old_core = current_core; - auto& scheduler = system.Kernel().Scheduler(old_core); - Kernel::Thread* current_thread = scheduler.GetCurrentThread(); - if (idle_count >= 4 || from_running_enviroment) { - if (!from_running_enviroment) { - system.CoreTiming().Idle(); + { + auto& scheduler = system.Kernel().Scheduler(current_core); + Kernel::Thread* current_thread = scheduler.GetCurrentThread(); + if (idle_count >= 4 || from_running_enviroment) { + if (!from_running_enviroment) { + system.CoreTiming().Idle(); + idle_count = 0; + } + current_thread->SetPhantomMode(true); + system.CoreTiming().Advance(); + current_thread->SetPhantomMode(false); + } + current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES); + system.CoreTiming().ResetTicks(); + scheduler.Unload(scheduler.GetCurrentThread()); + + auto& next_scheduler = system.Kernel().Scheduler(current_core); + Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext()); + } + + // May have changed scheduler + { + auto& scheduler = system.Kernel().Scheduler(current_core); + scheduler.Reload(scheduler.GetCurrentThread()); + auto* currrent_thread2 = scheduler.GetCurrentThread(); + if (!currrent_thread2->IsIdleThread()) { idle_count = 0; } - current_thread->SetPhantomMode(true); - system.CoreTiming().Advance(); - current_thread->SetPhantomMode(false); - } - current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES); - system.CoreTiming().ResetTicks(); - scheduler.Unload(scheduler.GetCurrentThread()); - auto& next_scheduler = system.Kernel().Scheduler(current_core); - Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext()); - /// May have changed scheduler - auto& current_scheduler = system.Kernel().Scheduler(current_core); - current_scheduler.Reload(scheduler.GetCurrentThread()); - auto* currrent_thread2 = current_scheduler.GetCurrentThread(); - if (!currrent_thread2->IsIdleThread()) { - idle_count = 0; } } From 1bdb756d28a00f168094e490143dba89569fe35a Mon Sep 17 00:00:00 2001 From: bunnei Date: Sun, 6 Dec 2020 00:25:58 -0800 Subject: [PATCH 26/26] hle: kernel: Process: Various style fixes based on code review feedback. --- src/core/hle/kernel/process.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h index 36d8547bde..e412e58aac 100644 --- a/src/core/hle/kernel/process.h +++ b/src/core/hle/kernel/process.h @@ -217,12 +217,12 @@ public: } /// Gets the process schedule count, used for thread yelding - constexpr s64 GetScheduledCount() const { + s64 GetScheduledCount() const { return schedule_count; } /// Increments the process schedule count, used for thread yielding. - constexpr void IncrementScheduledCount() { + void IncrementScheduledCount() { ++schedule_count; }
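The schedule count is the counterpart of Thread's yield schedule count: a yield early-outs once the two match, until the process schedules something new (see YieldWithoutCoreMigration above). A reduced sketch of that handshake, with hypothetical names:

    struct Proc {
        long schedule_count = 0;
    };

    struct Thr {
        long yield_schedule_count = -1;
    };

    // Returns true when the yield can be skipped outright.
    bool YieldIsNoOp(const Thr& t, const Proc& p) {
        return t.yield_schedule_count == p.schedule_count;
    }

    // Called after a yield that found no other runnable thread, so the next
    // yield early-outs until the process schedules something again.
    void RecordFruitlessYield(Thr& t, const Proc& p) {
        t.yield_schedule_count = p.schedule_count;
    }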