2018-02-18 20:58:40 +01:00
|
|
|
// Copyright 2018 yuzu emulator team
|
|
|
|
// Licensed under GPLv2 or any later version
|
|
|
|
// Refer to the license.txt file included.
|
|
|
|
|
2018-07-31 14:06:09 +02:00
|
|
|
#include <algorithm>
|
2018-07-19 01:02:47 +02:00
|
|
|
#include <utility>
|
|
|
|
|
2018-07-31 14:06:09 +02:00
|
|
|
#include "common/assert.h"
|
|
|
|
#include "common/logging/log.h"
|
|
|
|
#include "core/arm/arm_interface.h"
|
2018-03-13 22:49:59 +01:00
|
|
|
#include "core/core.h"
|
2018-11-22 06:33:53 +01:00
|
|
|
#include "core/core_cpu.h"
|
2018-10-26 00:42:50 +02:00
|
|
|
#include "core/core_timing.h"
|
2018-10-10 06:42:10 +02:00
|
|
|
#include "core/hle/kernel/kernel.h"
|
2018-02-18 20:58:40 +01:00
|
|
|
#include "core/hle/kernel/process.h"
|
|
|
|
#include "core/hle/kernel/scheduler.h"
|
|
|
|
|
|
|
|
namespace Kernel {
|
|
|
|
|
2018-05-08 04:12:45 +02:00
|
|
|
// Protects scheduler state; static, so the one lock is shared by every
// Scheduler instance (one per CPU core) rather than being per-instance.
std::mutex Scheduler::scheduler_mutex;
|
|
|
|
|
2019-03-04 22:02:59 +01:00
|
|
|
// Constructs a scheduler bound to one ARM core. Only stores references; no
// threads exist until AddThread/ScheduleThread are called.
Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core)
    : cpu_core{cpu_core}, system{system} {}
|
2018-02-18 20:58:40 +01:00
|
|
|
|
|
|
|
/// Tears down the scheduler, stopping every thread it still owns so that no
/// thread outlives the scheduler that manages it.
Scheduler::~Scheduler() {
    for (auto& owned_thread : thread_list) {
        owned_thread->Stop();
    }
}
|
|
|
|
|
2018-08-12 18:55:56 +02:00
|
|
|
bool Scheduler::HaveReadyThreads() const {
|
2018-05-08 04:12:45 +02:00
|
|
|
std::lock_guard<std::mutex> lock(scheduler_mutex);
|
2019-03-16 05:30:15 +01:00
|
|
|
return !ready_queue.empty();
|
2018-02-18 20:58:40 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Returns the thread currently running on this scheduler's core, or nullptr
// when the core is idle (see SwitchContext's else branch).
Thread* Scheduler::GetCurrentThread() const {
    return current_thread.get();
}
|
|
|
|
|
2018-10-26 00:42:50 +02:00
|
|
|
// Returns the CoreTiming tick count recorded at the most recent context
// switch (updated by UpdateLastContextSwitchTime).
u64 Scheduler::GetLastContextSwitchTicks() const {
    return last_context_switch_time;
}
|
|
|
|
|
2018-02-18 20:58:40 +01:00
|
|
|
/// Selects the thread that should run next on this core.
///
/// If the current thread is still running, it only loses the core to a ready
/// thread with a strictly better (lower) priority; ties keep the current
/// thread. If nothing is running, the front of the ready queue is taken, or
/// nullptr when the queue is empty (core goes idle).
Thread* Scheduler::PopNextReadyThread() {
    Thread* const current = GetCurrentThread();
    const bool current_is_running =
        current != nullptr && current->GetStatus() == ThreadStatus::Running;

    // Empty queue: stay on the running thread, or idle if there isn't one.
    if (ready_queue.empty()) {
        return current_is_running ? current : nullptr;
    }

    Thread* const candidate = ready_queue.front();

    // A running thread is only displaced by a strictly higher-priority
    // candidate (lower numeric value); otherwise it keeps the core.
    if (current_is_running &&
        (candidate == nullptr || candidate->GetPriority() >= current->GetPriority())) {
        return current;
    }

    return candidate;
}
|
|
|
|
|
|
|
|
// Performs the actual context switch from the current thread to new_thread.
//
// Order matters here: the outgoing thread's CPU context must be saved before
// the incoming thread's is loaded, and accounting (UpdateLastContextSwitchTime)
// is charged to the outgoing thread/process before ownership changes.
// Passing nullptr puts the core into an idle state.
void Scheduler::SwitchContext(Thread* new_thread) {
    Thread* previous_thread = GetCurrentThread();
    Process* const previous_process = system.Kernel().CurrentProcess();

    // Charge the elapsed ticks to the thread/process that just finished running.
    UpdateLastContextSwitchTime(previous_thread, previous_process);

    // Save context for previous thread
    if (previous_thread) {
        cpu_core.SaveContext(previous_thread->GetContext());
        // Save the TPIDR_EL0 system register in case it was modified.
        previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());

        if (previous_thread->GetStatus() == ThreadStatus::Running) {
            // This is only the case when a reschedule is triggered without the current thread
            // yielding execution (i.e. an event triggered, system core time-sliced, etc)
            // NOTE(review): the `false` flag presumably controls front-vs-back placement in
            // the ready queue for the preempted thread -- confirm against MultiLevelQueue::add.
            ready_queue.add(previous_thread, previous_thread->GetPriority(), false);
            previous_thread->SetStatus(ThreadStatus::Ready);
        }
    }

    // Load context of new thread
    if (new_thread) {
        ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready,
                   "Thread must be ready to become running.");

        // Cancel any outstanding wakeup events for this thread
        new_thread->CancelWakeupTimer();

        current_thread = new_thread;

        // The incoming thread leaves the ready queue while it runs.
        ready_queue.remove(new_thread, new_thread->GetPriority());
        new_thread->SetStatus(ThreadStatus::Running);

        // Only swap the active process (and its page table) when the new
        // thread belongs to a different process than the outgoing one.
        auto* const thread_owner_process = current_thread->GetOwnerProcess();
        if (previous_process != thread_owner_process) {
            system.Kernel().MakeCurrentProcess(thread_owner_process);
            Memory::SetCurrentPageTable(&thread_owner_process->VMManager().page_table);
        }

        cpu_core.LoadContext(new_thread->GetContext());
        cpu_core.SetTlsAddress(new_thread->GetTLSAddress());
        cpu_core.SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
        // Drop any exclusive monitor state so stale LDXR/STXR pairs from the
        // previous thread cannot succeed spuriously.
        cpu_core.ClearExclusiveState();
    } else {
        current_thread = nullptr;
        // Note: We do not reset the current process and current page table when idling because
        // technically we haven't changed processes, our threads are just paused.
    }
}
|
|
|
|
|
2018-10-26 00:42:50 +02:00
|
|
|
/// Charges the ticks elapsed since the previous context switch to the given
/// thread and process (either may be null) and records the new switch time.
void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
    const u64 now_ticks = system.CoreTiming().GetTicks();
    const u64 elapsed_ticks = now_ticks - last_context_switch_time;

    if (thread != nullptr) {
        thread->UpdateCPUTimeTicks(elapsed_ticks);
    }

    if (process != nullptr) {
        process->UpdateCPUTimeTicks(elapsed_ticks);
    }

    last_context_switch_time = now_ticks;
}
|
|
|
|
|
2018-02-18 20:58:40 +01:00
|
|
|
void Scheduler::Reschedule() {
|
2018-05-08 04:12:45 +02:00
|
|
|
std::lock_guard<std::mutex> lock(scheduler_mutex);
|
|
|
|
|
2018-02-18 20:58:40 +01:00
|
|
|
Thread* cur = GetCurrentThread();
|
|
|
|
Thread* next = PopNextReadyThread();
|
|
|
|
|
|
|
|
if (cur && next) {
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_TRACE(Kernel, "context switch {} -> {}", cur->GetObjectId(), next->GetObjectId());
|
2018-02-18 20:58:40 +01:00
|
|
|
} else if (cur) {
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_TRACE(Kernel, "context switch {} -> idle", cur->GetObjectId());
|
2018-02-18 20:58:40 +01:00
|
|
|
} else if (next) {
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_TRACE(Kernel, "context switch idle -> {}", next->GetObjectId());
|
2018-02-18 20:58:40 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
SwitchContext(next);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Registers a thread with this scheduler, taking shared ownership of it.
/// The thread is not made runnable here; ScheduleThread does that.
/// NOTE(review): `priority` is currently unused by this overload.
void Scheduler::AddThread(SharedPtr<Thread> thread, u32 priority) {
    std::lock_guard<std::mutex> guard(scheduler_mutex);

    thread_list.push_back(std::move(thread));
}
|
|
|
|
|
|
|
|
/// Unregisters a thread from this scheduler, dropping its ownership reference.
void Scheduler::RemoveThread(Thread* thread) {
    std::lock_guard<std::mutex> guard(scheduler_mutex);

    // Classic erase-remove: compact the surviving entries, then trim the tail.
    const auto surviving_end = std::remove(thread_list.begin(), thread_list.end(), thread);
    thread_list.erase(surviving_end, thread_list.end());
}
|
|
|
|
|
|
|
|
/// Inserts a ready thread into the ready queue at the given priority so it
/// becomes a candidate for the next reschedule.
void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
    std::lock_guard<std::mutex> guard(scheduler_mutex);

    // Only ready threads may be queued for execution.
    ASSERT(thread->GetStatus() == ThreadStatus::Ready);
    ready_queue.add(thread, priority);
}
|
|
|
|
|
|
|
|
/// Removes a ready thread from the ready queue at the given priority; the
/// inverse of ScheduleThread.
void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
    std::lock_guard<std::mutex> guard(scheduler_mutex);

    // The queue only ever holds ready threads.
    ASSERT(thread->GetStatus() == ThreadStatus::Ready);
    ready_queue.remove(thread, priority);
}
|
|
|
|
|
|
|
|
/// Changes a thread's effective priority, re-slotting it in the ready queue
/// when it is currently queued.
void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
    std::lock_guard<std::mutex> guard(scheduler_mutex);

    // No-op when the priority is unchanged.
    if (thread->GetPriority() == priority) {
        return;
    }

    // Only ready threads live in the ready queue, so only they need moving.
    if (thread->GetStatus() == ThreadStatus::Ready) {
        ready_queue.adjust(thread, thread->GetPriority(), priority);
    }
}
|
|
|
|
|
2018-12-03 23:29:21 +01:00
|
|
|
/// Scans this scheduler's ready queue for the first thread that can run on
/// `core` (per its affinity mask) with a priority strictly better (lower)
/// than `maximum_priority`. Returns nullptr when no such thread exists.
Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const {
    std::lock_guard<std::mutex> guard(scheduler_mutex);

    const u32 core_mask = 1U << core;
    for (auto* candidate : ready_queue) {
        // Skip threads whose affinity excludes the requested core.
        if ((candidate->GetAffinityMask() & core_mask) == 0) {
            continue;
        }
        if (candidate->GetPriority() < maximum_priority) {
            return candidate;
        }
    }

    return nullptr;
}
|
|
|
|
|
|
|
|
// Yields the core without consulting other cores' schedulers: the calling
// thread sleeps for zero time, which forces a reschedule on this core only.
void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
    ASSERT(thread != nullptr);
    // Avoid yielding if the thread isn't even running.
    ASSERT(thread->GetStatus() == ThreadStatus::Running);

    // Sanity check that the priority is valid
    ASSERT(thread->GetPriority() < THREADPRIO_COUNT);

    // Yield this thread -- sleep for zero time and force reschedule to different thread
    GetCurrentThread()->Sleep(0);
}
|
|
|
|
|
|
|
|
/// Yields the core and then attempts load balancing: every core's scheduler is
/// asked for a thread that could run on this core with a better priority than
/// the yielder, and the best such thread (lowest priority value) is migrated
/// here via ChangeCore.
void Scheduler::YieldWithLoadBalancing(Thread* thread) {
    ASSERT(thread != nullptr);
    const auto priority = thread->GetPriority();
    const auto core = static_cast<u32>(thread->GetProcessorID());

    // Avoid yielding if the thread isn't even running.
    ASSERT(thread->GetStatus() == ThreadStatus::Running);

    // Sanity check that the priority is valid
    ASSERT(priority < THREADPRIO_COUNT);

    // Sleep for zero time to be able to force reschedule to different thread
    GetCurrentThread()->Sleep(0);

    // Query each core's scheduler and keep the best (lowest-priority-value)
    // suggestion found so far.
    Thread* best_candidate = nullptr;
    for (unsigned queried_core = 0; queried_core < Core::NUM_CPU_CORES; ++queried_core) {
        Thread* const suggestion =
            system.CpuCore(queried_core).Scheduler().GetNextSuggestedThread(core, priority);
        if (suggestion == nullptr) {
            continue;
        }
        if (best_candidate == nullptr ||
            best_candidate->GetPriority() > suggestion->GetPriority()) {
            best_candidate = suggestion;
        }
    }

    // If a suggested thread was found, pull it onto this core.
    if (best_candidate != nullptr) {
        best_candidate->ChangeCore(core, best_candidate->GetAffinityMask());
    }
}
|
|
|
|
|
|
|
|
// Stub: this yield variant (sleep until another core migrates a thread here)
// is not implemented yet and reports as such at runtime.
void Scheduler::YieldAndWaitForLoadBalancing(Thread* thread) {
    UNIMPLEMENTED_MSG("Wait for load balancing thread yield type is not implemented!");
}
|
|
|
|
|
2018-02-18 20:58:40 +01:00
|
|
|
} // namespace Kernel
|