// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
|
2018-07-19 01:02:47 +02:00
|
|
|
#include <algorithm>
#include <mutex>
#include <utility>

#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/scheduler.h"
namespace Kernel {
|
|
|
|
|
2018-05-08 04:12:45 +02:00
|
|
|
// Definition of the class-wide scheduler lock: a single static mutex shared by
// every Scheduler instance, serializing all scheduling operations.
std::mutex Scheduler::scheduler_mutex;
|
|
|
|
|
2018-02-19 22:46:42 +01:00
|
|
|
// Constructs a scheduler bound to one CPU core. `cpu_core` is a non-owning
// pointer; the ARM interface must outlive this scheduler, which uses it to
// save and load thread contexts during SwitchContext().
Scheduler::Scheduler(ARM_Interface* cpu_core) : cpu_core(cpu_core) {}
|
2018-02-18 20:58:40 +01:00
|
|
|
|
|
|
|
// Tears down the scheduler, stopping every thread it still owns so that no
// thread outlives the scheduler that manages it.
Scheduler::~Scheduler() {
    for (const auto& owned_thread : thread_list) {
        owned_thread->Stop();
    }
}
|
|
|
|
|
|
|
|
bool Scheduler::HaveReadyThreads() {
|
2018-05-08 04:12:45 +02:00
|
|
|
std::lock_guard<std::mutex> lock(scheduler_mutex);
|
2018-02-18 20:58:40 +01:00
|
|
|
return ready_queue.get_first() != nullptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Returns the currently executing thread, or null when the core is idle.
// The pointer is non-owning; ownership stays with `current_thread`.
Thread* Scheduler::GetCurrentThread() const {
    Thread* const running = current_thread.get();
    return running;
}
|
|
|
|
|
|
|
|
// Selects the thread that should run next. If the current thread is still
// running, it is only preempted by a strictly better-priority ready thread;
// otherwise the front of the ready queue is popped (possibly null = idle).
Thread* Scheduler::PopNextReadyThread() {
    Thread* const running = GetCurrentThread();

    const bool current_is_running =
        running != nullptr && running->status == ThreadStatus::Running;
    if (!current_is_running) {
        // No runnable current thread: just take whatever is first in line.
        return ready_queue.pop_first();
    }

    // Try to find something better than the current thread; pop_first_better
    // returns null when nothing with higher priority is ready, in which case
    // the current thread simply keeps running.
    Thread* const better = ready_queue.pop_first_better(running->current_priority);
    return better != nullptr ? better : running;
}
|
|
|
|
|
|
|
|
// Switches execution from the currently running thread to `new_thread`
// (or to idle when `new_thread` is null). Saves the outgoing thread's CPU
// context, loads the incoming one's, and switches the current process and
// page table when the two threads belong to different processes.
void Scheduler::SwitchContext(Thread* new_thread) {
    Thread* previous_thread = GetCurrentThread();

    // Save context for previous thread
    if (previous_thread) {
        previous_thread->last_running_ticks = CoreTiming::GetTicks();
        cpu_core->SaveContext(previous_thread->context);
        // Save the TPIDR_EL0 system register in case it was modified.
        previous_thread->tpidr_el0 = cpu_core->GetTPIDR_EL0();

        if (previous_thread->status == ThreadStatus::Running) {
            // This is only the case when a reschedule is triggered without the current thread
            // yielding execution (i.e. an event triggered, system core time-sliced, etc).
            // Push to the FRONT so the preempted thread resumes before equal-priority peers.
            ready_queue.push_front(previous_thread->current_priority, previous_thread);
            previous_thread->status = ThreadStatus::Ready;
        }
    }

    // Load context of new thread
    if (new_thread) {
        ASSERT_MSG(new_thread->status == ThreadStatus::Ready,
                   "Thread must be ready to become running.");

        // Cancel any outstanding wakeup events for this thread
        new_thread->CancelWakeupTimer();

        auto previous_process = Core::CurrentProcess();

        current_thread = new_thread;

        // The thread is now running, so take it out of the ready queue.
        ready_queue.remove(new_thread->current_priority, new_thread);
        new_thread->status = ThreadStatus::Running;

        // Switch address spaces only on an actual process change.
        if (previous_process != current_thread->owner_process) {
            Core::CurrentProcess() = current_thread->owner_process;
            SetCurrentPageTable(&Core::CurrentProcess()->vm_manager.page_table);
        }

        // Restore the incoming thread's CPU state: registers, TLS pointer,
        // TPIDR_EL0, and clear any exclusive-access state left by the
        // previous thread.
        cpu_core->LoadContext(new_thread->context);
        cpu_core->SetTlsAddress(new_thread->GetTLSAddress());
        cpu_core->SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
        cpu_core->ClearExclusiveState();
    } else {
        current_thread = nullptr;
        // Note: We do not reset the current process and current page table when idling because
        // technically we haven't changed processes, our threads are just paused.
    }
}
|
|
|
|
|
|
|
|
void Scheduler::Reschedule() {
|
2018-05-08 04:12:45 +02:00
|
|
|
std::lock_guard<std::mutex> lock(scheduler_mutex);
|
|
|
|
|
2018-02-18 20:58:40 +01:00
|
|
|
Thread* cur = GetCurrentThread();
|
|
|
|
Thread* next = PopNextReadyThread();
|
|
|
|
|
|
|
|
if (cur && next) {
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_TRACE(Kernel, "context switch {} -> {}", cur->GetObjectId(), next->GetObjectId());
|
2018-02-18 20:58:40 +01:00
|
|
|
} else if (cur) {
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_TRACE(Kernel, "context switch {} -> idle", cur->GetObjectId());
|
2018-02-18 20:58:40 +01:00
|
|
|
} else if (next) {
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_TRACE(Kernel, "context switch idle -> {}", next->GetObjectId());
|
2018-02-18 20:58:40 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
SwitchContext(next);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Registers `thread` with this scheduler, taking shared ownership of it, and
// makes sure the ready queue has a slot for its priority level.
void Scheduler::AddThread(SharedPtr<Thread> thread, u32 priority) {
    std::lock_guard<std::mutex> guard(scheduler_mutex);

    // Ensure the priority level exists before the thread can ever be queued.
    ready_queue.prepare(priority);
    thread_list.push_back(std::move(thread));
}
|
|
|
|
|
|
|
|
// Removes `thread` from this scheduler's ownership list, dropping the
// scheduler's shared reference to it.
// NOTE(review): nothing here stops the thread or removes it from ready_queue —
// callers appear responsible for that; verify against call sites.
void Scheduler::RemoveThread(Thread* thread) {
    std::lock_guard<std::mutex> lock(scheduler_mutex);

    // Erase-remove idiom: std::remove compacts non-matching entries to the
    // front, erase trims the leftover tail. std::remove lives in <algorithm>,
    // which the file now includes directly instead of relying on a transitive
    // include.
    const auto new_end = std::remove(thread_list.begin(), thread_list.end(), thread);
    thread_list.erase(new_end, thread_list.end());
}
|
|
|
|
|
|
|
|
// Places an already-ready thread into the ready queue at `priority` so a
// later Reschedule() can pick it up.
void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
    std::lock_guard<std::mutex> guard(scheduler_mutex);

    // Only threads already marked ready may enter the ready queue.
    ASSERT(thread->status == ThreadStatus::Ready);
    ready_queue.push_back(priority, thread);
}
|
|
|
|
|
|
|
|
// Removes a ready thread from the ready queue at `priority` (the inverse of
// ScheduleThread). The thread itself keeps its Ready status.
void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
    std::lock_guard<std::mutex> guard(scheduler_mutex);

    // The thread must still be in the Ready state to be queued at all.
    ASSERT(thread->status == ThreadStatus::Ready);
    ready_queue.remove(priority, thread);
}
|
|
|
|
|
|
|
|
// Adjusts the queue bookkeeping for a thread whose priority is changing to
// `priority`. Does not itself update thread->current_priority.
void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
    std::lock_guard<std::mutex> guard(scheduler_mutex);

    if (thread->status == ThreadStatus::Ready) {
        // Thread is currently queued: relocate it from its old priority level
        // to the new one.
        ready_queue.move(thread, thread->current_priority, priority);
    } else {
        // Not queued right now: just make sure the target priority level
        // exists for when it becomes ready.
        ready_queue.prepare(priority);
    }
}
|
|
|
|
|
|
|
|
} // namespace Kernel
|