// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/fiber.h"
#include "common/microprofile.h"
#include "common/thread.h"
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/cpu_manager.h"
#include "core/frontend/emu_window.h"
#include "core/gdbstub/gdbstub.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"

namespace Core {

CpuManager::CpuManager(System& system) : system{system} {}

CpuManager::~CpuManager() = default;
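
// Host-thread entry point used by Initialize(). In single-core mode with a
// synchronous GPU, the render window's context is made current on this thread
// for the duration of RunThread() and released afterwards.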
void CpuManager::ThreadStart(CpuManager& cpu_manager, std::size_t core) {
    if (!cpu_manager.is_async_gpu && !cpu_manager.is_multicore) {
        cpu_manager.render_window->MakeCurrent();
    }
    cpu_manager.RunThread(core);
    if (!cpu_manager.is_async_gpu && !cpu_manager.is_multicore) {
        cpu_manager.render_window->DoneCurrent();
    }
}

void CpuManager::SetRenderWindow(Core::Frontend::EmuWindow& render_window) {
    this->render_window = &render_window;
}
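
// Spawns the host threads: one per emulated core when multicore is enabled,
// otherwise a single thread that time-slices all emulated cores.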
void CpuManager::Initialize() {
    running_mode = true;
    if (is_multicore) {
        for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
            core_data[core].host_thread =
                std::make_unique<std::thread>(ThreadStart, std::ref(*this), core);
        }
    } else {
        core_data[0].host_thread = std::make_unique<std::thread>(ThreadStart, std::ref(*this), 0);
    }
}
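
// Stops the run loop and unblocks the host threads so they can exit, then joins them.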
void CpuManager::Shutdown() {
    running_mode = false;
    Pause(false);
    if (is_multicore) {
        for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
            core_data[core].host_thread->join();
            core_data[core].host_thread.reset();
        }
    } else {
        core_data[0].host_thread->join();
        core_data[0].host_thread.reset();
    }
}
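
// The following accessors expose the start routines for guest, idle and suspend
// threads. Each routine expects the owning CpuManager (see
// GetStartFuncParamater()) as its void* argument.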
std::function<void(void*)> CpuManager::GetGuestThreadStartFunc() {
    return std::function<void(void*)>(GuestThreadFunction);
}

std::function<void(void*)> CpuManager::GetIdleThreadStartFunc() {
    return std::function<void(void*)>(IdleThreadFunction);
}

std::function<void(void*)> CpuManager::GetSuspendThreadStartFunc() {
    return std::function<void(void*)>(SuspendThreadFunction);
}
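
// Trampolines: recover the CpuManager from the opaque parameter and dispatch to
// the multicore or single-core implementation.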
void CpuManager::GuestThreadFunction(void* cpu_manager_) {
    CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
    if (cpu_manager->is_multicore) {
        cpu_manager->MultiCoreRunGuestThread();
    } else {
        cpu_manager->SingleCoreRunGuestThread();
    }
}

void CpuManager::GuestRewindFunction(void* cpu_manager_) {
    CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
    if (cpu_manager->is_multicore) {
        cpu_manager->MultiCoreRunGuestLoop();
    } else {
        cpu_manager->SingleCoreRunGuestLoop();
    }
}

void CpuManager::IdleThreadFunction(void* cpu_manager_) {
    CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
    if (cpu_manager->is_multicore) {
        cpu_manager->MultiCoreRunIdleThread();
    } else {
        cpu_manager->SingleCoreRunIdleThread();
    }
}

void CpuManager::SuspendThreadFunction(void* cpu_manager_) {
    CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
    if (cpu_manager->is_multicore) {
        cpu_manager->MultiCoreRunSuspendThread();
    } else {
        cpu_manager->SingleCoreRunSuspendThread();
    }
}

void* CpuManager::GetStartFuncParamater() {
    return static_cast<void*>(this);
}

///////////////////////////////////////////////////////////////////////////////
///                                 MultiCore                               ///
///////////////////////////////////////////////////////////////////////////////
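
// First entry of a guest thread on its host core: notify the scheduler that the
// thread has started, then fall into the guest run loop.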
void CpuManager::MultiCoreRunGuestThread() {
    auto& kernel = system.Kernel();
    {
        auto& sched = kernel.CurrentScheduler();
        sched.OnThreadStart();
    }
    MultiCoreRunGuestLoop();
}
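
// Core of multicore execution: register a rewind point (see GuestRewindFunction),
// then repeatedly run the physical core until it is interrupted and let the
// scheduler perform any pending context switch.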
void CpuManager::MultiCoreRunGuestLoop() {
    auto& kernel = system.Kernel();
    auto* thread = kernel.CurrentScheduler().GetCurrentThread();
    auto host_context = thread->GetHostContext();
    host_context->SetRewindPoint(std::function<void(void*)>(GuestRewindFunction), this);
    host_context.reset();
    while (true) {
        auto& physical_core = kernel.CurrentPhysicalCore();
        system.EnterDynarmicProfile();
        while (!physical_core.IsInterrupted()) {
            physical_core.Run();
        }
        system.ExitDynarmicProfile();
        physical_core.ClearExclusive();
        auto& scheduler = physical_core.Scheduler();
        scheduler.TryDoContextSwitch();
    }
}

void CpuManager::MultiCoreRunIdleThread() {
    auto& kernel = system.Kernel();
    while (true) {
        auto& physical_core = kernel.CurrentPhysicalCore();
        physical_core.Idle();
        auto& scheduler = physical_core.Scheduler();
        scheduler.TryDoContextSwitch();
    }
}
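
// Suspend thread for a core: repeatedly yields back to the host thread's own
// fiber (core_data[core].host_context) and services the pending context switch
// once it is resumed.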
void CpuManager::MultiCoreRunSuspendThread() {
    auto& kernel = system.Kernel();
    {
        auto& sched = kernel.CurrentScheduler();
        sched.OnThreadStart();
    }
    while (true) {
        auto core = kernel.GetCurrentHostThreadID();
        auto& scheduler = kernel.CurrentScheduler();
        Kernel::Thread* current_thread = scheduler.GetCurrentThread();
        Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[core].host_context);
        ASSERT(scheduler.ContextSwitchPending());
        ASSERT(core == kernel.GetCurrentHostThreadID());
        scheduler.TryDoContextSwitch();
    }
}
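
// Pausing is a handshake with RunThread(): busy-wait until every initialized
// core reaches the expected state, then release the matching barrier. When
// pausing, the cores are intentionally left blocked on their exit barriers.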
void CpuManager::MultiCorePause(bool paused) {
    if (!paused) {
        bool all_not_barrier = false;
        while (!all_not_barrier) {
            all_not_barrier = true;
            for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
                all_not_barrier &=
                    !core_data[core].is_running.load() && core_data[core].initialized.load();
            }
        }
        for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
            core_data[core].enter_barrier->Set();
        }
        if (paused_state.load()) {
            bool all_barrier = false;
            while (!all_barrier) {
                all_barrier = true;
                for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
                    all_barrier &=
                        core_data[core].is_paused.load() && core_data[core].initialized.load();
                }
            }
            for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
                core_data[core].exit_barrier->Set();
            }
        }
    } else {
        /// Wait until all cores are paused.
        bool all_barrier = false;
        while (!all_barrier) {
            all_barrier = true;
            for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
                all_barrier &=
                    core_data[core].is_paused.load() && core_data[core].initialized.load();
            }
        }
        /// Don't release the barrier
    }
    paused_state = paused;
}

///////////////////////////////////////////////////////////////////////////////
///                                SingleCore                               ///
///////////////////////////////////////////////////////////////////////////////

void CpuManager::SingleCoreRunGuestThread() {
    auto& kernel = system.Kernel();
    {
        auto& sched = kernel.CurrentScheduler();
        sched.OnThreadStart();
    }
    SingleCoreRunGuestLoop();
}
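
// Single-core variant of the guest loop: the single host thread runs the current
// core until it is interrupted or max_cycle_runs iterations have elapsed, then
// PreemptSingleCore() rotates execution to the next emulated core.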
void CpuManager::SingleCoreRunGuestLoop() {
    auto& kernel = system.Kernel();
    auto* thread = kernel.CurrentScheduler().GetCurrentThread();
    auto host_context = thread->GetHostContext();
    host_context->SetRewindPoint(std::function<void(void*)>(GuestRewindFunction), this);
    host_context.reset();
    while (true) {
        auto& physical_core = kernel.CurrentPhysicalCore();
        system.EnterDynarmicProfile();
        while (!physical_core.IsInterrupted()) {
            physical_core.Run();
            preemption_count++;
            if (preemption_count % max_cycle_runs == 0) {
                break;
            }
        }
        physical_core.ClearExclusive();
        system.ExitDynarmicProfile();
        PreemptSingleCore();
        auto& scheduler = kernel.Scheduler(current_core);
        scheduler.TryDoContextSwitch();
    }
}

void CpuManager::SingleCoreRunIdleThread() {
    auto& kernel = system.Kernel();
    while (true) {
        auto& physical_core = kernel.CurrentPhysicalCore();
        PreemptSingleCore();
        auto& scheduler = physical_core.Scheduler();
        scheduler.TryDoContextSwitch();
    }
}

void CpuManager::SingleCoreRunSuspendThread() {
    auto& kernel = system.Kernel();
    {
        auto& sched = kernel.CurrentScheduler();
        sched.OnThreadStart();
    }
    while (true) {
        auto core = kernel.GetCurrentHostThreadID();
        auto& scheduler = kernel.CurrentScheduler();
        Kernel::Thread* current_thread = scheduler.GetCurrentThread();
        Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[0].host_context);
        ASSERT(scheduler.ContextSwitchPending());
        ASSERT(core == kernel.GetCurrentHostThreadID());
        scheduler.TryDoContextSwitch();
    }
}
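
// Rotates the single host thread to the next emulated core: unload the current
// core's scheduler, yield to the next core's control context, and reload
// whichever scheduler is current once this fiber is resumed.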
void CpuManager::PreemptSingleCore() {
    preemption_count = 0;
    std::size_t old_core = current_core;
    current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES);
    auto& scheduler = system.Kernel().Scheduler(old_core);
    Kernel::Thread* current_thread = scheduler.GetCurrentThread();
    scheduler.Unload();
    auto& next_scheduler = system.Kernel().Scheduler(current_core);
    Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext());
    /// May have changed scheduler
    auto& current_scheduler = system.Kernel().Scheduler(current_core);
    current_scheduler.Reload();
}
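
// Same handshake as MultiCorePause(), but only core_data[0] participates since
// one host thread drives every emulated core.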
void CpuManager::SingleCorePause(bool paused) {
    if (!paused) {
        bool all_not_barrier = false;
        while (!all_not_barrier) {
            all_not_barrier = !core_data[0].is_running.load() && core_data[0].initialized.load();
        }
        core_data[0].enter_barrier->Set();
        if (paused_state.load()) {
            bool all_barrier = false;
            while (!all_barrier) {
                all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
            }
            core_data[0].exit_barrier->Set();
        }
    } else {
        /// Wait until all cores are paused.
        bool all_barrier = false;
        while (!all_barrier) {
            all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
        }
        /// Don't release the barrier
    }
    paused_state = paused;
}
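
// Public pause entry point; dispatches to the multicore or single-core handshake.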
void CpuManager::Pause(bool paused) {
    if (is_multicore) {
        MultiCorePause(paused);
    } else {
        SingleCorePause(paused);
    }
}
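
// Main body of every host thread: convert the thread to a fiber, then alternate
// between waiting on the enter barrier, yielding into the current guest thread's
// fiber, and parking on the exit barrier, until shutdown.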
void CpuManager::RunThread(std::size_t core) {
    /// Initialization
    system.RegisterCoreThread(core);
    std::string name;
    if (is_multicore) {
        name = "yuzu:CoreCPUThread_" + std::to_string(core);
    } else {
        name = "yuzu:CPUThread";
    }
    MicroProfileOnThreadCreate(name.c_str());
    Common::SetCurrentThreadName(name.c_str());
    auto& data = core_data[core];
    data.enter_barrier = std::make_unique<Common::Event>();
    data.exit_barrier = std::make_unique<Common::Event>();
    data.host_context = Common::Fiber::ThreadToFiber();
    data.is_running = false;
    data.initialized = true;
    /// Running
    while (running_mode) {
        data.is_running = false;
        data.enter_barrier->Wait();
        auto& scheduler = system.Kernel().CurrentScheduler();
        Kernel::Thread* current_thread = scheduler.GetCurrentThread();
        data.is_running = true;
        Common::Fiber::YieldTo(data.host_context, current_thread->GetHostContext());
        data.is_running = false;
        data.is_paused = true;
        data.exit_barrier->Wait();
        data.is_paused = false;
    }
    /// Time to cleanup
    data.host_context->Exit();
    data.enter_barrier.reset();
    data.exit_barrier.reset();
    data.initialized = false;
}

} // namespace Core