core: Make CPUBarrier a unique_ptr instead of a shared_ptr
The CpuBarrier will always outlive the Cpu instances, since it's only destroyed after we destroy the Cpu instances on shutdown, so there's no need for shared ownership semantics here.
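To make the ownership argument concrete, here is a minimal standalone sketch of the pattern this commit moves to, not the actual suyu classes: the owning object holds the barrier through a std::unique_ptr and hands each worker a plain reference, which is safe precisely because the owner destroys the workers before the barrier. The names Barrier, Worker and System below are illustrative stand-ins for CpuBarrier, Cpu and System::Impl.

// Illustration only: unique_ptr owner plus non-owning references.
#include <cstddef>
#include <memory>
#include <vector>

struct Barrier {
    bool Rendezvous() { return true; } // placeholder for the real synchronisation
};

class Worker {
public:
    explicit Worker(Barrier& barrier) : barrier{barrier} {}

    void Step() {
        if (!barrier.Rendezvous()) {
            return; // session ended
        }
        // ... run one slice of work ...
    }

private:
    Barrier& barrier; // non-owning: the System guarantees the barrier outlives us
};

struct System {
    std::unique_ptr<Barrier> barrier = std::make_unique<Barrier>();
    std::vector<std::unique_ptr<Worker>> workers;

    void Init(std::size_t count) {
        for (std::size_t i = 0; i < count; ++i) {
            workers.push_back(std::make_unique<Worker>(*barrier));
        }
    }
    // Members are destroyed in reverse declaration order, so `workers` goes away
    // before `barrier` does and the references never dangle.
};

The declaration order in System::Impl (see the second hunk below) follows the same rule: cpu_barrier is declared before cpu_cores, so the cores are torn down first.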
parent b3cca34f50
commit c34efbbd60

3 changed files with 10 additions and 11 deletions
src/core/core.cpp

@@ -139,10 +139,10 @@ struct System::Impl {
         auto main_process = Kernel::Process::Create(kernel, "main");
         kernel.MakeCurrentProcess(main_process.get());

-        cpu_barrier = std::make_shared<CpuBarrier>();
+        cpu_barrier = std::make_unique<CpuBarrier>();
         cpu_exclusive_monitor = Cpu::MakeExclusiveMonitor(cpu_cores.size());
         for (std::size_t index = 0; index < cpu_cores.size(); ++index) {
-            cpu_cores[index] = std::make_shared<Cpu>(cpu_exclusive_monitor, cpu_barrier, index);
+            cpu_cores[index] = std::make_shared<Cpu>(cpu_exclusive_monitor, *cpu_barrier, index);
         }

         telemetry_session = std::make_unique<Core::TelemetrySession>();
@@ -283,7 +283,7 @@ struct System::Impl {
     std::unique_ptr<Tegra::GPU> gpu_core;
     std::shared_ptr<Tegra::DebugContext> debug_context;
     std::shared_ptr<ExclusiveMonitor> cpu_exclusive_monitor;
-    std::shared_ptr<CpuBarrier> cpu_barrier;
+    std::unique_ptr<CpuBarrier> cpu_barrier;
     std::array<std::shared_ptr<Cpu>, NUM_CPU_CORES> cpu_cores;
     std::array<std::unique_ptr<std::thread>, NUM_CPU_CORES - 1> cpu_core_threads;
     std::size_t active_core{}; ///< Active core, only used in single thread mode
src/core/core_cpu.cpp

@@ -49,10 +49,9 @@ bool CpuBarrier::Rendezvous() {
     return false;
 }

-Cpu::Cpu(std::shared_ptr<ExclusiveMonitor> exclusive_monitor,
-         std::shared_ptr<CpuBarrier> cpu_barrier, std::size_t core_index)
-    : cpu_barrier{std::move(cpu_barrier)}, core_index{core_index} {
+Cpu::Cpu(std::shared_ptr<ExclusiveMonitor> exclusive_monitor, CpuBarrier& cpu_barrier,
+         std::size_t core_index)
+    : cpu_barrier{cpu_barrier}, core_index{core_index} {

     if (Settings::values.use_cpu_jit) {
 #ifdef ARCHITECTURE_x86_64
         arm_interface = std::make_unique<ARM_Dynarmic>(exclusive_monitor, core_index);
@@ -83,7 +82,7 @@ std::shared_ptr<ExclusiveMonitor> Cpu::MakeExclusiveMonitor(std::size_t num_core

 void Cpu::RunLoop(bool tight_loop) {
     // Wait for all other CPU cores to complete the previous slice, such that they run in lock-step
-    if (!cpu_barrier->Rendezvous()) {
+    if (!cpu_barrier.Rendezvous()) {
         // If rendezvous failed, session has been killed
         return;
     }
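The call above relies on the barrier's Rendezvous() returning false once the session is being torn down, so every core can drop out of its run loop. The following is a rough sketch of how a rendezvous barrier of this kind can behave; it is an illustration under assumed names (RendezvousBarrier, NotifyEnd), not the actual CpuBarrier implementation from this commit.

// Illustration only: a counting barrier that releases all waiters once every
// core has arrived, and reports failure after NotifyEnd() has been called.
#include <condition_variable>
#include <cstddef>
#include <mutex>

class RendezvousBarrier {
public:
    explicit RendezvousBarrier(std::size_t core_count) : core_count{core_count} {}

    // Returns false once the barrier has been shut down (session killed).
    bool Rendezvous() {
        std::unique_lock lock{mutex};
        if (end) {
            return false;
        }
        if (++waiting == core_count) {
            // Last core to arrive starts the next slice for everyone.
            waiting = 0;
            ++generation;
            condition.notify_all();
        } else {
            const std::size_t current = generation;
            condition.wait(lock, [&] { return end || generation != current; });
        }
        return !end;
    }

    // Called on shutdown so cores blocked in Rendezvous() can exit their run loops.
    void NotifyEnd() {
        std::scoped_lock lock{mutex};
        end = true;
        condition.notify_all();
    }

private:
    std::mutex mutex;
    std::condition_variable condition;
    std::size_t core_count;
    std::size_t waiting = 0;
    std::size_t generation = 0;
    bool end = false;
};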
src/core/core_cpu.h

@@ -41,8 +41,8 @@ private:

 class Cpu {
 public:
-    Cpu(std::shared_ptr<ExclusiveMonitor> exclusive_monitor,
-        std::shared_ptr<CpuBarrier> cpu_barrier, std::size_t core_index);
+    Cpu(std::shared_ptr<ExclusiveMonitor> exclusive_monitor, CpuBarrier& cpu_barrier,
+        std::size_t core_index);
     ~Cpu();

     void RunLoop(bool tight_loop = true);
@@ -77,7 +77,7 @@ private:
     void Reschedule();

     std::unique_ptr<ARM_Interface> arm_interface;
-    std::shared_ptr<CpuBarrier> cpu_barrier;
+    CpuBarrier& cpu_barrier;
     std::shared_ptr<Kernel::Scheduler> scheduler;

     std::atomic<bool> reschedule_pending = false;