Mirror of https://git.suyu.dev/suyu/suyu.git (synced 2024-11-27)

Merge pull request #4356 from lioncash/inc

cpu_manager: Minor tidying up/header inclusions

Commit d84d9a64b3
5 changed files with 15 additions and 11 deletions
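In short: CpuManager's three thread-start accessors (GetGuestThreadStartFunc, GetIdleThreadStartFunc, GetSuspendThreadStartFunc) become static member functions, call sites in kernel.cpp, scheduler.cpp, and thread.cpp switch from system.GetCpuManager().Get...() to the qualified Core::CpuManager::Get...() form, two headers gain direct inclusions they previously picked up transitively, one private data member is dropped from CpuManager, and KernelCore::Impl::InitializeSuspendThreads() now moves the thread-name string into Thread::Create instead of copying it. (File paths below are reconstructed from the hunk contexts; the per-file headers did not survive extraction.)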
src/core/cpu_manager.h

@@ -9,6 +9,9 @@
 #include <functional>
 #include <memory>
 #include <thread>
+
+#include "common/fiber.h"
+#include "common/thread.h"
 #include "core/hardware_properties.h"
 
 namespace Common {
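The two added inclusions correspond to Common types the header already names, so it stops relying on other headers dragging them in. A reduced illustration of the include-what-you-use idea behind this; the members shown are assumptions for the sketch, not the real cpu_manager.h:

    // Reduced illustration of include-what-you-use: if a header names
    // Common::Fiber or Common::Event, it should include the headers that
    // declare them itself, instead of hoping a transitive include does.
    // The members below are assumptions for the sketch.
    #include <memory>

    #include "common/fiber.h"   // declares Common::Fiber
    #include "common/thread.h"  // declares Common::Event

    namespace Core {

    class CpuManager {
        struct CoreData {
            std::shared_ptr<Common::Fiber> host_context;  // needs common/fiber.h
            std::unique_ptr<Common::Event> enter_barrier; // needs common/thread.h
        };
        CoreData core_data{};
    };

    } // namespace Core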
@@ -46,9 +49,9 @@ public:
 
     void Pause(bool paused);
 
-    std::function<void(void*)> GetGuestThreadStartFunc();
-    std::function<void(void*)> GetIdleThreadStartFunc();
-    std::function<void(void*)> GetSuspendThreadStartFunc();
+    static std::function<void(void*)> GetGuestThreadStartFunc();
+    static std::function<void(void*)> GetIdleThreadStartFunc();
+    static std::function<void(void*)> GetSuspendThreadStartFunc();
     void* GetStartFuncParamater();
 
     void PreemptSingleCore(bool from_running_enviroment = true);
@@ -97,7 +100,6 @@ private:
     bool is_async_gpu{};
     bool is_multicore{};
     std::atomic<std::size_t> current_core{};
-    std::size_t preemption_count{};
     std::size_t idle_count{};
     static constexpr std::size_t max_cycle_runs = 5;
 
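The accessors can become static because the callbacks they return do not capture any particular CpuManager; the instance is delivered later through the void* argument that GetStartFuncParamater() supplies. A minimal, self-contained sketch of that round trip with simplified names (an illustration of the pattern, not the actual suyu code):

    // Minimal sketch of the start-function pattern (simplified, assumed).
    #include <functional>
    #include <iostream>

    namespace Core {

    class CpuManager {
    public:
        // Static: the returned callback reads no fields of any particular
        // CpuManager; the instance arrives later through the void* argument.
        static std::function<void(void*)> GetIdleThreadStartFunc() {
            return [](void* cpu_manager) {
                static_cast<CpuManager*>(cpu_manager)->RunIdleThread();
            };
        }

        // Note: "Paramater" is spelled this way in the real codebase.
        void* GetStartFuncParamater() {
            return this;
        }

    private:
        void RunIdleThread() {
            std::cout << "idle thread running\n";
        }
    };

    } // namespace Core

    int main() {
        Core::CpuManager manager;
        // Qualified call: no instance is needed to obtain the callback.
        auto init_func = Core::CpuManager::GetIdleThreadStartFunc();
        init_func(manager.GetStartFuncParamater());
    }

Calling through the qualified name Core::CpuManager::Get...(), as the updated call sites in the following hunks do, makes the lack of instance dependence explicit.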
src/core/hle/kernel/kernel.cpp

@@ -161,13 +161,14 @@ struct KernelCore::Impl {
     void InitializeSuspendThreads() {
         for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
             std::string name = "Suspend Thread Id:" + std::to_string(i);
-            std::function<void(void*)> init_func =
-                system.GetCpuManager().GetSuspendThreadStartFunc();
+            std::function<void(void*)> init_func = Core::CpuManager::GetSuspendThreadStartFunc();
             void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
-            ThreadType type =
+            const auto type =
                 static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_SUSPEND);
-            auto thread_res = Thread::Create(system, type, name, 0, 0, 0, static_cast<u32>(i), 0,
-                                             nullptr, std::move(init_func), init_func_parameter);
+            auto thread_res =
+                Thread::Create(system, type, std::move(name), 0, 0, 0, static_cast<u32>(i), 0,
+                               nullptr, std::move(init_func), init_func_parameter);
+
             suspend_threads[i] = std::move(thread_res).Unwrap();
         }
     }
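Two move-related details in this hunk: Thread::Create takes its name by value (see the thread.cpp hunk below), so the call site can donate the string's buffer with std::move(name) instead of copying it, and the result is consumed as std::move(thread_res).Unwrap(), which moves the payload out of the returned wrapper. A compact sketch of both, using a simplified stand-in for the real ResultVal (an assumption for illustration):

    // Sketch: moving into a by-value parameter, and moving a payload out
    // of a result wrapper. Result below is a simplified stand-in for
    // illustration; it is not suyu's actual ResultVal.
    #include <cassert>
    #include <optional>
    #include <string>
    #include <utility>

    template <typename T>
    class Result {
    public:
        explicit Result(T value) : value{std::move(value)} {}

        // Rvalue-qualified: callable only on an expiring object, hence the
        // std::move(thread_res).Unwrap() spelling at the call site.
        T Unwrap() && {
            assert(value.has_value());
            return std::move(*value);
        }

    private:
        std::optional<T> value;
    };

    // Takes the name by value: a caller that is done with its string can
    // std::move() it in and avoid a copy.
    Result<std::string> MakeThreadName(std::string name) {
        return Result<std::string>{std::move(name)};
    }

    int main() {
        std::string name = "Suspend Thread Id:" + std::to_string(0);
        auto res = MakeThreadName(std::move(name));  // donates the buffer
        const std::string final_name = std::move(res).Unwrap();
        return final_name.empty() ? 1 : 0;
    }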
src/core/hle/kernel/kernel.h

@@ -9,6 +9,7 @@
 #include <string>
 #include <unordered_map>
 #include <vector>
+#include "core/arm/cpu_interrupt_handler.h"
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/memory/memory_types.h"
 #include "core/hle/kernel/object.h"
src/core/hle/kernel/scheduler.cpp

@@ -802,7 +802,7 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
 
 void Scheduler::Initialize() {
     std::string name = "Idle Thread Id:" + std::to_string(core_id);
-    std::function<void(void*)> init_func = system.GetCpuManager().GetIdleThreadStartFunc();
+    std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
     void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
     ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
     auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
src/core/hle/kernel/thread.cpp

@@ -155,7 +155,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
                                                   std::string name, VAddr entry_point, u32 priority,
                                                   u64 arg, s32 processor_id, VAddr stack_top,
                                                   Process* owner_process) {
-    std::function<void(void*)> init_func = system.GetCpuManager().GetGuestThreadStartFunc();
+    std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc();
     void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
     return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top,
                   owner_process, std::move(init_func), init_func_parameter);
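This hunk touches the convenience overload of Thread::Create, which obtains the default guest-thread entry callback and its parameter, then forwards to the fuller overload that takes them explicitly. A reduced sketch of that overload-forwarding shape, with heavily simplified signatures (assumed for illustration, not the real interface):

    // Reduced sketch of Thread::Create's overload forwarding. The
    // signatures are heavily simplified assumptions, not the real API.
    #include <functional>
    #include <iostream>
    #include <string>
    #include <utility>

    namespace Kernel {

    class Thread {
    public:
        // Fuller overload: the start callback and its parameter are explicit.
        static Thread Create(std::string name, std::function<void(void*)> init_func,
                             void* init_func_parameter) {
            init_func(init_func_parameter);
            return Thread{std::move(name)};
        }

        // Convenience overload: supplies the default guest-thread callback,
        // then forwards everything to the fuller overload.
        static Thread Create(std::string name) {
            std::function<void(void*)> init_func = [](void*) {
                std::cout << "guest thread entry\n";
            };
            void* init_func_parameter = nullptr;  // stand-in for the CpuManager
            return Create(std::move(name), std::move(init_func), init_func_parameter);
        }

    private:
        explicit Thread(std::string name_) : name{std::move(name_)} {}
        std::string name;
    };

    } // namespace Kernel

    int main() {
        Kernel::Thread::Create("Suspend Thread Id:0");
    }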