2014-12-17 06:38:14 +01:00
|
|
|
// Copyright 2014 Citra Emulator Project
|
|
|
|
// Licensed under GPLv2 or any later version
|
2014-11-19 09:49:13 +01:00
|
|
|
// Refer to the license.txt file included.
|
2014-05-10 04:11:18 +02:00
|
|
|
|
2018-08-28 18:30:33 +02:00
|
|
|
#include <atomic>
|
|
|
|
#include <memory>
|
|
|
|
#include <mutex>
|
|
|
|
#include <utility>
|
|
|
|
|
|
|
|
#include "common/assert.h"
|
|
|
|
#include "common/logging/log.h"
|
|
|
|
|
|
|
|
#include "core/core.h"
|
|
|
|
#include "core/core_timing.h"
|
2019-09-10 17:04:40 +02:00
|
|
|
#include "core/core_timing_util.h"
|
2019-03-05 17:54:06 +01:00
|
|
|
#include "core/hle/kernel/address_arbiter.h"
|
2018-09-02 17:58:58 +02:00
|
|
|
#include "core/hle/kernel/client_port.h"
|
2019-10-08 00:57:13 +02:00
|
|
|
#include "core/hle/kernel/errors.h"
|
2017-05-30 01:45:42 +02:00
|
|
|
#include "core/hle/kernel/handle_table.h"
|
2016-09-21 08:52:38 +02:00
|
|
|
#include "core/hle/kernel/kernel.h"
|
2015-05-04 05:01:16 +02:00
|
|
|
#include "core/hle/kernel/process.h"
|
2015-08-06 02:26:52 +02:00
|
|
|
#include "core/hle/kernel/resource_limit.h"
|
2019-03-29 22:02:57 +01:00
|
|
|
#include "core/hle/kernel/scheduler.h"
|
2014-05-10 04:11:18 +02:00
|
|
|
#include "core/hle/kernel/thread.h"
|
2018-08-28 18:30:33 +02:00
|
|
|
#include "core/hle/lock.h"
|
|
|
|
#include "core/hle/result.h"
|
2019-04-07 07:10:44 +02:00
|
|
|
#include "core/memory.h"
|
2014-05-10 04:11:18 +02:00
|
|
|
|
2014-05-21 00:13:25 +02:00
|
|
|
namespace Kernel {
|
2014-05-10 04:11:18 +02:00
|
|
|
|
2018-08-28 18:30:33 +02:00
|
|
|
/**
|
|
|
|
* Callback that will wake up the thread it was scheduled for
|
|
|
|
* @param thread_handle The handle of the thread that's been awoken
|
|
|
|
* @param cycles_late The number of CPU cycles that have passed since the desired wakeup time
|
|
|
|
*/
|
2019-03-24 23:11:32 +01:00
|
|
|
static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_late) {
    // The thread handle is smuggled through the timing system as a u64; narrow it back.
    const auto proper_handle = static_cast<Handle>(thread_handle);
    const auto& system = Core::System::GetInstance();

    // Lock the global kernel mutex when we enter the kernel HLE.
    std::lock_guard lock{HLE::g_hle_lock};

    std::shared_ptr<Thread> thread =
        system.Kernel().RetrieveThreadFromWakeupCallbackHandleTable(proper_handle);
    if (thread == nullptr) {
        LOG_CRITICAL(Kernel, "Callback fired for invalid thread {:08X}", proper_handle);
        return;
    }

    // Whether the thread should actually resume; a wakeup callback may veto the resume.
    bool resume = true;

    if (thread->GetStatus() == ThreadStatus::WaitSynch ||
        thread->GetStatus() == ThreadStatus::WaitHLEEvent) {
        // Remove the thread from each of its waiting objects' waitlists
        for (const auto& object : thread->GetWaitObjects()) {
            object->RemoveWaitingThread(thread);
        }
        thread->ClearWaitObjects();

        // Invoke the timeout wakeup callback, which decides whether the thread resumes.
        // NOTE(review): the wait objects are cleared *before* this call, which contradicts
        // the previous comment here ("before clearing the wait objects") -- confirm the
        // intended ordering against the callback implementations.
        if (thread->HasWakeupCallback()) {
            resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Timeout, thread, nullptr, 0);
        }
    } else if (thread->GetStatus() == ThreadStatus::WaitMutex ||
               thread->GetStatus() == ThreadStatus::WaitCondVar) {
        // The wait timed out, so the recorded mutex/condvar wait state is now stale.
        thread->SetMutexWaitAddress(0);
        thread->SetWaitHandle(0);
        if (thread->GetStatus() == ThreadStatus::WaitCondVar) {
            // Detach the thread from its owner process' condition-variable wait list.
            thread->GetOwnerProcess()->RemoveConditionVariableThread(thread);
            thread->SetCondVarWaitAddress(0);
        }

        auto* const lock_owner = thread->GetLockOwner();
        // Threads waking up by timeout from WaitProcessWideKey do not perform priority inheritance
        // and don't have a lock owner unless SignalProcessWideKey was called first and the thread
        // wasn't awakened due to the mutex already being acquired.
        if (lock_owner != nullptr) {
            lock_owner->RemoveMutexWaiter(thread);
        }
    }

    // Address-arbiter waits record a wait address as well; clear it on timeout.
    if (thread->GetArbiterWaitAddress() != 0) {
        ASSERT(thread->GetStatus() == ThreadStatus::WaitArb);
        thread->SetArbiterWaitAddress(0);
    }

    if (resume) {
        // Condvar/arbiter waits report the timeout through the sync result before resuming.
        if (thread->GetStatus() == ThreadStatus::WaitCondVar ||
            thread->GetStatus() == ThreadStatus::WaitArb) {
            thread->SetWaitSynchronizationResult(RESULT_TIMEOUT);
        }
        thread->ResumeFromWait();
    }
}
|
|
|
|
|
|
|
|
struct KernelCore::Impl {
    // The global scheduler needs the system reference at construction time.
    explicit Impl(Core::System& system) : system{system}, global_scheduler{system} {}

    // (Re)initializes all kernel subsystem state. Safe to call on an already
    // initialized kernel: it tears everything down first via Shutdown().
    void Initialize(KernelCore& kernel) {
        Shutdown();

        InitializeSystemResourceLimit(kernel);
        InitializeThreads();
        InitializePreemption();
    }

    // Resets all kernel state to its defaults so the emulated system can start fresh.
    void Shutdown() {
        // Reset the ID generators to their initial values.
        next_object_id = 0;
        next_kernel_process_id = Process::InitialKIPIDMin;
        next_user_process_id = Process::ProcessIDMin;
        next_thread_id = 1;

        process_list.clear();
        current_process = nullptr;

        system_resource_limit = nullptr;

        thread_wakeup_callback_handle_table.Clear();
        thread_wakeup_event_type = nullptr;
        preemption_event = nullptr;

        global_scheduler.Shutdown();

        named_ports.clear();
    }

    // Creates the default system resource limit
    void InitializeSystemResourceLimit(KernelCore& kernel) {
        system_resource_limit = ResourceLimit::Create(kernel);

        // If setting the default system values fails, then something seriously wrong has occurred.
        ASSERT(system_resource_limit->SetLimitValue(ResourceType::PhysicalMemory, 0x200000000)
                   .IsSuccess());
        ASSERT(system_resource_limit->SetLimitValue(ResourceType::Threads, 800).IsSuccess());
        ASSERT(system_resource_limit->SetLimitValue(ResourceType::Events, 700).IsSuccess());
        ASSERT(system_resource_limit->SetLimitValue(ResourceType::TransferMemory, 200).IsSuccess());
        ASSERT(system_resource_limit->SetLimitValue(ResourceType::Sessions, 900).IsSuccess());
    }

    // Registers the core-timing event used to wake threads whose wait timed out.
    void InitializeThreads() {
        thread_wakeup_event_type =
            Core::Timing::CreateEvent("ThreadWakeupCallback", ThreadWakeupCallback);
    }

    // Schedules a recurring 10ms event that asks the global scheduler to preempt
    // threads; the callback reschedules itself to keep the cycle running.
    void InitializePreemption() {
        preemption_event =
            Core::Timing::CreateEvent("PreemptionCallback", [this](u64 userdata, s64 cycles_late) {
                global_scheduler.PreemptThreads();
                s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
                system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
            });

        // Kick off the first preemption tick.
        s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
        system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
    }

    // Monotonic ID generators; std::atomic so IDs can be generated without
    // external locking.
    std::atomic<u32> next_object_id{0};
    std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin};
    std::atomic<u64> next_user_process_id{Process::ProcessIDMin};
    std::atomic<u64> next_thread_id{1};

    // Lists all processes that exist in the current session.
    std::vector<std::shared_ptr<Process>> process_list;
    // Non-owning; null until a process has been made current.
    Process* current_process = nullptr;
    Kernel::GlobalScheduler global_scheduler;

    // Kernel-wide default resource limit (memory/threads/events/etc. caps).
    std::shared_ptr<ResourceLimit> system_resource_limit;

    std::shared_ptr<Core::Timing::EventType> thread_wakeup_event_type;
    std::shared_ptr<Core::Timing::EventType> preemption_event;

    // TODO(yuriks): This can be removed if Thread objects are explicitly pooled in the future,
    // allowing us to simply use a pool index or similar.
    Kernel::HandleTable thread_wakeup_callback_handle_table;

    /// Map of named ports managed by the kernel, which can be retrieved using
    /// the ConnectToPort SVC.
    NamedPortTable named_ports;

    // System context
    Core::System& system;
};
|
|
|
|
|
2019-03-05 18:28:10 +01:00
|
|
|
// Constructs the kernel alongside its pimpl state, bound to the given system instance.
KernelCore::KernelCore(Core::System& system) {
    impl = std::make_unique<Impl>(system);
}
|
2018-08-28 18:30:33 +02:00
|
|
|
// Tears down all kernel state before the implementation object is destroyed.
KernelCore::~KernelCore() {
    Shutdown();
}
|
|
|
|
|
2019-03-05 18:28:10 +01:00
|
|
|
// Brings the kernel up; the pimpl needs a back-reference to this kernel instance.
void KernelCore::Initialize() {
    impl->Initialize(*this);
}
|
|
|
|
|
|
|
|
// Resets all kernel state; delegates entirely to the implementation object.
void KernelCore::Shutdown() {
    impl->Shutdown();
}
|
|
|
|
|
2019-11-25 02:15:51 +01:00
|
|
|
std::shared_ptr<ResourceLimit> KernelCore::GetSystemResourceLimit() const {
|
2018-11-19 18:54:06 +01:00
|
|
|
return impl->system_resource_limit;
|
2018-08-28 18:30:33 +02:00
|
|
|
}
|
|
|
|
|
2019-11-25 02:15:51 +01:00
|
|
|
// Looks the handle up in the dedicated wakeup-callback handle table; yields null
// when the handle is stale or invalid.
std::shared_ptr<Thread> KernelCore::RetrieveThreadFromWakeupCallbackHandleTable(
    Handle handle) const {
    auto& wakeup_table = impl->thread_wakeup_callback_handle_table;
    return wakeup_table.Get<Thread>(handle);
}
|
|
|
|
|
2019-11-25 02:15:51 +01:00
|
|
|
void KernelCore::AppendNewProcess(std::shared_ptr<Process> process) {
|
2018-08-28 18:30:33 +02:00
|
|
|
impl->process_list.push_back(std::move(process));
|
|
|
|
}
|
|
|
|
|
2018-10-10 06:42:10 +02:00
|
|
|
// Records the given process as current and, when non-null, switches the memory
// subsystem over to its page table. Passing nullptr only clears the current process.
void KernelCore::MakeCurrentProcess(Process* process) {
    impl->current_process = process;

    if (process != nullptr) {
        Memory::SetCurrentPageTable(*process);
    }
}
|
|
|
|
|
2018-10-10 06:42:10 +02:00
|
|
|
// Returns the currently active process (non-owning; may be null).
Process* KernelCore::CurrentProcess() {
    auto* const process = impl->current_process;
    return process;
}
|
|
|
|
|
2018-10-10 06:42:10 +02:00
|
|
|
// Const overload: returns the currently active process (non-owning; may be null).
const Process* KernelCore::CurrentProcess() const {
    const auto* process = impl->current_process;
    return process;
}
|
|
|
|
|
2019-11-25 02:15:51 +01:00
|
|
|
const std::vector<std::shared_ptr<Process>>& KernelCore::GetProcessList() const {
|
2019-03-20 20:03:52 +01:00
|
|
|
return impl->process_list;
|
|
|
|
}
|
|
|
|
|
2019-03-29 22:02:57 +01:00
|
|
|
// Grants mutable access to the kernel's global scheduler.
Kernel::GlobalScheduler& KernelCore::GlobalScheduler() {
    auto& scheduler = impl->global_scheduler;
    return scheduler;
}
|
|
|
|
|
|
|
|
// Const overload: read-only access to the kernel's global scheduler.
const Kernel::GlobalScheduler& KernelCore::GlobalScheduler() const {
    const auto& scheduler = impl->global_scheduler;
    return scheduler;
}
|
|
|
|
|
2019-11-25 02:15:51 +01:00
|
|
|
void KernelCore::AddNamedPort(std::string name, std::shared_ptr<ClientPort> port) {
|
2018-09-02 17:58:58 +02:00
|
|
|
impl->named_ports.emplace(std::move(name), std::move(port));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Resolves a port name to a table iterator; compare against the table end
// (see IsValidNamedPort) to detect a miss.
KernelCore::NamedPortTable::iterator KernelCore::FindNamedPort(const std::string& name) {
    auto& ports = impl->named_ports;
    return ports.find(name);
}
|
|
|
|
|
|
|
|
// Const overload of FindNamedPort.
KernelCore::NamedPortTable::const_iterator KernelCore::FindNamedPort(
    const std::string& name) const {
    const auto& ports = impl->named_ports;
    return ports.find(name);
}
|
|
|
|
|
|
|
|
// An iterator returned by FindNamedPort is valid iff it is not the end iterator.
bool KernelCore::IsValidNamedPort(NamedPortTable::const_iterator port) const {
    const auto end = impl->named_ports.cend();
    return port != end;
}
|
|
|
|
|
2018-08-28 18:30:33 +02:00
|
|
|
// Atomically hands out the next kernel object ID.
u32 KernelCore::CreateNewObjectID() {
    return impl->next_object_id.fetch_add(1);
}
|
|
|
|
|
2018-12-19 04:37:01 +01:00
|
|
|
// Atomically hands out the next thread ID.
u64 KernelCore::CreateNewThreadID() {
    return impl->next_thread_id.fetch_add(1);
}
|
|
|
|
|
2019-06-10 06:28:33 +02:00
|
|
|
// Atomically hands out the next process ID from the kernel (KIP) range.
u64 KernelCore::CreateNewKernelProcessID() {
    return impl->next_kernel_process_id.fetch_add(1);
}
|
|
|
|
|
|
|
|
// Atomically hands out the next process ID from the userland range.
u64 KernelCore::CreateNewUserProcessID() {
    return impl->next_user_process_id.fetch_add(1);
}
|
2015-08-06 02:26:52 +02:00
|
|
|
|
2019-11-27 03:48:56 +01:00
|
|
|
const std::shared_ptr<Core::Timing::EventType>& KernelCore::ThreadWakeupCallbackEventType() const {
|
2018-08-28 18:30:33 +02:00
|
|
|
return impl->thread_wakeup_event_type;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Mutable access to the handle table backing the thread wakeup callbacks.
Kernel::HandleTable& KernelCore::ThreadWakeupCallbackHandleTable() {
    auto& table = impl->thread_wakeup_callback_handle_table;
    return table;
}
|
2015-08-06 02:26:52 +02:00
|
|
|
|
2018-08-28 18:30:33 +02:00
|
|
|
// Const overload of the wakeup-callback handle table accessor.
const Kernel::HandleTable& KernelCore::ThreadWakeupCallbackHandleTable() const {
    const auto& table = impl->thread_wakeup_callback_handle_table;
    return table;
}
|
2014-05-23 01:06:12 +02:00
|
|
|
|
2018-01-01 19:25:37 +01:00
|
|
|
} // namespace Kernel
|