2018-01-13 22:22:39 +01:00
|
|
|
// Copyright 2018 yuzu emulator team
|
2014-12-17 06:38:14 +01:00
|
|
|
// Licensed under GPLv2 or any later version
|
2014-11-19 09:49:13 +01:00
|
|
|
// Refer to the license.txt file included.
|
2014-04-11 01:58:28 +02:00
|
|
|
|
2018-01-12 04:36:56 +01:00
|
|
|
#include <algorithm>
|
2018-02-25 13:40:22 +01:00
|
|
|
#include <cinttypes>
|
2018-04-20 04:36:48 +02:00
|
|
|
#include <iterator>
|
2018-07-31 14:06:09 +02:00
|
|
|
#include <mutex>
|
|
|
|
#include <vector>
|
2018-01-12 04:36:56 +01:00
|
|
|
|
2018-10-18 18:55:27 +02:00
|
|
|
#include "common/alignment.h"
|
2018-07-31 14:06:09 +02:00
|
|
|
#include "common/assert.h"
|
2015-05-06 09:06:12 +02:00
|
|
|
#include "common/logging/log.h"
|
2015-08-17 23:25:21 +02:00
|
|
|
#include "common/microprofile.h"
|
2018-01-05 01:45:15 +01:00
|
|
|
#include "common/string_util.h"
|
2018-08-31 18:21:34 +02:00
|
|
|
#include "core/arm/exclusive_monitor.h"
|
2018-03-13 22:49:59 +01:00
|
|
|
#include "core/core.h"
|
2018-08-31 18:21:34 +02:00
|
|
|
#include "core/core_cpu.h"
|
2016-09-18 02:38:01 +02:00
|
|
|
#include "core/core_timing.h"
|
2018-06-21 08:49:43 +02:00
|
|
|
#include "core/hle/kernel/address_arbiter.h"
|
2016-05-22 19:30:13 +02:00
|
|
|
#include "core/hle/kernel/client_port.h"
|
2016-06-15 01:03:30 +02:00
|
|
|
#include "core/hle/kernel/client_session.h"
|
2018-01-08 03:24:19 +01:00
|
|
|
#include "core/hle/kernel/event.h"
|
2017-05-30 01:45:42 +02:00
|
|
|
#include "core/hle/kernel/handle_table.h"
|
2018-08-31 18:21:34 +02:00
|
|
|
#include "core/hle/kernel/kernel.h"
|
2018-01-01 20:38:34 +01:00
|
|
|
#include "core/hle/kernel/mutex.h"
|
2015-05-11 16:15:10 +02:00
|
|
|
#include "core/hle/kernel/process.h"
|
2017-12-31 21:58:16 +01:00
|
|
|
#include "core/hle/kernel/resource_limit.h"
|
2018-08-31 18:21:34 +02:00
|
|
|
#include "core/hle/kernel/scheduler.h"
|
2018-01-14 23:15:31 +01:00
|
|
|
#include "core/hle/kernel/shared_memory.h"
|
2018-01-03 02:40:30 +01:00
|
|
|
#include "core/hle/kernel/svc.h"
|
|
|
|
#include "core/hle/kernel/svc_wrap.h"
|
2017-10-15 04:18:42 +02:00
|
|
|
#include "core/hle/kernel/thread.h"
|
2017-10-14 23:30:07 +02:00
|
|
|
#include "core/hle/lock.h"
|
2014-10-23 05:20:01 +02:00
|
|
|
#include "core/hle/result.h"
|
2014-04-13 03:55:36 +02:00
|
|
|
#include "core/hle/service/service.h"
|
2014-04-11 01:58:28 +02:00
|
|
|
|
2018-01-03 02:40:30 +01:00
|
|
|
namespace Kernel {
|
2018-09-14 01:14:50 +02:00
|
|
|
namespace {
|
2018-10-10 20:18:27 +02:00
|
|
|
|
|
|
|
// Checks if address + size is greater than the given address
|
|
|
|
// This can return false if the size causes an overflow of a 64-bit type
|
|
|
|
// or if the given size is zero.
|
|
|
|
constexpr bool IsValidAddressRange(VAddr address, u64 size) {
|
|
|
|
return address + size > address;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Checks if a given address range lies within a larger address range.
|
|
|
|
constexpr bool IsInsideAddressRange(VAddr address, u64 size, VAddr address_range_begin,
|
|
|
|
VAddr address_range_end) {
|
|
|
|
const VAddr end_address = address + size - 1;
|
|
|
|
return address_range_begin <= address && end_address <= address_range_end - 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool IsInsideAddressSpace(const VMManager& vm, VAddr address, u64 size) {
|
|
|
|
return IsInsideAddressRange(address, size, vm.GetAddressSpaceBaseAddress(),
|
|
|
|
vm.GetAddressSpaceEndAddress());
|
|
|
|
}
|
|
|
|
|
|
|
|
bool IsInsideNewMapRegion(const VMManager& vm, VAddr address, u64 size) {
|
|
|
|
return IsInsideAddressRange(address, size, vm.GetNewMapRegionBaseAddress(),
|
|
|
|
vm.GetNewMapRegionEndAddress());
|
|
|
|
}
|
|
|
|
|
|
|
|
// Helper function that performs the common sanity checks for svcMapMemory
|
|
|
|
// and svcUnmapMemory. This is doable, as both functions perform their sanitizing
|
|
|
|
// in the same order.
|
|
|
|
ResultCode MapUnmapMemorySanityChecks(const VMManager& vm_manager, VAddr dst_addr, VAddr src_addr,
|
|
|
|
u64 size) {
|
2018-10-18 18:55:27 +02:00
|
|
|
if (!Common::Is4KBAligned(dst_addr) || !Common::Is4KBAligned(src_addr)) {
|
2018-10-10 20:18:27 +02:00
|
|
|
return ERR_INVALID_ADDRESS;
|
|
|
|
}
|
|
|
|
|
2018-10-18 18:55:27 +02:00
|
|
|
if (size == 0 || !Common::Is4KBAligned(size)) {
|
2018-10-10 20:18:27 +02:00
|
|
|
return ERR_INVALID_SIZE;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!IsValidAddressRange(dst_addr, size)) {
|
|
|
|
return ERR_INVALID_ADDRESS_STATE;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!IsValidAddressRange(src_addr, size)) {
|
|
|
|
return ERR_INVALID_ADDRESS_STATE;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!IsInsideAddressSpace(vm_manager, src_addr, size)) {
|
|
|
|
return ERR_INVALID_ADDRESS_STATE;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!IsInsideNewMapRegion(vm_manager, dst_addr, size)) {
|
|
|
|
return ERR_INVALID_MEMORY_RANGE;
|
|
|
|
}
|
|
|
|
|
|
|
|
const VAddr dst_end_address = dst_addr + size;
|
|
|
|
if (dst_end_address > vm_manager.GetHeapRegionBaseAddress() &&
|
2018-10-12 07:43:15 +02:00
|
|
|
vm_manager.GetHeapRegionEndAddress() > dst_addr) {
|
2018-10-10 20:18:27 +02:00
|
|
|
return ERR_INVALID_MEMORY_RANGE;
|
|
|
|
}
|
|
|
|
|
2018-10-12 07:43:15 +02:00
|
|
|
if (dst_end_address > vm_manager.GetMapRegionBaseAddress() &&
|
|
|
|
vm_manager.GetMapRegionEndAddress() > dst_addr) {
|
2018-10-10 20:18:27 +02:00
|
|
|
return ERR_INVALID_MEMORY_RANGE;
|
|
|
|
}
|
|
|
|
|
|
|
|
return RESULT_SUCCESS;
|
|
|
|
}
|
2018-09-14 01:14:50 +02:00
|
|
|
} // Anonymous namespace
|
2014-04-11 05:26:12 +02:00
|
|
|
|
2017-12-28 21:29:52 +01:00
|
|
|
/// Set the process heap to a given Size. It can both extend and shrink the heap.
/// On success, writes the heap's base address to *heap_addr.
static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) {
    LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", heap_size);

    // The mask rejects any size with bits set outside [21, 32]: the size must be a
    // multiple of 0x200000 (2MB). NOTE(review): bits 21-32 admit sizes up to
    // 0x1FFE00000 (just under 8GB), not the commonly cited 4GB bound — confirm intent.
    if ((heap_size & 0xFFFFFFFE001FFFFF) != 0) {
        return ERR_INVALID_SIZE;
    }

    auto& process = *Core::CurrentProcess();
    const VAddr heap_base = process.VMManager().GetHeapRegionBaseAddress();
    // CASCADE_RESULT propagates any allocation failure to the caller unchanged.
    CASCADE_RESULT(*heap_addr,
                   process.HeapAllocate(heap_base, heap_size, VMAPermission::ReadWrite));
    return RESULT_SUCCESS;
}
|
|
|
|
|
2018-01-08 03:23:42 +01:00
|
|
|
/// Sets attributes on a region of memory.
/// Stub: logs its arguments and reports success without changing any state.
static ResultCode SetMemoryAttribute(VAddr addr, u64 size, u32 state0, u32 state1) {
    LOG_WARNING(Kernel_SVC,
                "(STUBBED) called, addr=0x{:X}, size=0x{:X}, state0=0x{:X}, state1=0x{:X}", addr,
                size, state0, state1);
    return RESULT_SUCCESS;
}
|
|
|
|
|
2017-12-29 03:38:38 +01:00
|
|
|
/// Maps a memory range into a different range.
|
|
|
|
static ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
|
2018-07-02 18:20:50 +02:00
|
|
|
src_addr, size);
|
2018-09-14 01:14:50 +02:00
|
|
|
|
2018-10-10 20:18:27 +02:00
|
|
|
auto* const current_process = Core::CurrentProcess();
|
|
|
|
const auto& vm_manager = current_process->VMManager();
|
2018-09-14 01:14:50 +02:00
|
|
|
|
2018-10-10 20:18:27 +02:00
|
|
|
const auto result = MapUnmapMemorySanityChecks(vm_manager, dst_addr, src_addr, size);
|
|
|
|
if (result != RESULT_SUCCESS) {
|
|
|
|
return result;
|
2018-09-14 01:14:50 +02:00
|
|
|
}
|
|
|
|
|
2018-10-10 20:18:27 +02:00
|
|
|
return current_process->MirrorMemory(dst_addr, src_addr, size);
|
2017-12-29 03:38:38 +01:00
|
|
|
}
|
|
|
|
|
2017-12-31 21:22:49 +01:00
|
|
|
/// Unmaps a region that was previously mapped with svcMapMemory
|
|
|
|
static ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
|
2018-07-02 18:20:50 +02:00
|
|
|
src_addr, size);
|
2018-09-14 01:14:50 +02:00
|
|
|
|
2018-10-10 20:18:27 +02:00
|
|
|
auto* const current_process = Core::CurrentProcess();
|
|
|
|
const auto& vm_manager = current_process->VMManager();
|
2018-09-14 01:14:50 +02:00
|
|
|
|
2018-10-10 20:18:27 +02:00
|
|
|
const auto result = MapUnmapMemorySanityChecks(vm_manager, dst_addr, src_addr, size);
|
|
|
|
if (result != RESULT_SUCCESS) {
|
|
|
|
return result;
|
2018-09-14 01:14:50 +02:00
|
|
|
}
|
|
|
|
|
2018-10-10 20:18:27 +02:00
|
|
|
return current_process->UnmapMemory(dst_addr, src_addr, size);
|
2017-12-31 21:22:49 +01:00
|
|
|
}
|
|
|
|
|
2014-04-13 03:55:36 +02:00
|
|
|
/// Connect to an OS service given the port name, returns the handle to the port to out
static ResultCode ConnectToNamedPort(Handle* out_handle, VAddr port_name_address) {
    // The name must be readable from the emulated process' memory.
    if (!Memory::IsValidVirtualAddress(port_name_address)) {
        return ERR_NOT_FOUND;
    }

    static constexpr std::size_t PortNameMaxLength = 11;
    // Read 1 char beyond the max allowed port name to detect names that are too long.
    std::string port_name = Memory::ReadCString(port_name_address, PortNameMaxLength + 1);
    if (port_name.size() > PortNameMaxLength) {
        return ERR_PORT_NAME_TOO_LONG;
    }

    LOG_TRACE(Kernel_SVC, "called port_name={}", port_name);

    auto& kernel = Core::System::GetInstance().Kernel();
    auto it = kernel.FindNamedPort(port_name);
    if (!kernel.IsValidNamedPort(it)) {
        LOG_WARNING(Kernel_SVC, "tried to connect to unknown port: {}", port_name);
        return ERR_NOT_FOUND;
    }

    auto client_port = it->second;

    // CASCADE_RESULT bails out with the inner error if Connect() fails.
    SharedPtr<ClientSession> client_session;
    CASCADE_RESULT(client_session, client_port->Connect());

    // Return the client session
    CASCADE_RESULT(*out_handle, kernel.HandleTable().Create(client_session));
    return RESULT_SUCCESS;
}
|
|
|
|
|
2016-12-08 17:06:19 +01:00
|
|
|
/// Makes a blocking IPC call to an OS service.
|
2018-01-03 02:40:30 +01:00
|
|
|
static ResultCode SendSyncRequest(Handle handle) {
|
2018-08-28 18:30:33 +02:00
|
|
|
auto& kernel = Core::System::GetInstance().Kernel();
|
|
|
|
SharedPtr<ClientSession> session = kernel.HandleTable().Get<ClientSession>(handle);
|
2017-12-30 19:40:28 +01:00
|
|
|
if (!session) {
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_ERROR(Kernel_SVC, "called with invalid handle=0x{:08X}", handle);
|
2015-01-23 06:44:52 +01:00
|
|
|
return ERR_INVALID_HANDLE;
|
2014-10-23 05:20:01 +02:00
|
|
|
}
|
2014-05-27 04:12:46 +02:00
|
|
|
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
|
2014-05-27 04:12:46 +02:00
|
|
|
|
2017-01-01 17:57:02 +01:00
|
|
|
Core::System::GetInstance().PrepareReschedule();
|
|
|
|
|
2016-12-14 18:33:49 +01:00
|
|
|
// TODO(Subv): svcSendSyncRequest should put the caller thread to sleep while the server
|
|
|
|
// responds and cause a reschedule.
|
2018-01-03 02:40:30 +01:00
|
|
|
return session->SendSyncRequest(GetCurrentThread());
|
2014-04-11 01:58:28 +02:00
|
|
|
}
|
|
|
|
|
2017-10-23 06:15:45 +02:00
|
|
|
/// Get the ID for the specified thread.
|
2018-01-03 02:40:30 +01:00
|
|
|
static ResultCode GetThreadId(u32* thread_id, Handle thread_handle) {
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_TRACE(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
|
2017-10-23 06:15:45 +02:00
|
|
|
|
2018-08-28 18:30:33 +02:00
|
|
|
auto& kernel = Core::System::GetInstance().Kernel();
|
|
|
|
const SharedPtr<Thread> thread = kernel.HandleTable().Get<Thread>(thread_handle);
|
2017-12-30 19:40:28 +01:00
|
|
|
if (!thread) {
|
2017-10-23 06:15:45 +02:00
|
|
|
return ERR_INVALID_HANDLE;
|
|
|
|
}
|
|
|
|
|
2018-10-04 00:47:57 +02:00
|
|
|
*thread_id = thread->GetThreadID();
|
2017-10-23 06:15:45 +02:00
|
|
|
return RESULT_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Get the ID of the specified process
|
2018-01-03 02:40:30 +01:00
|
|
|
static ResultCode GetProcessId(u32* process_id, Handle process_handle) {
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_TRACE(Kernel_SVC, "called process=0x{:08X}", process_handle);
|
2017-10-23 06:15:45 +02:00
|
|
|
|
2018-08-28 18:30:33 +02:00
|
|
|
auto& kernel = Core::System::GetInstance().Kernel();
|
|
|
|
const SharedPtr<Process> process = kernel.HandleTable().Get<Process>(process_handle);
|
2017-12-30 19:40:28 +01:00
|
|
|
if (!process) {
|
2017-10-23 06:15:45 +02:00
|
|
|
return ERR_INVALID_HANDLE;
|
|
|
|
}
|
|
|
|
|
2018-09-21 08:06:47 +02:00
|
|
|
*process_id = process->GetProcessID();
|
2017-10-23 06:15:45 +02:00
|
|
|
return RESULT_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2018-01-06 20:19:28 +01:00
|
|
|
/// Default thread wakeup callback for WaitSynchronization
|
2018-01-08 17:35:03 +01:00
|
|
|
static bool DefaultThreadWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread,
|
2018-09-15 15:21:06 +02:00
|
|
|
SharedPtr<WaitObject> object, std::size_t index) {
|
2018-10-04 00:47:57 +02:00
|
|
|
ASSERT(thread->GetStatus() == ThreadStatus::WaitSynchAny);
|
2018-01-06 20:19:28 +01:00
|
|
|
|
|
|
|
if (reason == ThreadWakeupReason::Timeout) {
|
|
|
|
thread->SetWaitSynchronizationResult(RESULT_TIMEOUT);
|
2018-01-08 17:35:03 +01:00
|
|
|
return true;
|
2018-01-06 20:19:28 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
ASSERT(reason == ThreadWakeupReason::Signal);
|
|
|
|
thread->SetWaitSynchronizationResult(RESULT_SUCCESS);
|
2018-01-09 21:02:43 +01:00
|
|
|
thread->SetWaitSynchronizationOutput(static_cast<u32>(index));
|
2018-01-08 17:35:03 +01:00
|
|
|
return true;
|
2018-01-06 20:19:28 +01:00
|
|
|
};
|
|
|
|
|
2018-01-01 20:47:57 +01:00
|
|
|
/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
static ResultCode WaitSynchronization(Handle* index, VAddr handles_address, u64 handle_count,
                                      s64 nano_seconds) {
    LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, handle_count={}, nano_seconds={}",
              handles_address, handle_count, nano_seconds);

    if (!Memory::IsValidVirtualAddress(handles_address))
        return ERR_INVALID_POINTER;

    // The kernel caps how many handles may be waited on at once.
    static constexpr u64 MaxHandles = 0x40;

    if (handle_count > MaxHandles)
        return ResultCode(ErrorModule::Kernel, ErrCodes::TooLarge);

    auto* const thread = GetCurrentThread();

    // Translate each guest handle into its kernel WaitObject; bail on the first bad one.
    using ObjectPtr = Thread::ThreadWaitObjects::value_type;
    Thread::ThreadWaitObjects objects(handle_count);
    auto& kernel = Core::System::GetInstance().Kernel();

    for (u64 i = 0; i < handle_count; ++i) {
        const Handle handle = Memory::Read32(handles_address + i * sizeof(Handle));
        const auto object = kernel.HandleTable().Get<WaitObject>(handle);

        if (object == nullptr) {
            return ERR_INVALID_HANDLE;
        }

        objects[i] = object;
    }

    // Find the first object that is acquirable in the provided list of objects
    auto itr = std::find_if(objects.begin(), objects.end(), [thread](const ObjectPtr& object) {
        return !object->ShouldWait(thread);
    });

    if (itr != objects.end()) {
        // We found a ready object, acquire it and set the result value
        WaitObject* object = itr->get();
        object->Acquire(thread);
        *index = static_cast<s32>(std::distance(objects.begin(), itr));
        return RESULT_SUCCESS;
    }

    // No objects were ready to be acquired, prepare to suspend the thread.

    // If a timeout value of 0 was provided, just return the Timeout error code instead of
    // suspending the thread.
    if (nano_seconds == 0)
        return RESULT_TIMEOUT;

    // Register this thread with every object so any of them can wake it.
    for (auto& object : objects)
        object->AddWaitingThread(thread);

    thread->SetWaitObjects(std::move(objects));
    thread->SetStatus(ThreadStatus::WaitSynchAny);

    // Create an event to wake the thread up after the specified nanosecond delay has passed
    thread->WakeAfterDelay(nano_seconds);
    thread->SetWakeupCallback(DefaultThreadWakeupCallback);

    Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();

    // RESULT_TIMEOUT here is provisional: the wakeup callback overwrites the thread's
    // synchronization result if an object signals before the delay elapses.
    return RESULT_TIMEOUT;
}
|
|
|
|
|
2018-01-09 21:02:04 +01:00
|
|
|
/// Resumes a thread waiting on WaitSynchronization
|
|
|
|
static ResultCode CancelSynchronization(Handle thread_handle) {
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_TRACE(Kernel_SVC, "called thread=0x{:X}", thread_handle);
|
2018-01-09 21:02:04 +01:00
|
|
|
|
2018-08-28 18:30:33 +02:00
|
|
|
auto& kernel = Core::System::GetInstance().Kernel();
|
|
|
|
const SharedPtr<Thread> thread = kernel.HandleTable().Get<Thread>(thread_handle);
|
2018-01-09 21:02:04 +01:00
|
|
|
if (!thread) {
|
|
|
|
return ERR_INVALID_HANDLE;
|
|
|
|
}
|
|
|
|
|
2018-10-04 00:47:57 +02:00
|
|
|
ASSERT(thread->GetStatus() == ThreadStatus::WaitSynchAny);
|
2018-01-09 21:02:04 +01:00
|
|
|
thread->SetWaitSynchronizationResult(
|
|
|
|
ResultCode(ErrorModule::Kernel, ErrCodes::SynchronizationCanceled));
|
|
|
|
thread->ResumeFromWait();
|
|
|
|
return RESULT_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2018-01-01 20:02:26 +01:00
|
|
|
/// Attempts to locks a mutex, creating it if it does not already exist
|
2018-01-18 02:34:52 +01:00
|
|
|
static ResultCode ArbitrateLock(Handle holding_thread_handle, VAddr mutex_addr,
|
|
|
|
Handle requesting_thread_handle) {
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_TRACE(Kernel_SVC,
|
2018-07-02 18:20:50 +02:00
|
|
|
"called holding_thread_handle=0x{:08X}, mutex_addr=0x{:X}, "
|
|
|
|
"requesting_current_thread_handle=0x{:08X}",
|
|
|
|
holding_thread_handle, mutex_addr, requesting_thread_handle);
|
2018-01-01 20:02:26 +01:00
|
|
|
|
2018-09-18 00:49:51 +02:00
|
|
|
if (Memory::IsKernelVirtualAddress(mutex_addr)) {
|
|
|
|
return ERR_INVALID_ADDRESS_STATE;
|
|
|
|
}
|
|
|
|
|
2018-10-18 19:01:26 +02:00
|
|
|
if (!Common::IsWordAligned(mutex_addr)) {
|
|
|
|
return ERR_INVALID_ADDRESS;
|
|
|
|
}
|
|
|
|
|
2018-08-28 18:30:33 +02:00
|
|
|
auto& handle_table = Core::System::GetInstance().Kernel().HandleTable();
|
|
|
|
return Mutex::TryAcquire(handle_table, mutex_addr, holding_thread_handle,
|
|
|
|
requesting_thread_handle);
|
2018-01-01 20:02:26 +01:00
|
|
|
}
|
|
|
|
|
2018-01-01 20:04:36 +01:00
|
|
|
/// Unlock a mutex
|
2018-01-18 02:34:52 +01:00
|
|
|
static ResultCode ArbitrateUnlock(VAddr mutex_addr) {
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr);
|
2018-01-01 20:04:36 +01:00
|
|
|
|
2018-09-18 00:49:51 +02:00
|
|
|
if (Memory::IsKernelVirtualAddress(mutex_addr)) {
|
|
|
|
return ERR_INVALID_ADDRESS_STATE;
|
|
|
|
}
|
|
|
|
|
2018-10-18 19:01:26 +02:00
|
|
|
if (!Common::IsWordAligned(mutex_addr)) {
|
|
|
|
return ERR_INVALID_ADDRESS;
|
|
|
|
}
|
|
|
|
|
2018-04-20 19:01:14 +02:00
|
|
|
return Mutex::Release(mutex_addr);
|
2018-01-01 20:04:36 +01:00
|
|
|
}
|
|
|
|
|
2018-10-23 06:17:13 +02:00
|
|
|
/// Reason codes passed to svcBreak via the low bits of its `reason` argument.
/// Only the values handled by Break() are listed; note that value 2 is absent.
enum class BreakType : u32 {
    Panic = 0,
    AssertionFailed = 1,
    PreNROLoad = 3,
    PostNROLoad = 4,
    PreNROUnload = 5,
    PostNROUnload = 6,
};
|
|
|
|
|
2018-10-09 03:11:14 +02:00
|
|
|
/// Decodes the raw `reason` value passed to svcBreak.
struct BreakReason {
    union {
        u32 raw;
        // Bits [0, 30): the break type (see BreakType).
        BitField<0, 30, BreakType> break_type;
        // Bit 31: when clear, Break() treats the break as fatal rather than a
        // debugger notification. Bit 30 is not decoded here.
        BitField<31, 1, u32> signal_debugger;
    };
};
|
|
|
|
|
2017-10-14 23:30:07 +02:00
|
|
|
/// Break program execution
///
/// Logs the decoded break reason; if the signal_debugger bit is clear the break is
/// fatal: the current process is terminated and the current thread is stopped.
static void Break(u32 reason, u64 info1, u64 info2) {
    BreakReason break_reason{reason};

    // Log at a severity appropriate to the break type; the program state is unchanged here.
    switch (break_reason.break_type) {
    case BreakType::Panic:
        LOG_CRITICAL(Debug_Emulated, "Signalling debugger, PANIC! info1=0x{:016X}, info2=0x{:016X}",
                     info1, info2);
        break;
    case BreakType::AssertionFailed:
        LOG_CRITICAL(Debug_Emulated,
                     "Signalling debugger, Assertion failed! info1=0x{:016X}, info2=0x{:016X}",
                     info1, info2);
        break;
    case BreakType::PreNROLoad:
        LOG_WARNING(
            Debug_Emulated,
            "Signalling debugger, Attempting to load an NRO at 0x{:016X} with size 0x{:016X}",
            info1, info2);
        break;
    case BreakType::PostNROLoad:
        LOG_WARNING(Debug_Emulated,
                    "Signalling debugger, Loaded an NRO at 0x{:016X} with size 0x{:016X}", info1,
                    info2);
        break;
    case BreakType::PreNROUnload:
        LOG_WARNING(
            Debug_Emulated,
            "Signalling debugger, Attempting to unload an NRO at 0x{:016X} with size 0x{:016X}",
            info1, info2);
        break;
    case BreakType::PostNROUnload:
        LOG_WARNING(Debug_Emulated,
                    "Signalling debugger, Unloaded an NRO at 0x{:016X} with size 0x{:016X}", info1,
                    info2);
        break;
    default:
        LOG_WARNING(
            Debug_Emulated,
            "Signalling debugger, Unknown break reason {}, info1=0x{:016X}, info2=0x{:016X}",
            static_cast<u32>(break_reason.break_type.Value()), info1, info2);
        break;
    }

    // Without the signal_debugger bit the break is fatal to the emulated program.
    if (!break_reason.signal_debugger) {
        LOG_CRITICAL(
            Debug_Emulated,
            "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}",
            reason, info1, info2);
        ASSERT(false);

        Core::CurrentProcess()->PrepareForTermination();

        // Kill the current thread
        GetCurrentThread()->Stop();
        Core::System::GetInstance().PrepareReschedule();
    }
}
|
|
|
|
|
2017-10-14 23:30:07 +02:00
|
|
|
/// Used to output a message on a debug hardware unit - does nothing on a retail unit
|
2018-09-12 10:49:08 +02:00
|
|
|
static void OutputDebugString(VAddr address, u64 len) {
|
2018-09-12 10:51:41 +02:00
|
|
|
if (len == 0) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-04-26 01:11:22 +02:00
|
|
|
std::string str(len, '\0');
|
|
|
|
Memory::ReadBlock(address, str.data(), str.size());
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_DEBUG(Debug_Emulated, "{}", str);
|
2014-05-18 05:37:25 +02:00
|
|
|
}
|
|
|
|
|
2018-01-01 22:01:06 +01:00
|
|
|
/// Gets system/memory information for the current process
static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id) {
    LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id,
              info_sub_id, handle);

    // NOTE(review): `handle` and `info_sub_id` are ignored by every case below;
    // all queries answer for the current process only.
    const auto* current_process = Core::CurrentProcess();
    const auto& vm_manager = current_process->VMManager();

    switch (static_cast<GetInfoType>(info_id)) {
    // Scheduling capabilities of the current process.
    case GetInfoType::AllowedCpuIdBitmask:
        *result = current_process->GetAllowedProcessorMask();
        break;
    case GetInfoType::AllowedThreadPrioBitmask:
        *result = current_process->GetAllowedThreadPriorityMask();
        break;
    // Address-space region extents, as tracked by the process' VMManager.
    case GetInfoType::MapRegionBaseAddr:
        *result = vm_manager.GetMapRegionBaseAddress();
        break;
    case GetInfoType::MapRegionSize:
        *result = vm_manager.GetMapRegionSize();
        break;
    case GetInfoType::HeapRegionBaseAddr:
        *result = vm_manager.GetHeapRegionBaseAddress();
        break;
    case GetInfoType::HeapRegionSize:
        *result = vm_manager.GetHeapRegionSize();
        break;
    case GetInfoType::TotalMemoryUsage:
        *result = vm_manager.GetTotalMemoryUsage();
        break;
    case GetInfoType::TotalHeapUsage:
        *result = vm_manager.GetTotalHeapUsage();
        break;
    // No debugger is attached in this emulator, so always report "not debugged".
    case GetInfoType::IsCurrentProcessBeingDebugged:
        *result = 0;
        break;
    // Stubbed: no entropy source is provided.
    case GetInfoType::RandomEntropy:
        *result = 0;
        break;
    // ASLR region extents (distinct from the full address-space extents above).
    case GetInfoType::ASLRRegionBaseAddr:
        *result = vm_manager.GetASLRRegionBaseAddress();
        break;
    case GetInfoType::ASLRRegionSize:
        *result = vm_manager.GetASLRRegionSize();
        break;
    case GetInfoType::NewMapRegionBaseAddr:
        *result = vm_manager.GetNewMapRegionBaseAddress();
        break;
    case GetInfoType::NewMapRegionSize:
        *result = vm_manager.GetNewMapRegionSize();
        break;
    case GetInfoType::IsVirtualAddressMemoryEnabled:
        *result = current_process->IsVirtualMemoryEnabled();
        break;
    case GetInfoType::TitleId:
        *result = current_process->GetTitleID();
        break;
    // Stubbed queries: log and return 0.
    case GetInfoType::PrivilegedProcessId:
        LOG_WARNING(Kernel_SVC,
                    "(STUBBED) Attempted to query privileged process id bounds, returned 0");
        *result = 0;
        break;
    case GetInfoType::UserExceptionContextAddr:
        LOG_WARNING(Kernel_SVC,
                    "(STUBBED) Attempted to query user exception context address, returned 0");
        *result = 0;
        break;
    default:
        UNIMPLEMENTED();
    }

    return RESULT_SUCCESS;
}
|
|
|
|
|
2018-04-03 05:50:17 +02:00
|
|
|
/// Sets the thread activity
/// Stub: logs its arguments and reports success without changing the thread's state.
static ResultCode SetThreadActivity(Handle handle, u32 unknown) {
    LOG_WARNING(Kernel_SVC, "(STUBBED) called, handle=0x{:08X}, unknown=0x{:08X}", handle, unknown);
    return RESULT_SUCCESS;
}
|
|
|
|
|
|
|
|
/// Gets the thread context
///
/// Writes a sanitized copy of the target thread's CPU context to guest memory at
/// `thread_context`. The target must belong to the current process and must not be
/// the calling thread itself.
static ResultCode GetThreadContext(VAddr thread_context, Handle handle) {
    LOG_DEBUG(Kernel_SVC, "called, context=0x{:08X}, thread=0x{:X}", thread_context, handle);

    auto& kernel = Core::System::GetInstance().Kernel();
    const SharedPtr<Thread> thread = kernel.HandleTable().Get<Thread>(handle);
    if (!thread) {
        return ERR_INVALID_HANDLE;
    }

    // Threads owned by another process are treated as invalid handles.
    const auto* current_process = Core::CurrentProcess();
    if (thread->GetOwnerProcess() != current_process) {
        return ERR_INVALID_HANDLE;
    }

    // A thread cannot fetch its own (in-flight) context.
    if (thread == GetCurrentThread()) {
        return ERR_ALREADY_REGISTERED;
    }

    Core::ARM_Interface::ThreadContext ctx = thread->GetContext();

    // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
    ctx.pstate &= 0xFF0FFE20;

    // If 64-bit, we can just write the context registers directly and we're good.
    // However, if 32-bit, we have to ensure some registers are zeroed out.
    if (!current_process->Is64BitProcess()) {
        // AArch32 exposes only r0-r14 and 16 vector registers; zero the rest.
        std::fill(ctx.cpu_registers.begin() + 15, ctx.cpu_registers.end(), 0);
        std::fill(ctx.vector_registers.begin() + 16, ctx.vector_registers.end(), u128{});
    }

    Memory::WriteBlock(thread_context, &ctx, sizeof(ctx));
    return RESULT_SUCCESS;
}
|
|
|
|
|
2014-06-02 04:12:54 +02:00
|
|
|
/// Gets the priority for the specified thread
|
2017-12-31 22:06:11 +01:00
|
|
|
static ResultCode GetThreadPriority(u32* priority, Handle handle) {
|
2018-08-28 18:30:33 +02:00
|
|
|
auto& kernel = Core::System::GetInstance().Kernel();
|
|
|
|
const SharedPtr<Thread> thread = kernel.HandleTable().Get<Thread>(handle);
|
2017-12-31 22:06:11 +01:00
|
|
|
if (!thread)
|
|
|
|
return ERR_INVALID_HANDLE;
|
|
|
|
|
|
|
|
*priority = thread->GetPriority();
|
2015-01-23 06:36:58 +01:00
|
|
|
return RESULT_SUCCESS;
|
2014-12-04 00:49:51 +01:00
|
|
|
}
|
|
|
|
|
2017-12-31 21:58:16 +01:00
|
|
|
/// Sets the priority for the specified thread
///
/// @param handle   Guest handle of the thread to modify.
/// @param priority New priority value; must not exceed THREADPRIO_LOWEST.
/// @return ERR_INVALID_THREAD_PRIORITY for out-of-range priorities,
///         ERR_INVALID_HANDLE for a bad handle, ERR_NOT_AUTHORIZED when the
///         process' resource limit forbids the requested priority,
///         otherwise RESULT_SUCCESS.
static ResultCode SetThreadPriority(Handle handle, u32 priority) {
    // Reject priorities numerically above the lowest allowed priority value.
    if (priority > THREADPRIO_LOWEST) {
        return ERR_INVALID_THREAD_PRIORITY;
    }

    auto& kernel = Core::System::GetInstance().Kernel();
    SharedPtr<Thread> thread = kernel.HandleTable().Get<Thread>(handle);
    if (!thread)
        return ERR_INVALID_HANDLE;

    // Note: The kernel uses the current process's resource limit instead of
    // the one from the thread owner's resource limit.
    // NOTE(review): this assumes a numerically smaller value means a higher
    // priority, so requests "above" the limit are rejected — confirm against
    // ResourceLimit::GetMaxResourceValue semantics.
    const ResourceLimit& resource_limit = Core::CurrentProcess()->GetResourceLimit();
    if (resource_limit.GetMaxResourceValue(ResourceType::Priority) > priority) {
        return ERR_NOT_AUTHORIZED;
    }

    thread->SetPriority(priority);

    // The priority change may affect scheduling on the thread's core.
    Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
    return RESULT_SUCCESS;
}
|
|
|
|
|
2017-12-31 22:01:04 +01:00
|
|
|
/// Get which CPU core is executing the current thread
///
/// @return The processor ID of the thread currently running on this core.
static u32 GetCurrentProcessorNumber() {
    LOG_TRACE(Kernel_SVC, "called");
    return GetCurrentThread()->GetProcessorID();
}
|
|
|
|
|
2018-01-14 23:15:31 +01:00
|
|
|
/// Maps a shared memory object into the current process' address space.
///
/// @param shared_memory_handle Handle of the shared memory object to map.
/// @param addr                 Page-aligned target address inside the ASLR region.
/// @param size                 Page-aligned, non-zero size of the mapping.
/// @param permissions          Requested access (Read or ReadWrite only).
static ResultCode MapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 size,
                                  u32 permissions) {
    LOG_TRACE(Kernel_SVC,
              "called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
              shared_memory_handle, addr, size, permissions);

    // The target address must lie on a page boundary.
    if (!Common::Is4KBAligned(addr)) {
        return ERR_INVALID_ADDRESS;
    }

    // The size must be a non-zero multiple of the page size.
    if (size == 0 || !Common::Is4KBAligned(size)) {
        return ERR_INVALID_SIZE;
    }

    // The region must form a valid range within the address space.
    if (!IsValidAddressRange(addr, size)) {
        return ERR_INVALID_ADDRESS_STATE;
    }

    // Only Read and ReadWrite mappings are accepted.
    const auto requested_permission = static_cast<MemoryPermission>(permissions);
    const bool permission_ok = requested_permission == MemoryPermission::Read ||
                               requested_permission == MemoryPermission::ReadWrite;
    if (!permission_ok) {
        LOG_ERROR(Kernel_SVC, "Invalid permissions=0x{:08X}", permissions);
        return ERR_INVALID_MEMORY_PERMISSIONS;
    }

    auto shared_memory =
        Core::System::GetInstance().Kernel().HandleTable().Get<SharedMemory>(shared_memory_handle);
    if (!shared_memory) {
        return ERR_INVALID_HANDLE;
    }

    // The mapping must land inside the process' ASLR region.
    auto* const target_process = Core::CurrentProcess();
    if (!target_process->VMManager().IsWithinASLRRegion(addr, size)) {
        return ERR_INVALID_MEMORY_RANGE;
    }

    return shared_memory->Map(target_process, addr, requested_permission,
                              MemoryPermission::DontCare);
}
|
|
|
|
|
2018-02-22 20:16:43 +01:00
|
|
|
/// Unmaps a previously mapped shared memory object from the current process.
///
/// @param shared_memory_handle Handle of the shared memory object to unmap.
/// @param addr                 Page-aligned address the object was mapped at.
/// @param size                 Page-aligned, non-zero size of the mapping.
static ResultCode UnmapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 size) {
    LOG_WARNING(Kernel_SVC, "called, shared_memory_handle=0x{:08X}, addr=0x{:X}, size=0x{:X}",
                shared_memory_handle, addr, size);

    // The address must lie on a page boundary.
    if (!Common::Is4KBAligned(addr)) {
        return ERR_INVALID_ADDRESS;
    }

    // The size must be a non-zero multiple of the page size.
    if (size == 0 || !Common::Is4KBAligned(size)) {
        return ERR_INVALID_SIZE;
    }

    // The region must form a valid range within the address space.
    if (!IsValidAddressRange(addr, size)) {
        return ERR_INVALID_ADDRESS_STATE;
    }

    auto shared_memory =
        Core::System::GetInstance().Kernel().HandleTable().Get<SharedMemory>(shared_memory_handle);
    if (!shared_memory) {
        return ERR_INVALID_HANDLE;
    }

    // The region must lie inside the process' ASLR region.
    auto* const owning_process = Core::CurrentProcess();
    if (!owning_process->VMManager().IsWithinASLRRegion(addr, size)) {
        return ERR_INVALID_MEMORY_RANGE;
    }

    return shared_memory->Unmap(owning_process, addr);
}
|
|
|
|
|
2015-07-17 21:45:12 +02:00
|
|
|
/// Query process memory
|
2017-10-14 23:30:07 +02:00
|
|
|
static ResultCode QueryProcessMemory(MemoryInfo* memory_info, PageInfo* /*page_info*/,
|
2018-01-03 02:40:30 +01:00
|
|
|
Handle process_handle, u64 addr) {
|
2018-08-28 18:30:33 +02:00
|
|
|
|
|
|
|
auto& kernel = Core::System::GetInstance().Kernel();
|
|
|
|
SharedPtr<Process> process = kernel.HandleTable().Get<Process>(process_handle);
|
2017-12-30 19:40:28 +01:00
|
|
|
if (!process) {
|
2015-07-17 21:45:12 +02:00
|
|
|
return ERR_INVALID_HANDLE;
|
2017-10-20 05:00:46 +02:00
|
|
|
}
|
2018-09-30 00:47:00 +02:00
|
|
|
auto vma = process->VMManager().FindVMA(addr);
|
2017-10-20 05:00:46 +02:00
|
|
|
memory_info->attributes = 0;
|
2018-10-20 20:55:39 +02:00
|
|
|
if (vma == process->VMManager().vma_map.end()) {
|
2017-10-14 23:30:07 +02:00
|
|
|
memory_info->base_address = 0;
|
2018-01-03 02:40:30 +01:00
|
|
|
memory_info->permission = static_cast<u32>(VMAPermission::None);
|
2017-10-14 23:30:07 +02:00
|
|
|
memory_info->size = 0;
|
2018-03-10 23:46:23 +01:00
|
|
|
memory_info->type = static_cast<u32>(MemoryState::Unmapped);
|
2017-10-20 05:00:46 +02:00
|
|
|
} else {
|
|
|
|
memory_info->base_address = vma->second.base;
|
|
|
|
memory_info->permission = static_cast<u32>(vma->second.permissions);
|
|
|
|
memory_info->size = vma->second.size;
|
|
|
|
memory_info->type = static_cast<u32>(vma->second.meminfo_state);
|
2017-10-14 23:30:07 +02:00
|
|
|
}
|
2017-12-30 19:40:28 +01:00
|
|
|
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_TRACE(Kernel_SVC, "called process=0x{:08X} addr={:X}", process_handle, addr);
|
2015-01-23 06:36:58 +01:00
|
|
|
return RESULT_SUCCESS;
|
2014-05-16 02:17:30 +02:00
|
|
|
}
|
|
|
|
|
2015-07-17 21:45:12 +02:00
|
|
|
/// Query memory
///
/// Convenience wrapper that queries the calling process' own address space.
/// Forwards to QueryProcessMemory using the CurrentProcess pseudo-handle.
static ResultCode QueryMemory(MemoryInfo* memory_info, PageInfo* page_info, VAddr addr) {
    LOG_TRACE(Kernel_SVC, "called, addr={:X}", addr);
    return QueryProcessMemory(memory_info, page_info, CurrentProcess, addr);
}
|
|
|
|
|
2018-01-01 20:38:34 +01:00
|
|
|
/// Exits the current process
///
/// Marks the current process for termination, stops the calling thread,
/// and requests a reschedule. Control does not return to guest code on
/// this thread.
static void ExitProcess() {
    auto* current_process = Core::CurrentProcess();

    LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());

    // A second exit indicates a guest (or emulation) logic error.
    ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running,
               "Process has already exited");

    current_process->PrepareForTermination();

    // Kill the current thread
    GetCurrentThread()->Stop();

    Core::System::GetInstance().PrepareReschedule();
}
|
|
|
|
|
2017-12-31 22:10:01 +01:00
|
|
|
/// Creates a new thread
///
/// @param out_handle   Out-parameter receiving the new thread's guest handle.
/// @param entry_point  Guest address the thread starts executing at.
/// @param arg          Argument passed to the thread's entry point.
/// @param stack_top    Guest address of the top of the thread's stack.
/// @param priority     Initial priority; must not exceed THREADPRIO_LOWEST
///                     and must be allowed by the process' resource limit.
/// @param processor_id Target core (0-3), or THREADPROCESSORID_DEFAULT to use
///                     the owning process' default core.
/// @return RESULT_SUCCESS on success; otherwise the first validation or
///         creation error encountered.
static ResultCode CreateThread(Handle* out_handle, VAddr entry_point, u64 arg, VAddr stack_top,
                               u32 priority, s32 processor_id) {
    // Debug name for the new thread, derived from its entry point.
    std::string name = fmt::format("thread-{:X}", entry_point);

    if (priority > THREADPRIO_LOWEST) {
        return ERR_INVALID_THREAD_PRIORITY;
    }

    // The requested priority must be permitted by the current process' limit.
    const ResourceLimit& resource_limit = Core::CurrentProcess()->GetResourceLimit();
    if (resource_limit.GetMaxResourceValue(ResourceType::Priority) > priority) {
        return ERR_NOT_AUTHORIZED;
    }

    if (processor_id == THREADPROCESSORID_DEFAULT) {
        // Set the target CPU to the one specified in the process' exheader.
        processor_id = Core::CurrentProcess()->GetDefaultProcessorID();
        ASSERT(processor_id != THREADPROCESSORID_DEFAULT);
    }

    // Only the four physical cores are valid targets.
    switch (processor_id) {
    case THREADPROCESSORID_0:
    case THREADPROCESSORID_1:
    case THREADPROCESSORID_2:
    case THREADPROCESSORID_3:
        break;
    default:
        LOG_ERROR(Kernel_SVC, "Invalid thread processor ID: {}", processor_id);
        return ERR_INVALID_PROCESSOR_ID;
    }

    // CASCADE_RESULT propagates any creation failure to the caller.
    auto& kernel = Core::System::GetInstance().Kernel();
    CASCADE_RESULT(SharedPtr<Thread> thread,
                   Thread::Create(kernel, name, entry_point, priority, arg, processor_id, stack_top,
                                  *Core::CurrentProcess()));
    const auto new_guest_handle = kernel.HandleTable().Create(thread);
    if (new_guest_handle.Failed()) {
        return new_guest_handle.Code();
    }
    thread->SetGuestHandle(*new_guest_handle);
    *out_handle = *new_guest_handle;

    // The new thread may need to be scheduled on its target core.
    Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();

    LOG_TRACE(Kernel_SVC,
              "called entrypoint=0x{:08X} ({}), arg=0x{:08X}, stacktop=0x{:08X}, "
              "threadpriority=0x{:08X}, processorid=0x{:08X} : created handle=0x{:08X}",
              entry_point, name, arg, stack_top, priority, processor_id, *out_handle);

    return RESULT_SUCCESS;
}
|
|
|
|
|
2017-12-30 19:40:28 +01:00
|
|
|
/// Starts the thread for the provided handle
|
2017-12-30 19:37:07 +01:00
|
|
|
static ResultCode StartThread(Handle thread_handle) {
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_TRACE(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
|
2017-12-30 19:37:07 +01:00
|
|
|
|
2018-08-28 18:30:33 +02:00
|
|
|
auto& kernel = Core::System::GetInstance().Kernel();
|
|
|
|
const SharedPtr<Thread> thread = kernel.HandleTable().Get<Thread>(thread_handle);
|
2017-12-30 19:37:07 +01:00
|
|
|
if (!thread) {
|
|
|
|
return ERR_INVALID_HANDLE;
|
|
|
|
}
|
|
|
|
|
2018-10-04 00:47:57 +02:00
|
|
|
ASSERT(thread->GetStatus() == ThreadStatus::Dormant);
|
2018-05-19 23:57:44 +02:00
|
|
|
|
2017-12-30 19:37:07 +01:00
|
|
|
thread->ResumeFromWait();
|
2018-10-04 00:47:57 +02:00
|
|
|
Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
|
2017-12-30 19:37:07 +01:00
|
|
|
|
|
|
|
return RESULT_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2017-12-31 22:11:27 +01:00
|
|
|
/// Called when a thread exits
///
/// Terminates the calling thread and requests a reschedule; control does not
/// return to guest code on this thread.
static void ExitThread() {
    LOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", Core::CurrentArmInterface().GetPC());

    ExitCurrentThread();
    Core::System::GetInstance().PrepareReschedule();
}
|
|
|
|
|
2014-06-01 16:37:19 +02:00
|
|
|
/// Sleep the current thread
|
2014-11-17 04:58:39 +01:00
|
|
|
static void SleepThread(s64 nanoseconds) {
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds);
|
2014-11-26 06:34:14 +01:00
|
|
|
|
2017-01-05 20:14:22 +01:00
|
|
|
// Don't attempt to yield execution if there are no available threads to run,
|
|
|
|
// this way we avoid a useless reschedule to the idle thread.
|
2018-05-03 04:36:51 +02:00
|
|
|
if (nanoseconds == 0 && !Core::System::GetInstance().CurrentScheduler().HaveReadyThreads())
|
2017-01-05 20:14:22 +01:00
|
|
|
return;
|
|
|
|
|
2014-12-20 08:32:19 +01:00
|
|
|
// Sleep current thread and check for next thread to schedule
|
2018-01-03 02:40:30 +01:00
|
|
|
WaitCurrentThread_Sleep();
|
2015-01-07 22:40:08 +01:00
|
|
|
|
|
|
|
// Create an event to wake the thread up after the specified nanosecond delay has passed
|
2018-01-03 02:40:30 +01:00
|
|
|
GetCurrentThread()->WakeAfterDelay(nanoseconds);
|
2017-01-01 17:57:02 +01:00
|
|
|
|
|
|
|
Core::System::GetInstance().PrepareReschedule();
|
2014-06-01 16:37:19 +02:00
|
|
|
}
|
|
|
|
|
2018-06-21 08:49:43 +02:00
|
|
|
/// Wait process wide key atomic
///
/// Atomically releases the mutex at `mutex_addr` and blocks the calling
/// thread on the condition variable at `condition_variable_addr` (the
/// guest-side condvar-wait primitive).
/// @param mutex_addr               Guest address of the mutex to release.
/// @param condition_variable_addr  Guest address of the condition variable.
/// @param thread_handle            Handle the waiter re-acquires the mutex as.
/// @param nano_seconds             Wait timeout in nanoseconds.
static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_variable_addr,
                                           Handle thread_handle, s64 nano_seconds) {
    LOG_TRACE(
        Kernel_SVC,
        "called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}",
        mutex_addr, condition_variable_addr, thread_handle, nano_seconds);

    auto& kernel = Core::System::GetInstance().Kernel();
    SharedPtr<Thread> thread = kernel.HandleTable().Get<Thread>(thread_handle);
    ASSERT(thread);

    // Release the mutex first; a failure here aborts the wait.
    CASCADE_CODE(Mutex::Release(mutex_addr));

    // Record everything the wake-up path needs to re-acquire the mutex, then
    // park the calling thread.
    SharedPtr<Thread> current_thread = GetCurrentThread();
    current_thread->SetCondVarWaitAddress(condition_variable_addr);
    current_thread->SetMutexWaitAddress(mutex_addr);
    current_thread->SetWaitHandle(thread_handle);
    current_thread->SetStatus(ThreadStatus::WaitMutex);
    current_thread->InvalidateWakeupCallback();

    // Schedule the timeout wake-up.
    current_thread->WakeAfterDelay(nano_seconds);

    // Note: Deliberately don't attempt to inherit the lock owner's priority.

    Core::System::GetInstance().CpuCore(current_thread->GetProcessorID()).PrepareReschedule();
    return RESULT_SUCCESS;
}
|
|
|
|
|
2017-10-14 23:30:07 +02:00
|
|
|
/// Signal process wide key
|
2018-01-09 03:41:37 +01:00
|
|
|
static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target) {
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}",
|
2018-07-02 18:20:50 +02:00
|
|
|
condition_variable_addr, target);
|
2018-01-07 22:55:17 +01:00
|
|
|
|
2018-10-04 00:47:57 +02:00
|
|
|
const auto RetrieveWaitingThreads = [](std::size_t core_index,
|
|
|
|
std::vector<SharedPtr<Thread>>& waiting_threads,
|
|
|
|
VAddr condvar_addr) {
|
2018-09-15 15:21:06 +02:00
|
|
|
const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
|
2018-10-15 15:25:11 +02:00
|
|
|
const auto& thread_list = scheduler.GetThreadList();
|
2018-05-19 23:58:30 +02:00
|
|
|
|
2018-10-04 00:47:57 +02:00
|
|
|
for (const auto& thread : thread_list) {
|
|
|
|
if (thread->GetCondVarWaitAddress() == condvar_addr)
|
2018-09-15 15:21:06 +02:00
|
|
|
waiting_threads.push_back(thread);
|
|
|
|
}
|
|
|
|
};
|
2018-05-19 23:58:30 +02:00
|
|
|
|
|
|
|
// Retrieve a list of all threads that are waiting for this condition variable.
|
|
|
|
std::vector<SharedPtr<Thread>> waiting_threads;
|
|
|
|
RetrieveWaitingThreads(0, waiting_threads, condition_variable_addr);
|
|
|
|
RetrieveWaitingThreads(1, waiting_threads, condition_variable_addr);
|
|
|
|
RetrieveWaitingThreads(2, waiting_threads, condition_variable_addr);
|
|
|
|
RetrieveWaitingThreads(3, waiting_threads, condition_variable_addr);
|
|
|
|
// Sort them by priority, such that the highest priority ones come first.
|
|
|
|
std::sort(waiting_threads.begin(), waiting_threads.end(),
|
|
|
|
[](const SharedPtr<Thread>& lhs, const SharedPtr<Thread>& rhs) {
|
2018-10-04 00:47:57 +02:00
|
|
|
return lhs->GetPriority() < rhs->GetPriority();
|
2018-05-19 23:58:30 +02:00
|
|
|
});
|
|
|
|
|
|
|
|
// Only process up to 'target' threads, unless 'target' is -1, in which case process
|
|
|
|
// them all.
|
2018-09-15 15:21:06 +02:00
|
|
|
std::size_t last = waiting_threads.size();
|
2018-05-19 23:58:30 +02:00
|
|
|
if (target != -1)
|
|
|
|
last = target;
|
|
|
|
|
|
|
|
// If there are no threads waiting on this condition variable, just exit
|
|
|
|
if (last > waiting_threads.size())
|
|
|
|
return RESULT_SUCCESS;
|
2018-05-06 04:00:34 +02:00
|
|
|
|
2018-09-15 15:21:06 +02:00
|
|
|
for (std::size_t index = 0; index < last; ++index) {
|
2018-05-19 23:58:30 +02:00
|
|
|
auto& thread = waiting_threads[index];
|
2018-05-06 04:00:34 +02:00
|
|
|
|
2018-10-04 00:47:57 +02:00
|
|
|
ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr);
|
2018-05-06 04:00:34 +02:00
|
|
|
|
2018-09-15 15:21:06 +02:00
|
|
|
std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
|
2018-07-22 19:27:24 +02:00
|
|
|
|
|
|
|
auto& monitor = Core::System::GetInstance().Monitor();
|
|
|
|
|
|
|
|
// Atomically read the value of the mutex.
|
|
|
|
u32 mutex_val = 0;
|
|
|
|
do {
|
2018-10-04 00:47:57 +02:00
|
|
|
monitor.SetExclusive(current_core, thread->GetMutexWaitAddress());
|
2018-07-22 19:27:24 +02:00
|
|
|
|
|
|
|
// If the mutex is not yet acquired, acquire it.
|
2018-10-04 00:47:57 +02:00
|
|
|
mutex_val = Memory::Read32(thread->GetMutexWaitAddress());
|
2018-07-22 19:27:24 +02:00
|
|
|
|
|
|
|
if (mutex_val != 0) {
|
|
|
|
monitor.ClearExclusive();
|
|
|
|
break;
|
|
|
|
}
|
2018-10-04 00:47:57 +02:00
|
|
|
} while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(),
|
|
|
|
thread->GetWaitHandle()));
|
2018-05-19 23:58:30 +02:00
|
|
|
|
|
|
|
if (mutex_val == 0) {
|
|
|
|
// We were able to acquire the mutex, resume this thread.
|
2018-10-04 00:47:57 +02:00
|
|
|
ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
|
2018-05-19 23:58:30 +02:00
|
|
|
thread->ResumeFromWait();
|
2018-01-07 22:55:17 +01:00
|
|
|
|
2018-10-04 00:47:57 +02:00
|
|
|
auto* const lock_owner = thread->GetLockOwner();
|
|
|
|
if (lock_owner != nullptr) {
|
2018-05-19 23:58:30 +02:00
|
|
|
lock_owner->RemoveMutexWaiter(thread);
|
2018-10-04 00:47:57 +02:00
|
|
|
}
|
2018-05-19 23:58:30 +02:00
|
|
|
|
2018-10-04 00:47:57 +02:00
|
|
|
thread->SetLockOwner(nullptr);
|
|
|
|
thread->SetMutexWaitAddress(0);
|
|
|
|
thread->SetCondVarWaitAddress(0);
|
|
|
|
thread->SetWaitHandle(0);
|
2018-05-19 23:58:30 +02:00
|
|
|
} else {
|
2018-07-22 19:27:24 +02:00
|
|
|
// Atomically signal that the mutex now has a waiting thread.
|
|
|
|
do {
|
2018-10-04 00:47:57 +02:00
|
|
|
monitor.SetExclusive(current_core, thread->GetMutexWaitAddress());
|
2018-07-22 19:27:24 +02:00
|
|
|
|
|
|
|
// Ensure that the mutex value is still what we expect.
|
2018-10-04 00:47:57 +02:00
|
|
|
u32 value = Memory::Read32(thread->GetMutexWaitAddress());
|
2018-07-22 19:27:24 +02:00
|
|
|
// TODO(Subv): When this happens, the kernel just clears the exclusive state and
|
|
|
|
// retries the initial read for this thread.
|
|
|
|
ASSERT_MSG(mutex_val == value, "Unhandled synchronization primitive case");
|
2018-10-04 00:47:57 +02:00
|
|
|
} while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(),
|
2018-07-22 19:27:24 +02:00
|
|
|
mutex_val | Mutex::MutexHasWaitersFlag));
|
|
|
|
|
|
|
|
// The mutex is already owned by some other thread, make this thread wait on it.
|
2018-08-28 18:30:33 +02:00
|
|
|
auto& kernel = Core::System::GetInstance().Kernel();
|
2018-05-19 23:58:30 +02:00
|
|
|
Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask);
|
2018-08-28 18:30:33 +02:00
|
|
|
auto owner = kernel.HandleTable().Get<Thread>(owner_handle);
|
2018-05-19 23:58:30 +02:00
|
|
|
ASSERT(owner);
|
2018-10-04 00:47:57 +02:00
|
|
|
ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
|
|
|
|
thread->InvalidateWakeupCallback();
|
2018-05-19 23:58:30 +02:00
|
|
|
|
|
|
|
owner->AddMutexWaiter(thread);
|
|
|
|
|
2018-10-04 00:47:57 +02:00
|
|
|
Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
|
2018-05-19 23:58:30 +02:00
|
|
|
}
|
|
|
|
}
|
2018-01-07 22:55:17 +01:00
|
|
|
|
2015-01-23 06:36:58 +01:00
|
|
|
return RESULT_SUCCESS;
|
2014-12-09 05:52:27 +01:00
|
|
|
}
|
|
|
|
|
2018-06-21 08:49:43 +02:00
|
|
|
// Wait for an address (via Address Arbiter)
|
|
|
|
static ResultCode WaitForAddress(VAddr address, u32 type, s32 value, s64 timeout) {
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_WARNING(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, timeout={}",
|
2018-07-02 18:20:50 +02:00
|
|
|
address, type, value, timeout);
|
2018-06-21 08:49:43 +02:00
|
|
|
// If the passed address is a kernel virtual address, return invalid memory state.
|
2018-06-22 08:47:59 +02:00
|
|
|
if (Memory::IsKernelVirtualAddress(address)) {
|
2018-06-21 08:49:43 +02:00
|
|
|
return ERR_INVALID_ADDRESS_STATE;
|
|
|
|
}
|
|
|
|
// If the address is not properly aligned to 4 bytes, return invalid address.
|
|
|
|
if (address % sizeof(u32) != 0) {
|
|
|
|
return ERR_INVALID_ADDRESS;
|
|
|
|
}
|
|
|
|
|
2018-06-22 08:47:59 +02:00
|
|
|
switch (static_cast<AddressArbiter::ArbitrationType>(type)) {
|
2018-06-22 05:09:51 +02:00
|
|
|
case AddressArbiter::ArbitrationType::WaitIfLessThan:
|
|
|
|
return AddressArbiter::WaitForAddressIfLessThan(address, value, timeout, false);
|
|
|
|
case AddressArbiter::ArbitrationType::DecrementAndWaitIfLessThan:
|
|
|
|
return AddressArbiter::WaitForAddressIfLessThan(address, value, timeout, true);
|
|
|
|
case AddressArbiter::ArbitrationType::WaitIfEqual:
|
|
|
|
return AddressArbiter::WaitForAddressIfEqual(address, value, timeout);
|
|
|
|
default:
|
|
|
|
return ERR_INVALID_ENUM_VALUE;
|
2018-06-21 08:49:43 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Signals to an address (via Address Arbiter)
|
|
|
|
static ResultCode SignalToAddress(VAddr address, u32 type, s32 value, s32 num_to_wake) {
|
2018-07-02 18:20:50 +02:00
|
|
|
LOG_WARNING(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, num_to_wake=0x{:X}",
|
|
|
|
address, type, value, num_to_wake);
|
2018-06-21 08:49:43 +02:00
|
|
|
// If the passed address is a kernel virtual address, return invalid memory state.
|
2018-06-22 08:47:59 +02:00
|
|
|
if (Memory::IsKernelVirtualAddress(address)) {
|
2018-06-21 08:49:43 +02:00
|
|
|
return ERR_INVALID_ADDRESS_STATE;
|
|
|
|
}
|
|
|
|
// If the address is not properly aligned to 4 bytes, return invalid address.
|
|
|
|
if (address % sizeof(u32) != 0) {
|
|
|
|
return ERR_INVALID_ADDRESS;
|
|
|
|
}
|
|
|
|
|
2018-06-22 08:47:59 +02:00
|
|
|
switch (static_cast<AddressArbiter::SignalType>(type)) {
|
2018-06-22 05:09:51 +02:00
|
|
|
case AddressArbiter::SignalType::Signal:
|
|
|
|
return AddressArbiter::SignalToAddress(address, num_to_wake);
|
|
|
|
case AddressArbiter::SignalType::IncrementAndSignalIfEqual:
|
|
|
|
return AddressArbiter::IncrementAndSignalToAddressIfEqual(address, value, num_to_wake);
|
|
|
|
case AddressArbiter::SignalType::ModifyByWaitingCountAndSignalIfEqual:
|
|
|
|
return AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(address, value,
|
|
|
|
num_to_wake);
|
|
|
|
default:
|
|
|
|
return ERR_INVALID_ENUM_VALUE;
|
2018-06-21 08:49:43 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-01-12 03:59:31 +01:00
|
|
|
/// This returns the total CPU ticks elapsed since the CPU was powered-on
|
|
|
|
static u64 GetSystemTick() {
|
|
|
|
const u64 result{CoreTiming::GetTicks()};
|
|
|
|
|
|
|
|
// Advance time to defeat dumb games that busy-wait for the frame to end.
|
|
|
|
CoreTiming::AddTicks(400);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2017-10-14 23:30:07 +02:00
|
|
|
/// Close a handle
|
2018-01-03 02:40:30 +01:00
|
|
|
static ResultCode CloseHandle(Handle handle) {
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle);
|
2018-08-28 18:30:33 +02:00
|
|
|
|
|
|
|
auto& kernel = Core::System::GetInstance().Kernel();
|
|
|
|
return kernel.HandleTable().Close(handle);
|
2015-08-06 02:39:53 +02:00
|
|
|
}
|
|
|
|
|
2018-01-08 03:24:19 +01:00
|
|
|
/// Reset an event
|
|
|
|
static ResultCode ResetSignal(Handle handle) {
|
2018-07-02 18:13:26 +02:00
|
|
|
LOG_WARNING(Kernel_SVC, "(STUBBED) called handle 0x{:08X}", handle);
|
2018-08-28 18:30:33 +02:00
|
|
|
|
|
|
|
auto& kernel = Core::System::GetInstance().Kernel();
|
|
|
|
auto event = kernel.HandleTable().Get<Event>(handle);
|
|
|
|
|
2018-01-08 03:24:19 +01:00
|
|
|
ASSERT(event != nullptr);
|
2018-08-28 18:30:33 +02:00
|
|
|
|
2018-01-08 03:24:19 +01:00
|
|
|
event->Clear();
|
|
|
|
return RESULT_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Creates a TransferMemory object
///
/// Stub: no transfer memory object is actually created; the out-handle is set
/// to 0 and success is reported.
/// @param handle      Out-parameter; always set to 0 by this stub.
/// @param addr        Guest address backing the transfer memory (unused).
/// @param size        Size of the region (unused).
/// @param permissions Requested permissions (unused).
/// @return Always RESULT_SUCCESS.
static ResultCode CreateTransferMemory(Handle* handle, VAddr addr, u64 size, u32 permissions) {
    LOG_WARNING(Kernel_SVC, "(STUBBED) called addr=0x{:X}, size=0x{:X}, perms=0x{:08X}", addr, size,
                permissions);
    *handle = 0;
    return RESULT_SUCCESS;
}
|
|
|
|
|
2018-05-06 05:13:15 +02:00
|
|
|
/// Retrieves a thread's ideal core and CPU affinity mask.
///
/// @param thread_handle Guest handle of the thread to query.
/// @param core          Out-parameter receiving the thread's ideal core.
/// @param mask          Out-parameter receiving the thread's affinity mask.
static ResultCode GetThreadCoreMask(Handle thread_handle, u32* core, u64* mask) {
    LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);

    auto& handle_table = Core::System::GetInstance().Kernel().HandleTable();
    const SharedPtr<Thread> queried_thread = handle_table.Get<Thread>(thread_handle);
    if (!queried_thread) {
        return ERR_INVALID_HANDLE;
    }

    *core = queried_thread->GetIdealCore();
    *mask = queried_thread->GetAffinityMask();
    return RESULT_SUCCESS;
}
|
|
|
|
|
2018-05-06 05:13:15 +02:00
|
|
|
/// Sets a thread's ideal core and CPU affinity mask.
///
/// @param thread_handle Guest handle of the thread to modify.
/// @param core          New ideal core; special values: THREADPROCESSORID_DEFAULT
///                      uses the owning process' default core, -3 keeps the
///                      current ideal core, -1 means "no ideal core".
/// @param mask          Affinity bitmask of cores the thread may run on; must
///                      be non-zero and must include `core` when `core` names
///                      a physical core.
static ResultCode SetThreadCoreMask(Handle thread_handle, u32 core, u64 mask) {
    LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, mask=0x{:16X}, core=0x{:X}", thread_handle,
              mask, core);

    auto& kernel = Core::System::GetInstance().Kernel();
    const SharedPtr<Thread> thread = kernel.HandleTable().Get<Thread>(thread_handle);
    if (!thread) {
        return ERR_INVALID_HANDLE;
    }

    // Both sides of the comparison are widened to u32 here; comparing the raw
    // u8 default_processor_id against the s32 enumerator directly would never
    // match (integral promotion drops the sign).
    if (core == static_cast<u32>(THREADPROCESSORID_DEFAULT)) {
        const u8 default_processor_id = thread->GetOwnerProcess()->GetDefaultProcessorID();

        ASSERT(default_processor_id != static_cast<u8>(THREADPROCESSORID_DEFAULT));

        // Set the target CPU to the one specified in the process' exheader.
        core = default_processor_id;
        mask = 1ULL << core;
    }

    // An empty affinity mask would leave the thread with nowhere to run.
    if (mask == 0) {
        return ResultCode(ErrorModule::Kernel, ErrCodes::InvalidCombination);
    }

    /// This value is used to only change the affinity mask without changing the current ideal core.
    static constexpr u32 OnlyChangeMask = static_cast<u32>(-3);

    if (core == OnlyChangeMask) {
        core = thread->GetIdealCore();
    } else if (core >= Core::NUM_CPU_CORES && core != static_cast<u32>(-1)) {
        return ResultCode(ErrorModule::Kernel, ErrCodes::InvalidProcessorId);
    }

    // Error out if the input core isn't enabled in the input mask.
    if (core < Core::NUM_CPU_CORES && (mask & (1ull << core)) == 0) {
        return ResultCode(ErrorModule::Kernel, ErrCodes::InvalidCombination);
    }

    thread->ChangeCore(core, mask);

    return RESULT_SUCCESS;
}
|
|
|
|
|
2018-02-03 19:36:54 +01:00
|
|
|
/// Creates a shared memory region of the given size, owned by the current process.
///
/// @param handle             Out-parameter receiving the handle to the new region.
/// @param size               Requested size in bytes; must be a non-zero multiple of 4KB.
/// @param local_permissions  Permissions for the owning process (Read or ReadWrite).
/// @param remote_permissions Permissions for other processes (Read, ReadWrite, or DontCare).
static ResultCode CreateSharedMemory(Handle* handle, u64 size, u32 local_permissions,
                                     u32 remote_permissions) {
    LOG_TRACE(Kernel_SVC, "called, size=0x{:X}, localPerms=0x{:08X}, remotePerms=0x{:08X}", size,
              local_permissions, remote_permissions);

    // Size must be a multiple of 4KB and be less than or equal to
    // approx. 8 GB (actually (1GB - 512B) * 8)
    if (size == 0 || (size & 0xFFFFFFFE00000FFF) != 0) {
        return ERR_INVALID_SIZE;
    }

    // The owning process may only request read or read-write access for itself.
    const auto owner_perms = static_cast<MemoryPermission>(local_permissions);
    const bool owner_perms_valid =
        owner_perms == MemoryPermission::Read || owner_perms == MemoryPermission::ReadWrite;
    if (!owner_perms_valid) {
        return ERR_INVALID_MEMORY_PERMISSIONS;
    }

    // Other processes may additionally defer to the owner's choice via DontCare.
    const auto peer_perms = static_cast<MemoryPermission>(remote_permissions);
    const bool peer_perms_valid = peer_perms == MemoryPermission::Read ||
                                  peer_perms == MemoryPermission::ReadWrite ||
                                  peer_perms == MemoryPermission::DontCare;
    if (!peer_perms_valid) {
        return ERR_INVALID_MEMORY_PERMISSIONS;
    }

    auto& kernel = Core::System::GetInstance().Kernel();
    auto& handle_table = kernel.HandleTable();
    auto owner_process = handle_table.Get<Process>(KernelHandle::CurrentProcess);
    auto shared_mem_handle =
        SharedMemory::Create(kernel, owner_process, size, owner_perms, peer_perms);

    CASCADE_RESULT(*handle, handle_table.Create(shared_mem_handle));
    return RESULT_SUCCESS;
}
|
|
|
|
|
2018-02-22 15:28:15 +01:00
|
|
|
/// Resets the signaled state of the event referred to by the given handle.
///
/// @param handle Handle to the event to clear.
/// @return ERR_INVALID_HANDLE if the handle does not refer to an Event,
///         otherwise RESULT_SUCCESS.
static ResultCode ClearEvent(Handle handle) {
    LOG_TRACE(Kernel_SVC, "called, event=0x{:08X}", handle);

    auto& kernel = Core::System::GetInstance().Kernel();
    SharedPtr<Event> evt = kernel.HandleTable().Get<Event>(handle);
    // Braced for consistency with every other conditional in this file.
    if (evt == nullptr) {
        return ERR_INVALID_HANDLE;
    }

    evt->Clear();
    return RESULT_SUCCESS;
}
|
|
|
|
|
2018-10-13 20:31:46 +02:00
|
|
|
/// Retrieves information about the given process.
///
/// @param out            Out-parameter receiving the queried value.
/// @param process_handle Handle to the process being queried.
/// @param type           Which piece of information to retrieve.
///
/// @note Only the process status (type 0) may currently be queried.
static ResultCode GetProcessInfo(u64* out, Handle process_handle, u32 type) {
    LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, type);

    // This function currently only allows retrieving a process' status.
    enum class InfoType {
        Status,
    };

    // The handle is validated before the type so that a bad handle takes
    // precedence over a bad enum value, matching the original error order.
    const auto& kernel = Core::System::GetInstance().Kernel();
    const auto target_process = kernel.HandleTable().Get<Process>(process_handle);
    if (target_process == nullptr) {
        return ERR_INVALID_HANDLE;
    }

    if (static_cast<InfoType>(type) != InfoType::Status) {
        return ERR_INVALID_ENUM_VALUE;
    }

    *out = static_cast<u64>(target_process->GetStatus());
    return RESULT_SUCCESS;
}
|
|
|
|
|
2015-05-06 05:04:25 +02:00
|
|
|
namespace {
/// Descriptor for one entry of the SVC dispatch table.
/// NOTE: member order matters — SVC_Table aggregate-initializes these fields.
struct FunctionDef {
    using Func = void();

    u32 id;           ///< SVC number this entry corresponds to.
    Func* func;       ///< HLE implementation, or nullptr if unimplemented.
    const char* name; ///< Human-readable name, used for logging.
};
} // namespace
|
2015-05-06 05:04:25 +02:00
|
|
|
|
|
|
|
/// Dispatch table mapping SVC numbers (0x00-0x7F) to their HLE implementations.
/// Entries with a nullptr func are recognized but not yet implemented; they are
/// reported by CallSVC when invoked. Entry order must match the id field.
static const FunctionDef SVC_Table[] = {
    {0x00, nullptr, "Unknown"},
    {0x01, SvcWrap<SetHeapSize>, "SetHeapSize"},
    {0x02, nullptr, "SetMemoryPermission"},
    {0x03, SvcWrap<SetMemoryAttribute>, "SetMemoryAttribute"},
    {0x04, SvcWrap<MapMemory>, "MapMemory"},
    {0x05, SvcWrap<UnmapMemory>, "UnmapMemory"},
    {0x06, SvcWrap<QueryMemory>, "QueryMemory"},
    {0x07, SvcWrap<ExitProcess>, "ExitProcess"},
    {0x08, SvcWrap<CreateThread>, "CreateThread"},
    {0x09, SvcWrap<StartThread>, "StartThread"},
    {0x0A, SvcWrap<ExitThread>, "ExitThread"},
    {0x0B, SvcWrap<SleepThread>, "SleepThread"},
    {0x0C, SvcWrap<GetThreadPriority>, "GetThreadPriority"},
    {0x0D, SvcWrap<SetThreadPriority>, "SetThreadPriority"},
    {0x0E, SvcWrap<GetThreadCoreMask>, "GetThreadCoreMask"},
    {0x0F, SvcWrap<SetThreadCoreMask>, "SetThreadCoreMask"},
    {0x10, SvcWrap<GetCurrentProcessorNumber>, "GetCurrentProcessorNumber"},
    {0x11, nullptr, "SignalEvent"},
    {0x12, SvcWrap<ClearEvent>, "ClearEvent"},
    {0x13, SvcWrap<MapSharedMemory>, "MapSharedMemory"},
    {0x14, SvcWrap<UnmapSharedMemory>, "UnmapSharedMemory"},
    {0x15, SvcWrap<CreateTransferMemory>, "CreateTransferMemory"},
    {0x16, SvcWrap<CloseHandle>, "CloseHandle"},
    {0x17, SvcWrap<ResetSignal>, "ResetSignal"},
    {0x18, SvcWrap<WaitSynchronization>, "WaitSynchronization"},
    {0x19, SvcWrap<CancelSynchronization>, "CancelSynchronization"},
    {0x1A, SvcWrap<ArbitrateLock>, "ArbitrateLock"},
    {0x1B, SvcWrap<ArbitrateUnlock>, "ArbitrateUnlock"},
    {0x1C, SvcWrap<WaitProcessWideKeyAtomic>, "WaitProcessWideKeyAtomic"},
    {0x1D, SvcWrap<SignalProcessWideKey>, "SignalProcessWideKey"},
    {0x1E, SvcWrap<GetSystemTick>, "GetSystemTick"},
    {0x1F, SvcWrap<ConnectToNamedPort>, "ConnectToNamedPort"},
    {0x20, nullptr, "SendSyncRequestLight"},
    {0x21, SvcWrap<SendSyncRequest>, "SendSyncRequest"},
    {0x22, nullptr, "SendSyncRequestWithUserBuffer"},
    {0x23, nullptr, "SendAsyncRequestWithUserBuffer"},
    {0x24, SvcWrap<GetProcessId>, "GetProcessId"},
    {0x25, SvcWrap<GetThreadId>, "GetThreadId"},
    {0x26, SvcWrap<Break>, "Break"},
    {0x27, SvcWrap<OutputDebugString>, "OutputDebugString"},
    {0x28, nullptr, "ReturnFromException"},
    {0x29, SvcWrap<GetInfo>, "GetInfo"},
    {0x2A, nullptr, "FlushEntireDataCache"},
    {0x2B, nullptr, "FlushDataCache"},
    {0x2C, nullptr, "MapPhysicalMemory"},
    {0x2D, nullptr, "UnmapPhysicalMemory"},
    {0x2E, nullptr, "GetFutureThreadInfo"},
    {0x2F, nullptr, "GetLastThreadInfo"},
    {0x30, nullptr, "GetResourceLimitLimitValue"},
    {0x31, nullptr, "GetResourceLimitCurrentValue"},
    {0x32, SvcWrap<SetThreadActivity>, "SetThreadActivity"},
    {0x33, SvcWrap<GetThreadContext>, "GetThreadContext"},
    {0x34, SvcWrap<WaitForAddress>, "WaitForAddress"},
    {0x35, SvcWrap<SignalToAddress>, "SignalToAddress"},
    {0x36, nullptr, "Unknown"},
    {0x37, nullptr, "Unknown"},
    {0x38, nullptr, "Unknown"},
    {0x39, nullptr, "Unknown"},
    {0x3A, nullptr, "Unknown"},
    {0x3B, nullptr, "Unknown"},
    {0x3C, nullptr, "DumpInfo"},
    {0x3D, nullptr, "DumpInfoNew"},
    {0x3E, nullptr, "Unknown"},
    {0x3F, nullptr, "Unknown"},
    {0x40, nullptr, "CreateSession"},
    {0x41, nullptr, "AcceptSession"},
    {0x42, nullptr, "ReplyAndReceiveLight"},
    {0x43, nullptr, "ReplyAndReceive"},
    {0x44, nullptr, "ReplyAndReceiveWithUserBuffer"},
    {0x45, nullptr, "CreateEvent"},
    {0x46, nullptr, "Unknown"},
    {0x47, nullptr, "Unknown"},
    {0x48, nullptr, "MapPhysicalMemoryUnsafe"},
    {0x49, nullptr, "UnmapPhysicalMemoryUnsafe"},
    {0x4A, nullptr, "SetUnsafeLimit"},
    {0x4B, nullptr, "CreateCodeMemory"},
    {0x4C, nullptr, "ControlCodeMemory"},
    {0x4D, nullptr, "SleepSystem"},
    {0x4E, nullptr, "ReadWriteRegister"},
    {0x4F, nullptr, "SetProcessActivity"},
    {0x50, SvcWrap<CreateSharedMemory>, "CreateSharedMemory"},
    {0x51, nullptr, "MapTransferMemory"},
    {0x52, nullptr, "UnmapTransferMemory"},
    {0x53, nullptr, "CreateInterruptEvent"},
    {0x54, nullptr, "QueryPhysicalAddress"},
    {0x55, nullptr, "QueryIoMapping"},
    {0x56, nullptr, "CreateDeviceAddressSpace"},
    {0x57, nullptr, "AttachDeviceAddressSpace"},
    {0x58, nullptr, "DetachDeviceAddressSpace"},
    {0x59, nullptr, "MapDeviceAddressSpaceByForce"},
    {0x5A, nullptr, "MapDeviceAddressSpaceAligned"},
    {0x5B, nullptr, "MapDeviceAddressSpace"},
    {0x5C, nullptr, "UnmapDeviceAddressSpace"},
    {0x5D, nullptr, "InvalidateProcessDataCache"},
    {0x5E, nullptr, "StoreProcessDataCache"},
    {0x5F, nullptr, "FlushProcessDataCache"},
    {0x60, nullptr, "DebugActiveProcess"},
    {0x61, nullptr, "BreakDebugProcess"},
    {0x62, nullptr, "TerminateDebugProcess"},
    {0x63, nullptr, "GetDebugEvent"},
    {0x64, nullptr, "ContinueDebugEvent"},
    {0x65, nullptr, "GetProcessList"},
    {0x66, nullptr, "GetThreadList"},
    {0x67, nullptr, "GetDebugThreadContext"},
    {0x68, nullptr, "SetDebugThreadContext"},
    {0x69, nullptr, "QueryDebugProcessMemory"},
    {0x6A, nullptr, "ReadDebugProcessMemory"},
    {0x6B, nullptr, "WriteDebugProcessMemory"},
    {0x6C, nullptr, "SetHardwareBreakPoint"},
    {0x6D, nullptr, "GetDebugThreadParam"},
    {0x6E, nullptr, "Unknown"},
    {0x6F, nullptr, "GetSystemInfo"},
    {0x70, nullptr, "CreatePort"},
    {0x71, nullptr, "ManageNamedPort"},
    {0x72, nullptr, "ConnectToPort"},
    {0x73, nullptr, "SetProcessMemoryPermission"},
    {0x74, nullptr, "MapProcessMemory"},
    {0x75, nullptr, "UnmapProcessMemory"},
    {0x76, nullptr, "QueryProcessMemory"},
    {0x77, nullptr, "MapProcessCodeMemory"},
    {0x78, nullptr, "UnmapProcessCodeMemory"},
    {0x79, nullptr, "CreateProcess"},
    {0x7A, nullptr, "StartProcess"},
    {0x7B, nullptr, "TerminateProcess"},
    {0x7C, SvcWrap<GetProcessInfo>, "GetProcessInfo"},
    {0x7D, nullptr, "CreateResourceLimit"},
    {0x7E, nullptr, "SetResourceLimitLimitValue"},
    {0x7F, nullptr, "CallSecureMonitor"},
};
|
|
|
|
|
2015-07-21 09:51:36 +02:00
|
|
|
/// Looks up the dispatch-table entry for the given SVC number.
///
/// @param func_num The SVC number to look up.
/// @return Pointer to the matching table entry, or nullptr (with an error
///         logged) when the number falls outside the table.
static const FunctionDef* GetSVCInfo(u32 func_num) {
    if (func_num < std::size(SVC_Table)) {
        return &SVC_Table[func_num];
    }

    LOG_ERROR(Kernel_SVC, "Unknown svc=0x{:02X}", func_num);
    return nullptr;
}
|
|
|
|
|
2015-08-17 23:25:21 +02:00
|
|
|
// Profiling category for SVC dispatch; paired with MICROPROFILE_SCOPE in CallSVC.
MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70));
|
|
|
|
|
2015-07-21 09:51:36 +02:00
|
|
|
/// Dispatches the SVC identified by the given immediate to its HLE handler.
/// Unknown or unimplemented SVCs are logged as critical errors and ignored.
void CallSVC(u32 immediate) {
    MICROPROFILE_SCOPE(Kernel_SVC);

    // Lock the global kernel mutex when we enter the kernel HLE.
    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);

    const FunctionDef* const info = GetSVCInfo(immediate);
    if (info == nullptr) {
        LOG_CRITICAL(Kernel_SVC, "Unknown SVC function 0x{:X}", immediate);
        return;
    }

    if (info->func == nullptr) {
        LOG_CRITICAL(Kernel_SVC, "Unimplemented SVC function {}(..)", info->name);
        return;
    }

    info->func();
}
|
2014-04-12 00:44:21 +02:00
|
|
|
|
2018-01-03 02:40:30 +01:00
|
|
|
} // namespace Kernel
|