// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <cinttypes>
#include <iterator>
#include <mutex>
#include <vector>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
#include "common/string_util.h"
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/core_cpu.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/kernel/svc.h"
#include "core/hle/kernel/svc_wrap.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/transfer_memory.h"
#include "core/hle/kernel/writable_event.h"
#include "core/hle/lock.h"
#include "core/hle/result.h"
#include "core/hle/service/service.h"
#include "core/memory.h"
#include "core/reporter.h"

namespace Kernel {

namespace {

// Checks if address + size is greater than the given address
// This can return false if the size causes an overflow of a 64-bit type
// or if the given size is zero.
constexpr bool IsValidAddressRange(VAddr address, u64 size) {
    return address + size > address;
}

// 8 GiB
constexpr u64 MAIN_MEMORY_SIZE = 0x200000000;

// Helper function that performs the common sanity checks for svcMapMemory
// and svcUnmapMemory. This is doable, as both functions perform their sanitizing
// in the same order.
ResultCode MapUnmapMemorySanityChecks(const VMManager& vm_manager, VAddr dst_addr, VAddr src_addr,
                                      u64 size) {
    if (!Common::Is4KBAligned(dst_addr)) {
        LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
        return ERR_INVALID_ADDRESS;
    }

    if (!Common::Is4KBAligned(src_addr)) {
        LOG_ERROR(Kernel_SVC, "Source address is not aligned to 4KB, 0x{:016X}", src_addr);
        return ERR_INVALID_SIZE;
    }

    if (size == 0) {
        LOG_ERROR(Kernel_SVC, "Size is 0");
        return ERR_INVALID_SIZE;
    }

    if (!Common::Is4KBAligned(size)) {
        LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:016X}", size);
        return ERR_INVALID_SIZE;
    }

    if (!IsValidAddressRange(dst_addr, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Destination is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
                  dst_addr, size);
        return ERR_INVALID_ADDRESS_STATE;
    }

    if (!IsValidAddressRange(src_addr, size)) {
        LOG_ERROR(Kernel_SVC, "Source is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
                  src_addr, size);
        return ERR_INVALID_ADDRESS_STATE;
    }

    if (!vm_manager.IsWithinAddressSpace(src_addr, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Source is not within the address space, addr=0x{:016X}, size=0x{:016X}",
                  src_addr, size);
        return ERR_INVALID_ADDRESS_STATE;
    }

    if (!vm_manager.IsWithinStackRegion(dst_addr, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Destination is not within the stack region, addr=0x{:016X}, size=0x{:016X}",
                  dst_addr, size);
        return ERR_INVALID_MEMORY_RANGE;
    }

    const VAddr dst_end_address = dst_addr + size;
    if (dst_end_address > vm_manager.GetHeapRegionBaseAddress() &&
        vm_manager.GetHeapRegionEndAddress() > dst_addr) {
        LOG_ERROR(Kernel_SVC,
                  "Destination does not fit within the heap region, addr=0x{:016X}, "
                  "size=0x{:016X}, end_addr=0x{:016X}",
                  dst_addr, size, dst_end_address);
        return ERR_INVALID_MEMORY_RANGE;
    }

    if (dst_end_address > vm_manager.GetMapRegionBaseAddress() &&
        vm_manager.GetMapRegionEndAddress() > dst_addr) {
        LOG_ERROR(Kernel_SVC,
                  "Destination does not fit within the map region, addr=0x{:016X}, "
                  "size=0x{:016X}, end_addr=0x{:016X}",
                  dst_addr, size, dst_end_address);
        return ERR_INVALID_MEMORY_RANGE;
    }

    return RESULT_SUCCESS;
}

enum class ResourceLimitValueType {
    CurrentValue,
    LimitValue,
};
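
// Looks up the resource limit referenced by the given handle and returns either its current
// usage value or its maximum value for the requested resource type.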
ResultVal<s64> RetrieveResourceLimitValue(Core::System& system, Handle resource_limit,
                                          u32 resource_type, ResourceLimitValueType value_type) {
    const auto type = static_cast<ResourceType>(resource_type);
    if (!IsValidResourceType(type)) {
        LOG_ERROR(Kernel_SVC, "Invalid resource limit type: '{}'", resource_type);
        return ERR_INVALID_ENUM_VALUE;
    }

    const auto* const current_process = system.Kernel().CurrentProcess();
    ASSERT(current_process != nullptr);

    const auto resource_limit_object =
        current_process->GetHandleTable().Get<ResourceLimit>(resource_limit);
    if (!resource_limit_object) {
        LOG_ERROR(Kernel_SVC, "Handle to non-existent resource limit instance used. Handle={:08X}",
                  resource_limit);
        return ERR_INVALID_HANDLE;
    }

    if (value_type == ResourceLimitValueType::CurrentValue) {
        return MakeResult(resource_limit_object->GetCurrentResourceValue(type));
    }

    return MakeResult(resource_limit_object->GetMaxResourceValue(type));
}

} // Anonymous namespace

/// Sets the process heap to a given size. It can both extend and shrink the heap.
static ResultCode SetHeapSize(Core::System& system, VAddr* heap_addr, u64 heap_size) {
    LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", heap_size);

    // Size must be a multiple of 0x200000 (2MB) and less than 8GB.
    if ((heap_size % 0x200000) != 0) {
        LOG_ERROR(Kernel_SVC, "The heap size is not a multiple of 2MB, heap_size=0x{:016X}",
                  heap_size);
        return ERR_INVALID_SIZE;
    }

    if (heap_size >= 0x200000000) {
        LOG_ERROR(Kernel_SVC, "The heap size is not less than 8GB, heap_size=0x{:016X}", heap_size);
        return ERR_INVALID_SIZE;
    }

    auto& vm_manager = system.Kernel().CurrentProcess()->VMManager();
    const auto alloc_result = vm_manager.SetHeapSize(heap_size);
    if (alloc_result.Failed()) {
        return alloc_result.Code();
    }

    *heap_addr = *alloc_result;
    return RESULT_SUCCESS;
}

static ResultCode SetMemoryPermission(Core::System& system, VAddr addr, u64 size, u32 prot) {
    LOG_TRACE(Kernel_SVC, "called, addr=0x{:X}, size=0x{:X}, prot=0x{:X}", addr, size, prot);

    if (!Common::Is4KBAligned(addr)) {
        LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, addr=0x{:016X}", addr);
        return ERR_INVALID_ADDRESS;
    }

    if (size == 0) {
        LOG_ERROR(Kernel_SVC, "Size is 0");
        return ERR_INVALID_SIZE;
    }

    if (!Common::Is4KBAligned(size)) {
        LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, size=0x{:016X}", size);
        return ERR_INVALID_SIZE;
    }

    if (!IsValidAddressRange(addr, size)) {
        LOG_ERROR(Kernel_SVC, "Region is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
                  addr, size);
        return ERR_INVALID_ADDRESS_STATE;
    }

    const auto permission = static_cast<MemoryPermission>(prot);
    if (permission != MemoryPermission::None && permission != MemoryPermission::Read &&
        permission != MemoryPermission::ReadWrite) {
        LOG_ERROR(Kernel_SVC, "Invalid memory permission specified, got memory permission=0x{:08X}",
                  static_cast<u32>(permission));
        return ERR_INVALID_MEMORY_PERMISSIONS;
    }

    auto* const current_process = system.Kernel().CurrentProcess();
    auto& vm_manager = current_process->VMManager();

    if (!vm_manager.IsWithinAddressSpace(addr, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Source is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr,
                  size);
        return ERR_INVALID_ADDRESS_STATE;
    }

    const VMManager::VMAHandle iter = vm_manager.FindVMA(addr);
    if (!vm_manager.IsValidHandle(iter)) {
        LOG_ERROR(Kernel_SVC, "Unable to find VMA for address=0x{:016X}", addr);
        return ERR_INVALID_ADDRESS_STATE;
    }

    LOG_WARNING(Kernel_SVC, "Uniformity check on protected memory is not implemented.");
    // TODO: Perform a uniformity check to make sure only protected memory is changed (it doesn't
    // make sense to allow changing permissions on kernel memory itself, etc).

    const auto converted_permissions = SharedMemory::ConvertPermissions(permission);

    return vm_manager.ReprotectRange(addr, size, converted_permissions);
}

static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask,
                                     u32 attribute) {
    LOG_DEBUG(Kernel_SVC,
              "called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address,
              size, mask, attribute);

    if (!Common::Is4KBAligned(address)) {
        LOG_ERROR(Kernel_SVC, "Address not page aligned (0x{:016X})", address);
        return ERR_INVALID_ADDRESS;
    }

    if (size == 0 || !Common::Is4KBAligned(size)) {
        LOG_ERROR(Kernel_SVC, "Invalid size (0x{:X}). Size must be non-zero and page aligned.",
                  size);
        return ERR_INVALID_ADDRESS;
    }

    if (!IsValidAddressRange(address, size)) {
        LOG_ERROR(Kernel_SVC, "Address range overflowed (Address: 0x{:016X}, Size: 0x{:016X})",
                  address, size);
        return ERR_INVALID_ADDRESS_STATE;
    }

    const auto mem_attribute = static_cast<MemoryAttribute>(attribute);
    const auto mem_mask = static_cast<MemoryAttribute>(mask);
    const auto attribute_with_mask = mem_attribute | mem_mask;

    if (attribute_with_mask != mem_mask) {
        LOG_ERROR(Kernel_SVC,
                  "Memory attribute doesn't match the given mask (Attribute: 0x{:X}, Mask: {:X})",
                  attribute, mask);
        return ERR_INVALID_COMBINATION;
    }
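
    // Only the uncached memory attribute (MemoryAttribute::Uncached, value 8) may be set or
    // cleared through this SVC; any other attribute bits are rejected below.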
    if ((attribute_with_mask | MemoryAttribute::Uncached) != MemoryAttribute::Uncached) {
        LOG_ERROR(Kernel_SVC, "Specified attribute isn't equal to MemoryAttributeUncached (8).");
        return ERR_INVALID_COMBINATION;
    }

    auto& vm_manager = system.Kernel().CurrentProcess()->VMManager();
    if (!vm_manager.IsWithinAddressSpace(address, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Given address (0x{:016X}) is outside the bounds of the address space.", address);
        return ERR_INVALID_ADDRESS_STATE;
    }

    return vm_manager.SetMemoryAttribute(address, size, mem_mask, mem_attribute);
}

/// Maps a memory range into a different range.
static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
    LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
              src_addr, size);

    auto& vm_manager = system.Kernel().CurrentProcess()->VMManager();
    const auto result = MapUnmapMemorySanityChecks(vm_manager, dst_addr, src_addr, size);

    if (result.IsError()) {
        return result;
    }

    return vm_manager.MirrorMemory(dst_addr, src_addr, size, MemoryState::Stack);
}

/// Unmaps a region that was previously mapped with svcMapMemory
static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
    LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
              src_addr, size);

    auto& vm_manager = system.Kernel().CurrentProcess()->VMManager();
    const auto result = MapUnmapMemorySanityChecks(vm_manager, dst_addr, src_addr, size);

    if (result.IsError()) {
        return result;
    }

    const auto unmap_res = vm_manager.UnmapRange(dst_addr, size);

    // Reprotect the source mapping on success
    if (unmap_res.IsSuccess()) {
        ASSERT(vm_manager.ReprotectRange(src_addr, size, VMAPermission::ReadWrite).IsSuccess());
    }

    return unmap_res;
}

/// Connect to an OS service given the port name; returns the handle to the port in out_handle
static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle,
                                     VAddr port_name_address) {
    if (!Memory::IsValidVirtualAddress(port_name_address)) {
        LOG_ERROR(Kernel_SVC,
                  "Port Name Address is not a valid virtual address, port_name_address=0x{:016X}",
                  port_name_address);
        return ERR_NOT_FOUND;
    }

    static constexpr std::size_t PortNameMaxLength = 11;
    // Read 1 char beyond the max allowed port name to detect names that are too long.
    std::string port_name = Memory::ReadCString(port_name_address, PortNameMaxLength + 1);
    if (port_name.size() > PortNameMaxLength) {
        LOG_ERROR(Kernel_SVC, "Port name is too long, expected {} but got {}", PortNameMaxLength,
                  port_name.size());
        return ERR_OUT_OF_RANGE;
    }

    LOG_TRACE(Kernel_SVC, "called port_name={}", port_name);

    auto& kernel = system.Kernel();
    const auto it = kernel.FindNamedPort(port_name);
    if (!kernel.IsValidNamedPort(it)) {
        LOG_WARNING(Kernel_SVC, "tried to connect to unknown port: {}", port_name);
        return ERR_NOT_FOUND;
    }

    auto client_port = it->second;

    SharedPtr<ClientSession> client_session;
    CASCADE_RESULT(client_session, client_port->Connect());

    // Return the client session
    auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
    CASCADE_RESULT(*out_handle, handle_table.Create(client_session));
    return RESULT_SUCCESS;
}

/// Makes a blocking IPC call to an OS service.
static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    SharedPtr<ClientSession> session = handle_table.Get<ClientSession>(handle);
    if (!session) {
        LOG_ERROR(Kernel_SVC, "called with invalid handle=0x{:08X}", handle);
        return ERR_INVALID_HANDLE;
    }

    LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());

    system.PrepareReschedule();

    // TODO(Subv): svcSendSyncRequest should put the caller thread to sleep while the server
    // responds and cause a reschedule.
    return session->SendSyncRequest(system.CurrentScheduler().GetCurrentThread());
}

/// Get the ID for the specified thread.
static ResultCode GetThreadId(Core::System& system, u64* thread_id, Handle thread_handle) {
    LOG_TRACE(Kernel_SVC, "called thread=0x{:08X}", thread_handle);

    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    const SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle);
    if (!thread) {
        LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", thread_handle);
        return ERR_INVALID_HANDLE;
    }

    *thread_id = thread->GetThreadID();
    return RESULT_SUCCESS;
}

/// Gets the ID of the specified process or a specified thread's owning process.
static ResultCode GetProcessId(Core::System& system, u64* process_id, Handle handle) {
    LOG_DEBUG(Kernel_SVC, "called handle=0x{:08X}", handle);

    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    const SharedPtr<Process> process = handle_table.Get<Process>(handle);
    if (process) {
        *process_id = process->GetProcessID();
        return RESULT_SUCCESS;
    }

    const SharedPtr<Thread> thread = handle_table.Get<Thread>(handle);
    if (thread) {
        const Process* const owner_process = thread->GetOwnerProcess();
        if (!owner_process) {
            LOG_ERROR(Kernel_SVC, "Non-existent owning process encountered.");
            return ERR_INVALID_HANDLE;
        }

        *process_id = owner_process->GetProcessID();
        return RESULT_SUCCESS;
    }

    // NOTE: This should also handle debug objects before returning.

    LOG_ERROR(Kernel_SVC, "Handle does not exist, handle=0x{:08X}", handle);
    return ERR_INVALID_HANDLE;
}

/// Default thread wakeup callback for WaitSynchronization
static bool DefaultThreadWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread,
                                        SharedPtr<WaitObject> object, std::size_t index) {
    ASSERT(thread->GetStatus() == ThreadStatus::WaitSynch);

    if (reason == ThreadWakeupReason::Timeout) {
        thread->SetWaitSynchronizationResult(RESULT_TIMEOUT);
        return true;
    }

    ASSERT(reason == ThreadWakeupReason::Signal);
    thread->SetWaitSynchronizationResult(RESULT_SUCCESS);
    thread->SetWaitSynchronizationOutput(static_cast<u32>(index));
    return true;
}

/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr handles_address,
                                      u64 handle_count, s64 nano_seconds) {
    LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, handle_count={}, nano_seconds={}",
              handles_address, handle_count, nano_seconds);

    if (!Memory::IsValidVirtualAddress(handles_address)) {
        LOG_ERROR(Kernel_SVC,
                  "Handle address is not a valid virtual address, handle_address=0x{:016X}",
                  handles_address);
        return ERR_INVALID_POINTER;
    }

    static constexpr u64 MaxHandles = 0x40;

    if (handle_count > MaxHandles) {
        LOG_ERROR(Kernel_SVC, "Handle count specified is too large, expected {} but got {}",
                  MaxHandles, handle_count);
        return ERR_OUT_OF_RANGE;
    }

    auto* const thread = system.CurrentScheduler().GetCurrentThread();

    using ObjectPtr = Thread::ThreadWaitObjects::value_type;
    Thread::ThreadWaitObjects objects(handle_count);
    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();

    for (u64 i = 0; i < handle_count; ++i) {
        const Handle handle = Memory::Read32(handles_address + i * sizeof(Handle));
        const auto object = handle_table.Get<WaitObject>(handle);

        if (object == nullptr) {
            LOG_ERROR(Kernel_SVC, "Object is a nullptr");
            return ERR_INVALID_HANDLE;
        }

        objects[i] = object;
    }

    // Find the first object that is acquirable in the provided list of objects
    auto itr = std::find_if(objects.begin(), objects.end(), [thread](const ObjectPtr& object) {
        return !object->ShouldWait(thread);
    });

    if (itr != objects.end()) {
        // We found a ready object, acquire it and set the result value
        WaitObject* object = itr->get();
        object->Acquire(thread);
        *index = static_cast<s32>(std::distance(objects.begin(), itr));
        return RESULT_SUCCESS;
    }

    // No objects were ready to be acquired, prepare to suspend the thread.

    // If a timeout value of 0 was provided, just return the Timeout error code instead of
    // suspending the thread.
    if (nano_seconds == 0) {
        return RESULT_TIMEOUT;
    }

    if (thread->IsSyncCancelled()) {
        thread->SetSyncCancelled(false);
        return ERR_SYNCHRONIZATION_CANCELED;
    }

    for (auto& object : objects) {
        object->AddWaitingThread(thread);
    }

    thread->SetWaitObjects(std::move(objects));
    thread->SetStatus(ThreadStatus::WaitSynch);

    // Create an event to wake the thread up after the specified nanosecond delay has passed
    thread->WakeAfterDelay(nano_seconds);
    thread->SetWakeupCallback(DefaultThreadWakeupCallback);

    system.PrepareReschedule(thread->GetProcessorID());

    return RESULT_TIMEOUT;
}

/// Resumes a thread waiting on WaitSynchronization
static ResultCode CancelSynchronization(Core::System& system, Handle thread_handle) {
    LOG_TRACE(Kernel_SVC, "called thread=0x{:X}", thread_handle);

    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle);
    if (!thread) {
        LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
                  thread_handle);
        return ERR_INVALID_HANDLE;
    }

    thread->CancelWait();
    system.PrepareReschedule(thread->GetProcessorID());
    return RESULT_SUCCESS;
}

/// Attempts to lock a mutex, creating it if it does not already exist
static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_handle,
                                VAddr mutex_addr, Handle requesting_thread_handle) {
    LOG_TRACE(Kernel_SVC,
              "called holding_thread_handle=0x{:08X}, mutex_addr=0x{:X}, "
              "requesting_current_thread_handle=0x{:08X}",
              holding_thread_handle, mutex_addr, requesting_thread_handle);

    if (Memory::IsKernelVirtualAddress(mutex_addr)) {
        LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}",
                  mutex_addr);
        return ERR_INVALID_ADDRESS_STATE;
    }

    if (!Common::IsWordAligned(mutex_addr)) {
        LOG_ERROR(Kernel_SVC, "Mutex Address is not word aligned, mutex_addr={:016X}", mutex_addr);
        return ERR_INVALID_ADDRESS;
    }

    auto* const current_process = system.Kernel().CurrentProcess();
    return current_process->GetMutex().TryAcquire(mutex_addr, holding_thread_handle,
                                                  requesting_thread_handle);
}

/// Unlock a mutex
static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) {
    LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr);

    if (Memory::IsKernelVirtualAddress(mutex_addr)) {
        LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}",
                  mutex_addr);
        return ERR_INVALID_ADDRESS_STATE;
    }

    if (!Common::IsWordAligned(mutex_addr)) {
        LOG_ERROR(Kernel_SVC, "Mutex Address is not word aligned, mutex_addr={:016X}", mutex_addr);
        return ERR_INVALID_ADDRESS;
    }

    auto* const current_process = system.Kernel().CurrentProcess();
    return current_process->GetMutex().Release(mutex_addr);
}

enum class BreakType : u32 {
    Panic = 0,
    AssertionFailed = 1,
    PreNROLoad = 3,
    PostNROLoad = 4,
    PreNROUnload = 5,
    PostNROUnload = 6,
    CppException = 7,
};
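
// Layout of svcBreak's 'reason' argument: bits [0, 30) hold the BreakType above, and bit 31
// indicates that only the debugger should be signalled rather than terminating the process.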
struct BreakReason {
    union {
        u32 raw;
        BitField<0, 30, BreakType> break_type;
        BitField<31, 1, u32> signal_debugger;
    };
};

/// Break program execution
static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
    BreakReason break_reason{reason};
    bool has_dumped_buffer{};
    std::vector<u8> debug_buffer;

    const auto handle_debug_buffer = [&](VAddr addr, u64 sz) {
        if (sz == 0 || addr == 0 || has_dumped_buffer) {
            return;
        }

        // This typically is an error code so we're going to assume this is the case
        if (sz == sizeof(u32)) {
            LOG_CRITICAL(Debug_Emulated, "debug_buffer_err_code={:X}", Memory::Read32(addr));
        } else {
            // We don't know what's in here so we'll hexdump it
            debug_buffer.resize(sz);
            Memory::ReadBlock(addr, debug_buffer.data(), sz);
            std::string hexdump;
            for (std::size_t i = 0; i < debug_buffer.size(); i++) {
                hexdump += fmt::format("{:02X} ", debug_buffer[i]);
                if (i != 0 && i % 16 == 0) {
                    hexdump += '\n';
                }
            }
            LOG_CRITICAL(Debug_Emulated, "debug_buffer=\n{}", hexdump);
        }
        has_dumped_buffer = true;
    };

    switch (break_reason.break_type) {
    case BreakType::Panic:
        LOG_CRITICAL(Debug_Emulated, "Signalling debugger, PANIC! info1=0x{:016X}, info2=0x{:016X}",
                     info1, info2);
        handle_debug_buffer(info1, info2);
        break;
    case BreakType::AssertionFailed:
        LOG_CRITICAL(Debug_Emulated,
                     "Signalling debugger, Assertion failed! info1=0x{:016X}, info2=0x{:016X}",
                     info1, info2);
        handle_debug_buffer(info1, info2);
        break;
    case BreakType::PreNROLoad:
        LOG_WARNING(
            Debug_Emulated,
            "Signalling debugger, Attempting to load an NRO at 0x{:016X} with size 0x{:016X}",
            info1, info2);
        break;
    case BreakType::PostNROLoad:
        LOG_WARNING(Debug_Emulated,
                    "Signalling debugger, Loaded an NRO at 0x{:016X} with size 0x{:016X}", info1,
                    info2);
        break;
    case BreakType::PreNROUnload:
        LOG_WARNING(
            Debug_Emulated,
            "Signalling debugger, Attempting to unload an NRO at 0x{:016X} with size 0x{:016X}",
            info1, info2);
        break;
    case BreakType::PostNROUnload:
        LOG_WARNING(Debug_Emulated,
                    "Signalling debugger, Unloaded an NRO at 0x{:016X} with size 0x{:016X}", info1,
                    info2);
        break;
    case BreakType::CppException:
        LOG_CRITICAL(Debug_Emulated, "Signalling debugger. Uncaught C++ exception encountered.");
        break;
    default:
        LOG_WARNING(
            Debug_Emulated,
            "Signalling debugger, Unknown break reason {}, info1=0x{:016X}, info2=0x{:016X}",
            static_cast<u32>(break_reason.break_type.Value()), info1, info2);
        handle_debug_buffer(info1, info2);
        break;
    }

    system.GetReporter().SaveSvcBreakReport(
        static_cast<u32>(break_reason.break_type.Value()), break_reason.signal_debugger, info1,
        info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt);

    if (!break_reason.signal_debugger) {
        LOG_CRITICAL(
            Debug_Emulated,
            "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}",
            reason, info1, info2);

        handle_debug_buffer(info1, info2);

        auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
        const auto thread_processor_id = current_thread->GetProcessorID();
        system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
        ASSERT(false);

        system.Kernel().CurrentProcess()->PrepareForTermination();

        // Kill the current thread
        current_thread->Stop();
        system.PrepareReschedule();
    }
}

/// Used to output a message on a debug hardware unit - does nothing on a retail unit
static void OutputDebugString([[maybe_unused]] Core::System& system, VAddr address, u64 len) {
    if (len == 0) {
        return;
    }

    std::string str(len, '\0');
    Memory::ReadBlock(address, str.data(), str.size());
    LOG_DEBUG(Debug_Emulated, "{}", str);
}

/// Gets system/memory information for the current process
static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 handle,
                          u64 info_sub_id) {
    LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id,
              info_sub_id, handle);

    enum class GetInfoType : u64 {
        // 1.0.0+
        AllowedCPUCoreMask = 0,
        AllowedThreadPriorityMask = 1,
        MapRegionBaseAddr = 2,
        MapRegionSize = 3,
        HeapRegionBaseAddr = 4,
        HeapRegionSize = 5,
        TotalPhysicalMemoryAvailable = 6,
        TotalPhysicalMemoryUsed = 7,
        IsCurrentProcessBeingDebugged = 8,
        RegisterResourceLimit = 9,
        IdleTickCount = 10,
        RandomEntropy = 11,
        ThreadTickCount = 0xF0000002,
        // 2.0.0+
        ASLRRegionBaseAddr = 12,
        ASLRRegionSize = 13,
        StackRegionBaseAddr = 14,
        StackRegionSize = 15,
        // 3.0.0+
        SystemResourceSize = 16,
        SystemResourceUsage = 17,
        TitleId = 18,
        // 4.0.0+
        PrivilegedProcessId = 19,
        // 5.0.0+
        UserExceptionContextAddr = 20,
        // 6.0.0+
        TotalPhysicalMemoryAvailableWithoutSystemResource = 21,
        TotalPhysicalMemoryUsedWithoutSystemResource = 22,
    };

    const auto info_id_type = static_cast<GetInfoType>(info_id);

    switch (info_id_type) {
    case GetInfoType::AllowedCPUCoreMask:
    case GetInfoType::AllowedThreadPriorityMask:
    case GetInfoType::MapRegionBaseAddr:
    case GetInfoType::MapRegionSize:
    case GetInfoType::HeapRegionBaseAddr:
    case GetInfoType::HeapRegionSize:
    case GetInfoType::ASLRRegionBaseAddr:
    case GetInfoType::ASLRRegionSize:
    case GetInfoType::StackRegionBaseAddr:
    case GetInfoType::StackRegionSize:
    case GetInfoType::TotalPhysicalMemoryAvailable:
    case GetInfoType::TotalPhysicalMemoryUsed:
    case GetInfoType::SystemResourceSize:
    case GetInfoType::SystemResourceUsage:
    case GetInfoType::TitleId:
    case GetInfoType::UserExceptionContextAddr:
    case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource:
    case GetInfoType::TotalPhysicalMemoryUsedWithoutSystemResource: {
        if (info_sub_id != 0) {
            return ERR_INVALID_ENUM_VALUE;
        }

        const auto& current_process_handle_table =
            system.Kernel().CurrentProcess()->GetHandleTable();
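        // The lookup goes through the current process' handle table, which also resolves
        // pseudo-handles (0xFFFF8001 for the current process, 0xFFFF8000 for the current
        // thread), so a process can query information about itself through this path.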
        const auto process = current_process_handle_table.Get<Process>(static_cast<Handle>(handle));
        if (!process) {
            return ERR_INVALID_HANDLE;
        }

        switch (info_id_type) {
        case GetInfoType::AllowedCPUCoreMask:
            *result = process->GetCoreMask();
            return RESULT_SUCCESS;

        case GetInfoType::AllowedThreadPriorityMask:
            *result = process->GetPriorityMask();
            return RESULT_SUCCESS;

        case GetInfoType::MapRegionBaseAddr:
            *result = process->VMManager().GetMapRegionBaseAddress();
            return RESULT_SUCCESS;

        case GetInfoType::MapRegionSize:
            *result = process->VMManager().GetMapRegionSize();
            return RESULT_SUCCESS;

        case GetInfoType::HeapRegionBaseAddr:
            *result = process->VMManager().GetHeapRegionBaseAddress();
            return RESULT_SUCCESS;

        case GetInfoType::HeapRegionSize:
            *result = process->VMManager().GetHeapRegionSize();
            return RESULT_SUCCESS;

        case GetInfoType::ASLRRegionBaseAddr:
            *result = process->VMManager().GetASLRRegionBaseAddress();
            return RESULT_SUCCESS;

        case GetInfoType::ASLRRegionSize:
            *result = process->VMManager().GetASLRRegionSize();
            return RESULT_SUCCESS;

        case GetInfoType::StackRegionBaseAddr:
            *result = process->VMManager().GetStackRegionBaseAddress();
            return RESULT_SUCCESS;

        case GetInfoType::StackRegionSize:
            *result = process->VMManager().GetStackRegionSize();
            return RESULT_SUCCESS;

        case GetInfoType::TotalPhysicalMemoryAvailable:
            *result = process->GetTotalPhysicalMemoryAvailable();
            return RESULT_SUCCESS;

        case GetInfoType::TotalPhysicalMemoryUsed:
            *result = process->GetTotalPhysicalMemoryUsed();
            return RESULT_SUCCESS;

        case GetInfoType::SystemResourceSize:
            *result = process->GetSystemResourceSize();
            return RESULT_SUCCESS;

        case GetInfoType::SystemResourceUsage:
            LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query system resource usage");
            *result = process->GetSystemResourceUsage();
            return RESULT_SUCCESS;

        case GetInfoType::TitleId:
            *result = process->GetTitleID();
            return RESULT_SUCCESS;

        case GetInfoType::UserExceptionContextAddr:
            *result = process->GetTLSRegionAddress();
            return RESULT_SUCCESS;

        case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource:
            *result = process->GetTotalPhysicalMemoryAvailableWithoutSystemResource();
            return RESULT_SUCCESS;

        case GetInfoType::TotalPhysicalMemoryUsedWithoutSystemResource:
            *result = process->GetTotalPhysicalMemoryUsedWithoutSystemResource();
            return RESULT_SUCCESS;

        default:
            break;
        }

        LOG_WARNING(Kernel_SVC, "(STUBBED) Unimplemented svcGetInfo id=0x{:016X}", info_id);
        return ERR_INVALID_ENUM_VALUE;
    }

    case GetInfoType::IsCurrentProcessBeingDebugged:
        *result = 0;
        return RESULT_SUCCESS;

    case GetInfoType::RegisterResourceLimit: {
        if (handle != 0) {
            return ERR_INVALID_HANDLE;
        }

        if (info_sub_id != 0) {
            return ERR_INVALID_COMBINATION;
        }

        Process* const current_process = system.Kernel().CurrentProcess();
        HandleTable& handle_table = current_process->GetHandleTable();
        const auto resource_limit = current_process->GetResourceLimit();
        if (!resource_limit) {
            *result = KernelHandle::InvalidHandle;
            // Yes, the kernel considers this a successful operation.
            return RESULT_SUCCESS;
        }

        const auto table_result = handle_table.Create(resource_limit);
        if (table_result.Failed()) {
            return table_result.Code();
        }

        *result = *table_result;
        return RESULT_SUCCESS;
    }

    case GetInfoType::RandomEntropy:
        if (handle != 0) {
            LOG_ERROR(Kernel_SVC, "Process Handle is non-zero, expected 0 but got {:016X}",
                      handle);
            return ERR_INVALID_HANDLE;
        }

        if (info_sub_id >= Process::RANDOM_ENTROPY_SIZE) {
            LOG_ERROR(Kernel_SVC, "Entropy size is out of range, expected {} but got {}",
                      Process::RANDOM_ENTROPY_SIZE, info_sub_id);
            return ERR_INVALID_COMBINATION;
        }

        *result = system.Kernel().CurrentProcess()->GetRandomEntropy(info_sub_id);
        return RESULT_SUCCESS;

    case GetInfoType::PrivilegedProcessId:
        LOG_WARNING(Kernel_SVC,
                    "(STUBBED) Attempted to query privileged process id bounds, returned 0");
        *result = 0;
        return RESULT_SUCCESS;

    case GetInfoType::ThreadTickCount: {
        constexpr u64 num_cpus = 4;
        if (info_sub_id != 0xFFFFFFFFFFFFFFFF && info_sub_id >= num_cpus) {
            LOG_ERROR(Kernel_SVC, "Core count is out of range, expected {} but got {}", num_cpus,
                      info_sub_id);
            return ERR_INVALID_COMBINATION;
        }

        const auto thread = system.Kernel().CurrentProcess()->GetHandleTable().Get<Thread>(
            static_cast<Handle>(handle));
        if (!thread) {
            LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}",
                      static_cast<Handle>(handle));
            return ERR_INVALID_HANDLE;
        }

        const auto& core_timing = system.CoreTiming();
        const auto& scheduler = system.CurrentScheduler();
        const auto* const current_thread = scheduler.GetCurrentThread();
        const bool same_thread = current_thread == thread;

        const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks();
        u64 out_ticks = 0;
        if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) {
            const u64 thread_ticks = current_thread->GetTotalCPUTimeTicks();

            out_ticks = thread_ticks + (core_timing.GetTicks() - prev_ctx_ticks);
        } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
            out_ticks = core_timing.GetTicks() - prev_ctx_ticks;
        }

        *result = out_ticks;
        return RESULT_SUCCESS;
    }

    default:
        LOG_WARNING(Kernel_SVC, "(STUBBED) Unimplemented svcGetInfo id=0x{:016X}", info_id);
        return ERR_INVALID_ENUM_VALUE;
    }
}

/// Maps memory at a desired address
static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
    LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);

    if (!Common::Is4KBAligned(addr)) {
        LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
        return ERR_INVALID_ADDRESS;
    }

    if (!Common::Is4KBAligned(size)) {
        LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size);
        return ERR_INVALID_SIZE;
    }

    if (size == 0) {
        LOG_ERROR(Kernel_SVC, "Size is zero");
        return ERR_INVALID_SIZE;
    }

    if (!(addr < addr + size)) {
        LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
        return ERR_INVALID_MEMORY_RANGE;
    }

    Process* const current_process = system.Kernel().CurrentProcess();
    auto& vm_manager = current_process->VMManager();
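
    // Physical memory may only be mapped by a process that was created with a nonzero system
    // resource size, and only within its map region; the checks below enforce this.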
    if (current_process->GetSystemResourceSize() == 0) {
        LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
        return ERR_INVALID_STATE;
    }

    if (!vm_manager.IsWithinMapRegion(addr, size)) {
        LOG_ERROR(Kernel_SVC, "Range not within map region");
        return ERR_INVALID_MEMORY_RANGE;
    }

    return vm_manager.MapPhysicalMemory(addr, size);
}

/// Unmaps memory previously mapped via MapPhysicalMemory
static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
    LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);

    if (!Common::Is4KBAligned(addr)) {
        LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
        return ERR_INVALID_ADDRESS;
    }

    if (!Common::Is4KBAligned(size)) {
        LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size);
        return ERR_INVALID_SIZE;
    }

    if (size == 0) {
        LOG_ERROR(Kernel_SVC, "Size is zero");
        return ERR_INVALID_SIZE;
    }

    if (!(addr < addr + size)) {
        LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
        return ERR_INVALID_MEMORY_RANGE;
    }

    Process* const current_process = system.Kernel().CurrentProcess();
    auto& vm_manager = current_process->VMManager();

    if (current_process->GetSystemResourceSize() == 0) {
        LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
        return ERR_INVALID_STATE;
    }

    if (!vm_manager.IsWithinMapRegion(addr, size)) {
        LOG_ERROR(Kernel_SVC, "Range not within map region");
        return ERR_INVALID_MEMORY_RANGE;
    }

    return vm_manager.UnmapPhysicalMemory(addr, size);
}

/// Sets the thread activity
static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 activity) {
    LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", handle, activity);
    if (activity > static_cast<u32>(ThreadActivity::Paused)) {
        return ERR_INVALID_ENUM_VALUE;
    }

    const auto* current_process = system.Kernel().CurrentProcess();
    const SharedPtr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle);
    if (!thread) {
        LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
        return ERR_INVALID_HANDLE;
    }

    if (thread->GetOwnerProcess() != current_process) {
        LOG_ERROR(Kernel_SVC,
                  "The current process does not own the current thread, thread_handle={:08X} "
                  "thread_pid={}, "
                  "current_process_pid={}",
                  handle, thread->GetOwnerProcess()->GetProcessID(),
                  current_process->GetProcessID());
        return ERR_INVALID_HANDLE;
    }

    if (thread == system.CurrentScheduler().GetCurrentThread()) {
        LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread");
        return ERR_BUSY;
    }

    thread->SetActivity(static_cast<ThreadActivity>(activity));

    system.PrepareReschedule(thread->GetProcessorID());
    return RESULT_SUCCESS;
}
|
|
|
|
|
|
|
|
/// Gets the thread context
|
2019-04-07 00:46:18 +02:00
|
|
|
static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, Handle handle) {
|
2018-09-30 01:58:21 +02:00
|
|
|
LOG_DEBUG(Kernel_SVC, "called, context=0x{:08X}, thread=0x{:X}", thread_context, handle);
|
|
|
|
|
2019-04-07 00:46:18 +02:00
|
|
|
const auto* current_process = system.Kernel().CurrentProcess();
|
2018-10-20 20:34:41 +02:00
|
|
|
const SharedPtr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle);
|
2018-09-30 01:58:21 +02:00
|
|
|
if (!thread) {
|
2018-11-26 09:47:39 +01:00
|
|
|
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
|
2018-09-30 01:58:21 +02:00
|
|
|
return ERR_INVALID_HANDLE;
|
|
|
|
}
|
|
|
|
|
2018-10-04 00:47:57 +02:00
|
|
|
if (thread->GetOwnerProcess() != current_process) {
|
2018-11-26 09:47:39 +01:00
|
|
|
LOG_ERROR(Kernel_SVC,
|
|
|
|
"The current process does not own the current thread, thread_handle={:08X} "
|
|
|
|
"thread_pid={}, "
|
|
|
|
"current_process_pid={}",
|
|
|
|
handle, thread->GetOwnerProcess()->GetProcessID(),
|
|
|
|
current_process->GetProcessID());
|
2018-09-30 01:58:21 +02:00
|
|
|
return ERR_INVALID_HANDLE;
|
|
|
|
}
|
|
|
|
|
2019-04-07 00:46:18 +02:00
|
|
|
if (thread == system.CurrentScheduler().GetCurrentThread()) {
|
2018-11-26 09:47:39 +01:00
|
|
|
LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread");
|
2018-12-03 18:25:27 +01:00
|
|
|
return ERR_BUSY;
|
2018-09-30 01:58:21 +02:00
|
|
|
}
|
|
|
|
|
2018-10-04 00:47:57 +02:00
|
|
|
Core::ARM_Interface::ThreadContext ctx = thread->GetContext();
|
2018-09-30 01:58:21 +02:00
|
|
|
// Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
|
|
|
|
ctx.pstate &= 0xFF0FFE20;
|
|
|
|
|
|
|
|
// If 64-bit, we can just write the context registers directly and we're good.
|
|
|
|
// However, if 32-bit, we have to ensure some registers are zeroed out.
|
|
|
|
if (!current_process->Is64BitProcess()) {
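        // AArch32 only exposes r0-r14 as general-purpose registers and q0-q15 as vector
        // registers, so the 64-bit-only registers are cleared before the context is written
        // back to guest memory.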
        std::fill(ctx.cpu_registers.begin() + 15, ctx.cpu_registers.end(), 0);
        std::fill(ctx.vector_registers.begin() + 16, ctx.vector_registers.end(), u128{});
    }

    Memory::WriteBlock(thread_context, &ctx, sizeof(ctx));
    return RESULT_SUCCESS;
}

/// Gets the priority for the specified thread
static ResultCode GetThreadPriority(Core::System& system, u32* priority, Handle handle) {
    LOG_TRACE(Kernel_SVC, "called");

    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    const SharedPtr<Thread> thread = handle_table.Get<Thread>(handle);
    if (!thread) {
        LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
        return ERR_INVALID_HANDLE;
    }

    *priority = thread->GetPriority();
    return RESULT_SUCCESS;
}

/// Sets the priority for the specified thread
static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 priority) {
    LOG_TRACE(Kernel_SVC, "called");

    if (priority > THREADPRIO_LOWEST) {
        LOG_ERROR(Kernel_SVC,
                  "An invalid priority was specified, expected at most {} but got {} for "
                  "thread_handle={:08X}",
                  THREADPRIO_LOWEST, priority, handle);
        return ERR_INVALID_THREAD_PRIORITY;
    }

    const auto* const current_process = system.Kernel().CurrentProcess();

    SharedPtr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle);
    if (!thread) {
        LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
        return ERR_INVALID_HANDLE;
    }

    thread->SetPriority(priority);

    system.PrepareReschedule(thread->GetProcessorID());
    return RESULT_SUCCESS;
}

/// Get which CPU core is executing the current thread
static u32 GetCurrentProcessorNumber(Core::System& system) {
    LOG_TRACE(Kernel_SVC, "called");
    return system.CurrentScheduler().GetCurrentThread()->GetProcessorID();
}
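
/// Maps a shared memory object into the current process's address space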
static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_handle, VAddr addr,
                                  u64 size, u32 permissions) {
    LOG_TRACE(Kernel_SVC,
              "called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
              shared_memory_handle, addr, size, permissions);

    if (!Common::Is4KBAligned(addr)) {
        LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, addr=0x{:016X}", addr);
        return ERR_INVALID_ADDRESS;
    }

    if (size == 0) {
        LOG_ERROR(Kernel_SVC, "Size is 0");
        return ERR_INVALID_SIZE;
    }

    if (!Common::Is4KBAligned(size)) {
        LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, size=0x{:016X}", size);
        return ERR_INVALID_SIZE;
    }

    if (!IsValidAddressRange(addr, size)) {
        LOG_ERROR(Kernel_SVC, "Region is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
                  addr, size);
        return ERR_INVALID_ADDRESS_STATE;
    }

    const auto permissions_type = static_cast<MemoryPermission>(permissions);
    if (permissions_type != MemoryPermission::Read &&
        permissions_type != MemoryPermission::ReadWrite) {
        LOG_ERROR(Kernel_SVC, "Expected Read or ReadWrite permission but got permissions=0x{:08X}",
                  permissions);
        return ERR_INVALID_MEMORY_PERMISSIONS;
    }

    auto* const current_process = system.Kernel().CurrentProcess();
    auto shared_memory = current_process->GetHandleTable().Get<SharedMemory>(shared_memory_handle);
    if (!shared_memory) {
        LOG_ERROR(Kernel_SVC, "Shared memory does not exist, shared_memory_handle=0x{:08X}",
                  shared_memory_handle);
        return ERR_INVALID_HANDLE;
    }

    const auto& vm_manager = current_process->VMManager();
    if (!vm_manager.IsWithinASLRRegion(addr, size)) {
        LOG_ERROR(Kernel_SVC, "Region is not within the ASLR region. addr=0x{:016X}, size={:016X}",
                  addr, size);
        return ERR_INVALID_MEMORY_RANGE;
    }

    return shared_memory->Map(*current_process, addr, permissions_type, MemoryPermission::DontCare);
}
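
/// Unmaps a shared memory object previously mapped via MapSharedMemory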
static ResultCode UnmapSharedMemory(Core::System& system, Handle shared_memory_handle, VAddr addr,
                                    u64 size) {
    LOG_WARNING(Kernel_SVC, "called, shared_memory_handle=0x{:08X}, addr=0x{:X}, size=0x{:X}",
                shared_memory_handle, addr, size);

    if (!Common::Is4KBAligned(addr)) {
        LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, addr=0x{:016X}", addr);
        return ERR_INVALID_ADDRESS;
    }

    if (size == 0) {
        LOG_ERROR(Kernel_SVC, "Size is 0");
        return ERR_INVALID_SIZE;
    }

    if (!Common::Is4KBAligned(size)) {
        LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, size=0x{:016X}", size);
        return ERR_INVALID_SIZE;
    }

    if (!IsValidAddressRange(addr, size)) {
        LOG_ERROR(Kernel_SVC, "Region is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
                  addr, size);
        return ERR_INVALID_ADDRESS_STATE;
    }

    auto* const current_process = system.Kernel().CurrentProcess();
    auto shared_memory = current_process->GetHandleTable().Get<SharedMemory>(shared_memory_handle);
    if (!shared_memory) {
        LOG_ERROR(Kernel_SVC, "Shared memory does not exist, shared_memory_handle=0x{:08X}",
                  shared_memory_handle);
        return ERR_INVALID_HANDLE;
    }

    const auto& vm_manager = current_process->VMManager();
    if (!vm_manager.IsWithinASLRRegion(addr, size)) {
        LOG_ERROR(Kernel_SVC, "Region is not within the ASLR region. addr=0x{:016X}, size={:016X}",
                  addr, size);
        return ERR_INVALID_MEMORY_RANGE;
    }

    return shared_memory->Unmap(*current_process, addr, size);
}
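
/// Queries information about a block of memory in the given process's address space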
static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
                                     VAddr page_info_address, Handle process_handle,
                                     VAddr address) {
    LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address);
    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    SharedPtr<Process> process = handle_table.Get<Process>(process_handle);
    if (!process) {
        LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}",
                  process_handle);
        return ERR_INVALID_HANDLE;
    }

    const auto& vm_manager = process->VMManager();
    const MemoryInfo memory_info = vm_manager.QueryMemory(address);
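
    // The writes below lay out the MemoryInfo structure field by field. A sketch of the layout
    // implied by the offsets used here (the struct itself is defined elsewhere):
    //   +0x00 u64 base_address
    //   +0x08 u64 size
    //   +0x10 u32 state
    //   +0x14 u32 attributes
    //   +0x18 u32 permission
    //   +0x1C u32 device_ref_count
    //   +0x20 u32 ipc_ref_count
    //   +0x24 u32 padding (always written as zero)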
    Memory::Write64(memory_info_address, memory_info.base_address);
    Memory::Write64(memory_info_address + 8, memory_info.size);
    Memory::Write32(memory_info_address + 16, memory_info.state);
    Memory::Write32(memory_info_address + 20, memory_info.attributes);
    Memory::Write32(memory_info_address + 24, memory_info.permission);
    Memory::Write32(memory_info_address + 32, memory_info.ipc_ref_count);
    Memory::Write32(memory_info_address + 28, memory_info.device_ref_count);
    Memory::Write32(memory_info_address + 36, 0);

    // Page info appears to be currently unused by the kernel and is always set to zero.
    Memory::Write32(page_info_address, 0);

    return RESULT_SUCCESS;
}
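
/// Queries memory information for the current process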
static ResultCode QueryMemory(Core::System& system, VAddr memory_info_address,
                              VAddr page_info_address, VAddr query_address) {
    LOG_TRACE(Kernel_SVC,
              "called, memory_info_address=0x{:016X}, page_info_address=0x{:016X}, "
              "query_address=0x{:016X}",
              memory_info_address, page_info_address, query_address);

    return QueryProcessMemory(system, memory_info_address, page_info_address, CurrentProcess,
                              query_address);
}
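
/// Maps a region of the given process's address space as code memory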
static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
                                       u64 src_address, u64 size) {
    LOG_DEBUG(Kernel_SVC,
              "called. process_handle=0x{:08X}, dst_address=0x{:016X}, "
              "src_address=0x{:016X}, size=0x{:016X}",
              process_handle, dst_address, src_address, size);

    if (!Common::Is4KBAligned(src_address)) {
        LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
                  src_address);
        return ERR_INVALID_ADDRESS;
    }

    if (!Common::Is4KBAligned(dst_address)) {
        LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
                  dst_address);
        return ERR_INVALID_ADDRESS;
    }

    if (size == 0 || !Common::Is4KBAligned(size)) {
        LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X})", size);
        return ERR_INVALID_SIZE;
    }

    if (!IsValidAddressRange(dst_address, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Destination address range overflows the address space (dst_address=0x{:016X}, "
                  "size=0x{:016X}).",
                  dst_address, size);
        return ERR_INVALID_ADDRESS_STATE;
    }

    if (!IsValidAddressRange(src_address, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Source address range overflows the address space (src_address=0x{:016X}, "
                  "size=0x{:016X}).",
                  src_address, size);
        return ERR_INVALID_ADDRESS_STATE;
    }

    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    auto process = handle_table.Get<Process>(process_handle);
    if (!process) {
        LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
                  process_handle);
        return ERR_INVALID_HANDLE;
    }

    auto& vm_manager = process->VMManager();
    if (!vm_manager.IsWithinAddressSpace(src_address, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Source address range is not within the address space (src_address=0x{:016X}, "
                  "size=0x{:016X}).",
                  src_address, size);
        return ERR_INVALID_ADDRESS_STATE;
    }

    if (!vm_manager.IsWithinASLRRegion(dst_address, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
                  "size=0x{:016X}).",
                  dst_address, size);
        return ERR_INVALID_MEMORY_RANGE;
    }

    return vm_manager.MapCodeMemory(dst_address, src_address, size);
}
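
/// Unmaps a region of code memory previously mapped via MapProcessCodeMemory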
static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_handle,
                                         u64 dst_address, u64 src_address, u64 size) {
    LOG_DEBUG(Kernel_SVC,
              "called. process_handle=0x{:08X}, dst_address=0x{:016X}, src_address=0x{:016X}, "
              "size=0x{:016X}",
              process_handle, dst_address, src_address, size);

    if (!Common::Is4KBAligned(dst_address)) {
        LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
                  dst_address);
        return ERR_INVALID_ADDRESS;
    }

    if (!Common::Is4KBAligned(src_address)) {
        LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
                  src_address);
        return ERR_INVALID_ADDRESS;
    }

    if (size == 0 || !Common::Is4KBAligned(size)) {
        LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X}).", size);
        return ERR_INVALID_SIZE;
    }

    if (!IsValidAddressRange(dst_address, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Destination address range overflows the address space (dst_address=0x{:016X}, "
                  "size=0x{:016X}).",
                  dst_address, size);
        return ERR_INVALID_ADDRESS_STATE;
    }

    if (!IsValidAddressRange(src_address, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Source address range overflows the address space (src_address=0x{:016X}, "
                  "size=0x{:016X}).",
                  src_address, size);
        return ERR_INVALID_ADDRESS_STATE;
    }

    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    auto process = handle_table.Get<Process>(process_handle);
    if (!process) {
        LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
                  process_handle);
        return ERR_INVALID_HANDLE;
    }

    auto& vm_manager = process->VMManager();
    if (!vm_manager.IsWithinAddressSpace(src_address, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Source address range is not within the address space (src_address=0x{:016X}, "
                  "size=0x{:016X}).",
                  src_address, size);
        return ERR_INVALID_ADDRESS_STATE;
    }

    if (!vm_manager.IsWithinASLRRegion(dst_address, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
                  "size=0x{:016X}).",
                  dst_address, size);
        return ERR_INVALID_MEMORY_RANGE;
    }

    return vm_manager.UnmapCodeMemory(dst_address, src_address, size);
}

/// Exits the current process
static void ExitProcess(Core::System& system) {
    auto* current_process = system.Kernel().CurrentProcess();

    LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());
    ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running,
               "Process has already exited");

    current_process->PrepareForTermination();

    // Kill the current thread
    system.CurrentScheduler().GetCurrentThread()->Stop();

    system.PrepareReschedule();
}

/// Creates a new thread
static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, u64 arg,
                               VAddr stack_top, u32 priority, s32 processor_id) {
    LOG_DEBUG(Kernel_SVC,
              "called entrypoint=0x{:08X}, arg=0x{:08X}, stacktop=0x{:08X}, "
              "threadpriority=0x{:08X}, processorid=0x{:08X} : created handle=0x{:08X}",
              entry_point, arg, stack_top, priority, processor_id, *out_handle);

    auto* const current_process = system.Kernel().CurrentProcess();

    if (processor_id == THREADPROCESSORID_IDEAL) {
        // Set the target CPU to the one specified by the process.
        processor_id = current_process->GetIdealCore();
        ASSERT(processor_id != THREADPROCESSORID_IDEAL);
    }

    if (processor_id < THREADPROCESSORID_0 || processor_id > THREADPROCESSORID_3) {
        LOG_ERROR(Kernel_SVC, "Invalid thread processor ID: {}", processor_id);
        return ERR_INVALID_PROCESSOR_ID;
    }

    const u64 core_mask = current_process->GetCoreMask();
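    // The requested core must already be set in the process's allowed core mask; OR-ing the
    // core's bit in must therefore not change the mask.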
    if ((core_mask | (1ULL << processor_id)) != core_mask) {
        LOG_ERROR(Kernel_SVC, "Invalid thread core specified ({})", processor_id);
        return ERR_INVALID_PROCESSOR_ID;
    }

    if (priority > THREADPRIO_LOWEST) {
        LOG_ERROR(Kernel_SVC,
                  "Invalid thread priority specified ({}). Must be within the range 0-64",
                  priority);
        return ERR_INVALID_THREAD_PRIORITY;
    }

    if (((1ULL << priority) & current_process->GetPriorityMask()) == 0) {
        LOG_ERROR(Kernel_SVC, "Invalid thread priority specified ({})", priority);
        return ERR_INVALID_THREAD_PRIORITY;
    }

    auto& kernel = system.Kernel();
    CASCADE_RESULT(SharedPtr<Thread> thread,
                   Thread::Create(kernel, "", entry_point, priority, arg, processor_id, stack_top,
                                  *current_process));

    const auto new_thread_handle = current_process->GetHandleTable().Create(thread);
    if (new_thread_handle.Failed()) {
        LOG_ERROR(Kernel_SVC, "Failed to create handle with error=0x{:X}",
                  new_thread_handle.Code().raw);
        return new_thread_handle.Code();
    }
    *out_handle = *new_thread_handle;

    // Set the thread name for debugging purposes.
    thread->SetName(
        fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *new_thread_handle));

    system.PrepareReschedule(thread->GetProcessorID());

    return RESULT_SUCCESS;
}

/// Starts the thread for the provided handle
static ResultCode StartThread(Core::System& system, Handle thread_handle) {
    LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle);

    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    const SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle);
    if (!thread) {
        LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
                  thread_handle);
        return ERR_INVALID_HANDLE;
    }

    ASSERT(thread->GetStatus() == ThreadStatus::Dormant);

    thread->ResumeFromWait();

    if (thread->GetStatus() == ThreadStatus::Ready) {
        system.PrepareReschedule(thread->GetProcessorID());
    }

    return RESULT_SUCCESS;
}

/// Called when a thread exits
static void ExitThread(Core::System& system) {
    LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());

    auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
    current_thread->Stop();
    system.GlobalScheduler().RemoveThread(current_thread);
    system.PrepareReschedule();
}

/// Sleep the current thread
static void SleepThread(Core::System& system, s64 nanoseconds) {
    LOG_DEBUG(Kernel_SVC, "called nanoseconds={}", nanoseconds);
    enum class SleepType : s64 {
        YieldWithoutLoadBalancing = 0,
        YieldWithLoadBalancing = -1,
        YieldAndWaitForLoadBalancing = -2,
    };

    auto& scheduler = system.CurrentScheduler();
    auto* const current_thread = scheduler.GetCurrentThread();
    bool is_redundant = false;

    if (nanoseconds <= 0) {
        switch (static_cast<SleepType>(nanoseconds)) {
        case SleepType::YieldWithoutLoadBalancing:
            is_redundant = current_thread->YieldSimple();
            break;
        case SleepType::YieldWithLoadBalancing:
            is_redundant = current_thread->YieldAndBalanceLoad();
            break;
        case SleepType::YieldAndWaitForLoadBalancing:
            is_redundant = current_thread->YieldAndWaitForLoadBalancing();
            break;
        default:
            UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
        }
    } else {
        current_thread->Sleep(nanoseconds);
    }

    if (is_redundant) {
        // A redundant yield means the core is effectively idle. Some games busy-idle a core
        // doing nothing useful, so advance timing here to avoid a costly stream of back-to-back
        // yield calls.
        system.CoreTiming().AddTicks(2000);
    }
    system.PrepareReschedule(current_thread->GetProcessorID());
}

/// Wait process wide key atomic
static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_addr,
                                           VAddr condition_variable_addr, Handle thread_handle,
                                           s64 nano_seconds) {
    LOG_TRACE(
        Kernel_SVC,
        "called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}",
        mutex_addr, condition_variable_addr, thread_handle, nano_seconds);

    if (Memory::IsKernelVirtualAddress(mutex_addr)) {
        LOG_ERROR(
            Kernel_SVC,
            "Given mutex address must not be within the kernel address space. address=0x{:016X}",
            mutex_addr);
        return ERR_INVALID_ADDRESS_STATE;
    }

    if (!Common::IsWordAligned(mutex_addr)) {
        LOG_ERROR(Kernel_SVC, "Given mutex address must be word-aligned. address=0x{:016X}",
                  mutex_addr);
        return ERR_INVALID_ADDRESS;
    }

    ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));

    auto* const current_process = system.Kernel().CurrentProcess();
    const auto& handle_table = current_process->GetHandleTable();
    SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle);
    ASSERT(thread);

    const auto release_result = current_process->GetMutex().Release(mutex_addr);
    if (release_result.IsError()) {
        return release_result;
    }

    SharedPtr<Thread> current_thread = system.CurrentScheduler().GetCurrentThread();
    current_thread->SetCondVarWaitAddress(condition_variable_addr);
    current_thread->SetMutexWaitAddress(mutex_addr);
    current_thread->SetWaitHandle(thread_handle);
    current_thread->SetStatus(ThreadStatus::WaitCondVar);
    current_thread->InvalidateWakeupCallback();
    current_process->InsertConditionVariableThread(current_thread);

    current_thread->WakeAfterDelay(nano_seconds);

    // Note: Deliberately don't attempt to inherit the lock owner's priority.

    system.PrepareReschedule(current_thread->GetProcessorID());
    return RESULT_SUCCESS;
}

/// Signal process wide key
static ResultCode SignalProcessWideKey(Core::System& system, VAddr condition_variable_addr,
                                       s32 target) {
    LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}",
              condition_variable_addr, target);

    ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));

    // Retrieve a list of all threads that are waiting for this condition variable.
    auto* const current_process = system.Kernel().CurrentProcess();
    std::vector<SharedPtr<Thread>> waiting_threads =
        current_process->GetConditionVariableThreads(condition_variable_addr);

    // Only process up to 'target' threads, unless 'target' is less than or equal to 0, in which
    // case process them all.
    std::size_t last = waiting_threads.size();
    if (target > 0) {
        last = std::min(waiting_threads.size(), static_cast<std::size_t>(target));
    }

    for (std::size_t index = 0; index < last; ++index) {
        auto& thread = waiting_threads[index];

        ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr);

        // Remove the thread from the condition variable's wait list.
        current_process->RemoveConditionVariableThread(thread);
        thread->SetCondVarWaitAddress(0);

        const std::size_t current_core = system.CurrentCoreIndex();
        auto& monitor = system.Monitor();

        // Atomically read the value of the mutex.
        u32 mutex_val = 0;
        u32 update_val = 0;
        const VAddr mutex_address = thread->GetMutexWaitAddress();
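        // Retry with the exclusive monitor until the store succeeds: if the mutex word is
        // already held, tag it with MutexHasWaitersFlag so the owner knows to wake waiters on
        // release; otherwise take ownership directly with this thread's wait handle.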
        do {
            monitor.SetExclusive(current_core, mutex_address);

            // If the mutex is not yet acquired, acquire it.
            mutex_val = Memory::Read32(mutex_address);

            if (mutex_val != 0) {
                update_val = mutex_val | Mutex::MutexHasWaitersFlag;
            } else {
                update_val = thread->GetWaitHandle();
            }
        } while (!monitor.ExclusiveWrite32(current_core, mutex_address, update_val));
        if (mutex_val == 0) {
            // We were able to acquire the mutex, resume this thread.
            ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
            thread->ResumeFromWait();

            auto* const lock_owner = thread->GetLockOwner();
            if (lock_owner != nullptr) {
                lock_owner->RemoveMutexWaiter(thread);
            }

            thread->SetLockOwner(nullptr);
            thread->SetMutexWaitAddress(0);
            thread->SetWaitHandle(0);
            thread->SetWaitSynchronizationResult(RESULT_SUCCESS);
            system.PrepareReschedule(thread->GetProcessorID());
        } else {
            // The mutex is already owned by some other thread, make this thread wait on it.
            const Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask);
            const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
            auto owner = handle_table.Get<Thread>(owner_handle);
            ASSERT(owner);
            ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
            thread->InvalidateWakeupCallback();
            thread->SetStatus(ThreadStatus::WaitMutex);

            owner->AddMutexWaiter(thread);
            system.PrepareReschedule(thread->GetProcessorID());
        }
    }

    return RESULT_SUCCESS;
}

// Wait for an address (via Address Arbiter)
static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type, s32 value,
                                 s64 timeout) {
    LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, timeout={}", address,
              type, value, timeout);

    // If the passed address is a kernel virtual address, return invalid memory state.
    if (Memory::IsKernelVirtualAddress(address)) {
        LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address);
        return ERR_INVALID_ADDRESS_STATE;
    }

    // If the address is not properly aligned to 4 bytes, return invalid address.
    if (!Common::IsWordAligned(address)) {
        LOG_ERROR(Kernel_SVC, "Address is not word aligned, address={:016X}", address);
        return ERR_INVALID_ADDRESS;
    }

    const auto arbitration_type = static_cast<AddressArbiter::ArbitrationType>(type);
    auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter();
    const ResultCode result =
        address_arbiter.WaitForAddress(address, arbitration_type, value, timeout);
    if (result == RESULT_SUCCESS) {
        system.PrepareReschedule();
    }
    return result;
}

// Signals to an address (via Address Arbiter)
static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, s32 value,
                                  s32 num_to_wake) {
    LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, num_to_wake=0x{:X}",
              address, type, value, num_to_wake);

    // If the passed address is a kernel virtual address, return invalid memory state.
    if (Memory::IsKernelVirtualAddress(address)) {
        LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address);
        return ERR_INVALID_ADDRESS_STATE;
    }

    // If the address is not properly aligned to 4 bytes, return invalid address.
    if (!Common::IsWordAligned(address)) {
        LOG_ERROR(Kernel_SVC, "Address is not word aligned, address={:016X}", address);
        return ERR_INVALID_ADDRESS;
    }

    const auto signal_type = static_cast<AddressArbiter::SignalType>(type);
    auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter();
    return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake);
}

/// This returns the total CPU ticks elapsed since the CPU was powered-on
static u64 GetSystemTick(Core::System& system) {
    LOG_TRACE(Kernel_SVC, "called");

    auto& core_timing = system.CoreTiming();

    // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick)
    const u64 result{Core::Timing::CpuCyclesToClockCycles(core_timing.GetTicks())};

    // Advance time to defeat dumb games that busy-wait for the frame to end.
    core_timing.AddTicks(400);

    return result;
}

/// Close a handle
static ResultCode CloseHandle(Core::System& system, Handle handle) {
    LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle);

    auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    return handle_table.Close(handle);
}

/// Clears the signaled state of an event or process.
static ResultCode ResetSignal(Core::System& system, Handle handle) {
    LOG_DEBUG(Kernel_SVC, "called handle 0x{:08X}", handle);

    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();

    auto event = handle_table.Get<ReadableEvent>(handle);
    if (event) {
        return event->Reset();
    }

    auto process = handle_table.Get<Process>(handle);
    if (process) {
        return process->ClearSignalState();
    }

    LOG_ERROR(Kernel_SVC, "Invalid handle (0x{:08X})", handle);
    return ERR_INVALID_HANDLE;
}

/// Creates a TransferMemory object
static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAddr addr, u64 size,
                                       u32 permissions) {
    LOG_DEBUG(Kernel_SVC, "called addr=0x{:X}, size=0x{:X}, perms=0x{:08X}", addr, size,
              permissions);

    if (!Common::Is4KBAligned(addr)) {
        LOG_ERROR(Kernel_SVC, "Address ({:016X}) is not page aligned!", addr);
        return ERR_INVALID_ADDRESS;
    }

    if (!Common::Is4KBAligned(size) || size == 0) {
        LOG_ERROR(Kernel_SVC, "Size ({:016X}) is not page aligned or equal to zero!", size);
        return ERR_INVALID_ADDRESS;
    }

    if (!IsValidAddressRange(addr, size)) {
        LOG_ERROR(Kernel_SVC, "Address and size cause overflow! (address={:016X}, size={:016X})",
                  addr, size);
        return ERR_INVALID_ADDRESS_STATE;
    }

    const auto perms = static_cast<MemoryPermission>(permissions);
    if (perms != MemoryPermission::None && perms != MemoryPermission::Read &&
        perms != MemoryPermission::ReadWrite) {
        LOG_ERROR(Kernel_SVC, "Invalid memory permissions for transfer memory! (perms={:08X})",
                  permissions);
        return ERR_INVALID_MEMORY_PERMISSIONS;
    }

    auto& kernel = system.Kernel();
    auto transfer_mem_handle = TransferMemory::Create(kernel, addr, size, perms);

    auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
    const auto result = handle_table.Create(std::move(transfer_mem_handle));
    if (result.Failed()) {
        return result.Code();
    }

    *handle = *result;
    return RESULT_SUCCESS;
}
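
/// Maps a TransferMemory object into the current process's address space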
static ResultCode MapTransferMemory(Core::System& system, Handle handle, VAddr address, u64 size,
                                    u32 permission_raw) {
    LOG_DEBUG(Kernel_SVC,
              "called. handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}, permissions=0x{:08X}",
              handle, address, size, permission_raw);

    if (!Common::Is4KBAligned(address)) {
        LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (address=0x{:016X}).",
                  address);
        return ERR_INVALID_ADDRESS;
    }

    if (size == 0 || !Common::Is4KBAligned(size)) {
        LOG_ERROR(Kernel_SVC,
                  "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).",
                  size);
        return ERR_INVALID_SIZE;
    }

    if (!IsValidAddressRange(address, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Given address and size overflows the 64-bit range (address=0x{:016X}, "
                  "size=0x{:016X}).",
                  address, size);
        return ERR_INVALID_ADDRESS_STATE;
    }

    const auto permissions = static_cast<MemoryPermission>(permission_raw);
    if (permissions != MemoryPermission::None && permissions != MemoryPermission::Read &&
        permissions != MemoryPermission::ReadWrite) {
        LOG_ERROR(Kernel_SVC, "Invalid transfer memory permissions given (permissions=0x{:08X}).",
                  permission_raw);
        return ERR_INVALID_STATE;
    }

    const auto& kernel = system.Kernel();
    const auto* const current_process = kernel.CurrentProcess();
    const auto& handle_table = current_process->GetHandleTable();

    auto transfer_memory = handle_table.Get<TransferMemory>(handle);
    if (!transfer_memory) {
        LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).",
                  handle);
        return ERR_INVALID_HANDLE;
    }

    if (!current_process->VMManager().IsWithinASLRRegion(address, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Given address and size don't fully fit within the ASLR region "
                  "(address=0x{:016X}, size=0x{:016X}).",
                  address, size);
        return ERR_INVALID_MEMORY_RANGE;
    }

    return transfer_memory->MapMemory(address, size, permissions);
}
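
/// Unmaps a TransferMemory object previously mapped via MapTransferMemory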
static ResultCode UnmapTransferMemory(Core::System& system, Handle handle, VAddr address,
                                      u64 size) {
    LOG_DEBUG(Kernel_SVC, "called. handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}", handle,
              address, size);

    if (!Common::Is4KBAligned(address)) {
        LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (address=0x{:016X}).",
                  address);
        return ERR_INVALID_ADDRESS;
    }

    if (size == 0 || !Common::Is4KBAligned(size)) {
        LOG_ERROR(Kernel_SVC,
                  "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).",
                  size);
        return ERR_INVALID_SIZE;
    }

    if (!IsValidAddressRange(address, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Given address and size overflows the 64-bit range (address=0x{:016X}, "
                  "size=0x{:016X}).",
                  address, size);
        return ERR_INVALID_ADDRESS_STATE;
    }

    const auto& kernel = system.Kernel();
    const auto* const current_process = kernel.CurrentProcess();
    const auto& handle_table = current_process->GetHandleTable();

    auto transfer_memory = handle_table.Get<TransferMemory>(handle);
    if (!transfer_memory) {
        LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).",
                  handle);
        return ERR_INVALID_HANDLE;
    }

    if (!current_process->VMManager().IsWithinASLRRegion(address, size)) {
        LOG_ERROR(Kernel_SVC,
                  "Given address and size don't fully fit within the ASLR region "
                  "(address=0x{:016X}, size=0x{:016X}).",
                  address, size);
        return ERR_INVALID_MEMORY_RANGE;
    }

    return transfer_memory->UnmapMemory(address, size);
}
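
/// Gets the ideal core and affinity mask of the specified thread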
static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, u32* core,
                                    u64* mask) {
    LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);

    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    const SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle);
    if (!thread) {
        LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
                  thread_handle);
        return ERR_INVALID_HANDLE;
    }

    *core = thread->GetIdealCore();
    *mask = thread->GetAffinityMask();

    return RESULT_SUCCESS;
}
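
/// Sets the core and affinity mask of the specified thread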
static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, u32 core,
                                    u64 affinity_mask) {
    LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, core=0x{:X}, affinity_mask=0x{:016X}",
              thread_handle, core, affinity_mask);

    const auto* const current_process = system.Kernel().CurrentProcess();

    if (core == static_cast<u32>(THREADPROCESSORID_IDEAL)) {
        const u8 ideal_cpu_core = current_process->GetIdealCore();

        ASSERT(ideal_cpu_core != static_cast<u8>(THREADPROCESSORID_IDEAL));

        // Set the target CPU to the ideal core specified by the process.
        core = ideal_cpu_core;
        affinity_mask = 1ULL << core;
    } else {
        const u64 core_mask = current_process->GetCoreMask();

        if ((core_mask | affinity_mask) != core_mask) {
            LOG_ERROR(
                Kernel_SVC,
                "Invalid processor ID specified (core_mask=0x{:08X}, affinity_mask=0x{:016X})",
                core_mask, affinity_mask);
            return ERR_INVALID_PROCESSOR_ID;
        }
|
2018-05-30 19:03:19 +02:00
|
|
|
|
2019-04-16 02:34:55 +02:00
|
|
|
if (affinity_mask == 0) {
|
|
|
|
LOG_ERROR(Kernel_SVC, "Specfified affinity mask is zero.");
|
|
|
|
return ERR_INVALID_COMBINATION;
|
|
|
|
}
|
2018-05-30 19:03:19 +02:00
|
|
|
|
2019-04-16 02:34:55 +02:00
|
|
|
if (core < Core::NUM_CPU_CORES) {
|
|
|
|
if ((affinity_mask & (1ULL << core)) == 0) {
|
|
|
|
LOG_ERROR(Kernel_SVC,
|
|
|
|
"Core is not enabled for the current mask, core={}, mask={:016X}", core,
|
|
|
|
affinity_mask);
|
|
|
|
return ERR_INVALID_COMBINATION;
|
|
|
|
}
|
|
|
|
} else if (core != static_cast<u32>(THREADPROCESSORID_DONT_CARE) &&
|
|
|
|
core != static_cast<u32>(THREADPROCESSORID_DONT_UPDATE)) {
|
|
|
|
LOG_ERROR(Kernel_SVC, "Invalid processor ID specified (core={}).", core);
|
|
|
|
return ERR_INVALID_PROCESSOR_ID;
|
|
|
|
}
|
2018-05-30 19:03:19 +02:00
|
|
|
}
|
|
|
|
|
2019-04-16 02:34:55 +02:00
|
|
|
const auto& handle_table = current_process->GetHandleTable();
|
|
|
|
const SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle);
|
|
|
|
if (!thread) {
|
|
|
|
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
|
|
|
|
thread_handle);
|
|
|
|
return ERR_INVALID_HANDLE;
|
2018-05-30 19:03:19 +02:00
|
|
|
}
|
|
|
|
|
2019-04-02 15:22:53 +02:00
|
|
|
system.PrepareReschedule(thread->GetProcessorID());
|
2019-04-16 02:34:55 +02:00
|
|
|
thread->ChangeCore(core, affinity_mask);
|
2019-04-02 15:22:53 +02:00
|
|
|
system.PrepareReschedule(thread->GetProcessorID());
|
|
|
|
|
2018-01-16 23:23:53 +01:00
|
|
|
return RESULT_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2019-04-07 00:46:18 +02:00
|
|
|
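// Creates a shared memory object of the given size with the requested local/remote permissions
// and returns a handle to it (SVC 0x50).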
static ResultCode CreateSharedMemory(Core::System& system, Handle* handle, u64 size,
                                     u32 local_permissions, u32 remote_permissions) {
    LOG_TRACE(Kernel_SVC, "called, size=0x{:X}, localPerms=0x{:08X}, remotePerms=0x{:08X}", size,
              local_permissions, remote_permissions);

    if (size == 0) {
        LOG_ERROR(Kernel_SVC, "Size is 0");
        return ERR_INVALID_SIZE;
    }
    if (!Common::Is4KBAligned(size)) {
        LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:016X}", size);
        return ERR_INVALID_SIZE;
    }

    if (size >= MAIN_MEMORY_SIZE) {
        LOG_ERROR(Kernel_SVC, "Size is not less than 8GB, 0x{:016X}", size);
        return ERR_INVALID_SIZE;
    }

    const auto local_perms = static_cast<MemoryPermission>(local_permissions);
    if (local_perms != MemoryPermission::Read && local_perms != MemoryPermission::ReadWrite) {
        LOG_ERROR(Kernel_SVC,
                  "Invalid local memory permissions, expected Read or ReadWrite but got "
                  "local_permissions={}",
                  static_cast<u32>(local_permissions));
        return ERR_INVALID_MEMORY_PERMISSIONS;
    }

    const auto remote_perms = static_cast<MemoryPermission>(remote_permissions);
    if (remote_perms != MemoryPermission::Read && remote_perms != MemoryPermission::ReadWrite &&
        remote_perms != MemoryPermission::DontCare) {
        LOG_ERROR(Kernel_SVC,
                  "Invalid remote memory permissions, expected Read, ReadWrite or DontCare but got "
                  "remote_permissions={}",
                  static_cast<u32>(remote_permissions));
        return ERR_INVALID_MEMORY_PERMISSIONS;
    }

    auto& kernel = system.Kernel();
    auto process = kernel.CurrentProcess();
    auto& handle_table = process->GetHandleTable();
    auto shared_mem_handle = SharedMemory::Create(kernel, process, size, local_perms, remote_perms);

    CASCADE_RESULT(*handle, handle_table.Create(shared_mem_handle));
    return RESULT_SUCCESS;
}

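// Creates a writable/readable event pair and returns a handle to each half (SVC 0x45).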
static ResultCode CreateEvent(Core::System& system, Handle* write_handle, Handle* read_handle) {
    LOG_DEBUG(Kernel_SVC, "called");

    auto& kernel = system.Kernel();
    const auto [readable_event, writable_event] =
        WritableEvent::CreateEventPair(kernel, "CreateEvent");

    HandleTable& handle_table = kernel.CurrentProcess()->GetHandleTable();

    const auto write_create_result = handle_table.Create(writable_event);
    if (write_create_result.Failed()) {
        return write_create_result.Code();
    }
    *write_handle = *write_create_result;

    const auto read_create_result = handle_table.Create(readable_event);
    if (read_create_result.Failed()) {
        handle_table.Close(*write_create_result);
        return read_create_result.Code();
    }
    *read_handle = *read_create_result;

    LOG_DEBUG(Kernel_SVC,
              "successful. Writable event handle=0x{:08X}, Readable event handle=0x{:08X}",
              *write_create_result, *read_create_result);
    return RESULT_SUCCESS;
}

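// Resets the event referred to by the given handle to the non-signaled state (SVC 0x12).
// Either half of an event pair is accepted.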
static ResultCode ClearEvent(Core::System& system, Handle handle) {
    LOG_TRACE(Kernel_SVC, "called, event=0x{:08X}", handle);

    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();

    auto writable_event = handle_table.Get<WritableEvent>(handle);
    if (writable_event) {
        writable_event->Clear();
        return RESULT_SUCCESS;
    }

    auto readable_event = handle_table.Get<ReadableEvent>(handle);
    if (readable_event) {
        readable_event->Clear();
        return RESULT_SUCCESS;
    }

    LOG_ERROR(Kernel_SVC, "Event handle does not exist, handle=0x{:08X}", handle);
    return ERR_INVALID_HANDLE;
}

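// Signals the writable event referred to by the given handle (SVC 0x11).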
static ResultCode SignalEvent(Core::System& system, Handle handle) {
    LOG_DEBUG(Kernel_SVC, "called. Handle=0x{:08X}", handle);

    HandleTable& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    auto writable_event = handle_table.Get<WritableEvent>(handle);

    if (!writable_event) {
        LOG_ERROR(Kernel_SVC, "Non-existent writable event handle used (0x{:08X})", handle);
        return ERR_INVALID_HANDLE;
    }

    writable_event->Signal();
    system.PrepareReschedule();
    return RESULT_SUCCESS;
}

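// Retrieves information about the process referred to by the given handle (SVC 0x7C).
// Only the process status query is currently supported.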
static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) {
    LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, type);

    // This function currently only allows retrieving a process' status.
    enum class InfoType {
        Status,
    };

    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
    const auto process = handle_table.Get<Process>(process_handle);
    if (!process) {
        LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}",
                  process_handle);
        return ERR_INVALID_HANDLE;
    }

    const auto info_type = static_cast<InfoType>(type);
    if (info_type != InfoType::Status) {
        LOG_ERROR(Kernel_SVC, "Expected info_type to be Status but got {} instead", type);
        return ERR_INVALID_ENUM_VALUE;
    }

    *out = static_cast<u64>(process->GetStatus());
    return RESULT_SUCCESS;
}

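// Creates a new resource limit object and returns a handle to it (SVC 0x7D).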
static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle) {
    LOG_DEBUG(Kernel_SVC, "called");

    auto& kernel = system.Kernel();
    auto resource_limit = ResourceLimit::Create(kernel);

    auto* const current_process = kernel.CurrentProcess();
    ASSERT(current_process != nullptr);

    const auto handle = current_process->GetHandleTable().Create(std::move(resource_limit));
    if (handle.Failed()) {
        return handle.Code();
    }

    *out_handle = *handle;
    return RESULT_SUCCESS;
}

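// Retrieves the maximum value of the given resource type for a resource limit object (SVC 0x30).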
static ResultCode GetResourceLimitLimitValue(Core::System& system, u64* out_value,
                                             Handle resource_limit, u32 resource_type) {
    LOG_DEBUG(Kernel_SVC, "called. Handle={:08X}, Resource type={}", resource_limit, resource_type);

    const auto limit_value = RetrieveResourceLimitValue(system, resource_limit, resource_type,
                                                        ResourceLimitValueType::LimitValue);
    if (limit_value.Failed()) {
        return limit_value.Code();
    }

    *out_value = static_cast<u64>(*limit_value);
    return RESULT_SUCCESS;
}

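// Retrieves the current usage of the given resource type for a resource limit object (SVC 0x31).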
static ResultCode GetResourceLimitCurrentValue(Core::System& system, u64* out_value,
                                               Handle resource_limit, u32 resource_type) {
    LOG_DEBUG(Kernel_SVC, "called. Handle={:08X}, Resource type={}", resource_limit, resource_type);

    const auto current_value = RetrieveResourceLimitValue(system, resource_limit, resource_type,
                                                          ResourceLimitValueType::CurrentValue);
    if (current_value.Failed()) {
        return current_value.Code();
    }

    *out_value = static_cast<u64>(*current_value);
    return RESULT_SUCCESS;
}

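// Sets the maximum value of the given resource type for a resource limit object (SVC 0x7E).
// Lowering a limit below its current usage is rejected.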
static ResultCode SetResourceLimitLimitValue(Core::System& system, Handle resource_limit,
                                             u32 resource_type, u64 value) {
    LOG_DEBUG(Kernel_SVC, "called. Handle={:08X}, Resource type={}, Value={}", resource_limit,
              resource_type, value);

    const auto type = static_cast<ResourceType>(resource_type);
    if (!IsValidResourceType(type)) {
        LOG_ERROR(Kernel_SVC, "Invalid resource limit type: '{}'", resource_type);
        return ERR_INVALID_ENUM_VALUE;
    }

    auto* const current_process = system.Kernel().CurrentProcess();
    ASSERT(current_process != nullptr);

    auto resource_limit_object =
        current_process->GetHandleTable().Get<ResourceLimit>(resource_limit);
    if (!resource_limit_object) {
        LOG_ERROR(Kernel_SVC, "Handle to non-existent resource limit instance used. Handle={:08X}",
                  resource_limit);
        return ERR_INVALID_HANDLE;
    }

    const auto set_result = resource_limit_object->SetLimitValue(type, static_cast<s64>(value));
    if (set_result.IsError()) {
        LOG_ERROR(
            Kernel_SVC,
            "Attempted to lower resource limit ({}) for category '{}' below its current value ({})",
            resource_limit_object->GetMaxResourceValue(type), resource_type,
            resource_limit_object->GetCurrentResourceValue(type));
        return set_result;
    }

    return RESULT_SUCCESS;
}

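// Writes up to out_process_ids_size process IDs into guest memory and reports the total number
// of processes in the system (SVC 0x65).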
static ResultCode GetProcessList(Core::System& system, u32* out_num_processes,
                                 VAddr out_process_ids, u32 out_process_ids_size) {
    LOG_DEBUG(Kernel_SVC, "called. out_process_ids=0x{:016X}, out_process_ids_size={}",
              out_process_ids, out_process_ids_size);

    // If the supplied size is negative or greater than INT32_MAX / sizeof(u64), bail.
    if ((out_process_ids_size & 0xF0000000) != 0) {
        LOG_ERROR(Kernel_SVC,
                  "Supplied size outside [0, 0x0FFFFFFF] range. out_process_ids_size={}",
                  out_process_ids_size);
        return ERR_OUT_OF_RANGE;
    }

    const auto& kernel = system.Kernel();
    const auto& vm_manager = kernel.CurrentProcess()->VMManager();
    const auto total_copy_size = out_process_ids_size * sizeof(u64);

    if (out_process_ids_size > 0 &&
        !vm_manager.IsWithinAddressSpace(out_process_ids, total_copy_size)) {
        LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
                  out_process_ids, out_process_ids + total_copy_size);
        return ERR_INVALID_ADDRESS_STATE;
    }

    const auto& process_list = kernel.GetProcessList();
    const auto num_processes = process_list.size();
    const auto copy_amount = std::min(std::size_t{out_process_ids_size}, num_processes);

    for (std::size_t i = 0; i < copy_amount; ++i) {
        Memory::Write64(out_process_ids, process_list[i]->GetProcessID());
        out_process_ids += sizeof(u64);
    }

    *out_num_processes = static_cast<u32>(num_processes);
    return RESULT_SUCCESS;
}

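// Writes up to out_thread_ids_size thread IDs owned by the current process into guest memory
// and reports the total thread count (SVC 0x66).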
static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAddr out_thread_ids,
                                u32 out_thread_ids_size, Handle debug_handle) {
    // TODO: Handle this case when debug events are supported.
    UNIMPLEMENTED_IF(debug_handle != InvalidHandle);

    LOG_DEBUG(Kernel_SVC, "called. out_thread_ids=0x{:016X}, out_thread_ids_size={}",
              out_thread_ids, out_thread_ids_size);

    // If the supplied size is negative or larger than INT32_MAX / sizeof(u64), bail.
    if ((out_thread_ids_size & 0xF0000000) != 0) {
        LOG_ERROR(Kernel_SVC, "Supplied size outside [0, 0x0FFFFFFF] range. size={}",
                  out_thread_ids_size);
        return ERR_OUT_OF_RANGE;
    }

    const auto* const current_process = system.Kernel().CurrentProcess();
    const auto& vm_manager = current_process->VMManager();
    const auto total_copy_size = out_thread_ids_size * sizeof(u64);

    if (out_thread_ids_size > 0 &&
        !vm_manager.IsWithinAddressSpace(out_thread_ids, total_copy_size)) {
        LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
                  out_thread_ids, out_thread_ids + total_copy_size);
        return ERR_INVALID_ADDRESS_STATE;
    }

    const auto& thread_list = current_process->GetThreadList();
    const auto num_threads = thread_list.size();
    const auto copy_amount = std::min(std::size_t{out_thread_ids_size}, num_threads);

    auto list_iter = thread_list.cbegin();
    for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) {
        Memory::Write64(out_thread_ids, (*list_iter)->GetThreadID());
        out_thread_ids += sizeof(u64);
    }

    *out_num_threads = static_cast<u32>(num_threads);
    return RESULT_SUCCESS;
}

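// Descriptor tying an SVC number to its HLE handler and name.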
namespace {
struct FunctionDef {
    using Func = void(Core::System&);

    u32 id;
    Func* func;
    const char* name;
};
} // namespace

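// Table of all known SVCs, indexed by SVC number. Entries with a null handler are recognized
// but not yet implemented.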
static const FunctionDef SVC_Table[] = {
    {0x00, nullptr, "Unknown"},
    {0x01, SvcWrap<SetHeapSize>, "SetHeapSize"},
    {0x02, SvcWrap<SetMemoryPermission>, "SetMemoryPermission"},
    {0x03, SvcWrap<SetMemoryAttribute>, "SetMemoryAttribute"},
    {0x04, SvcWrap<MapMemory>, "MapMemory"},
    {0x05, SvcWrap<UnmapMemory>, "UnmapMemory"},
    {0x06, SvcWrap<QueryMemory>, "QueryMemory"},
    {0x07, SvcWrap<ExitProcess>, "ExitProcess"},
    {0x08, SvcWrap<CreateThread>, "CreateThread"},
    {0x09, SvcWrap<StartThread>, "StartThread"},
    {0x0A, SvcWrap<ExitThread>, "ExitThread"},
    {0x0B, SvcWrap<SleepThread>, "SleepThread"},
    {0x0C, SvcWrap<GetThreadPriority>, "GetThreadPriority"},
    {0x0D, SvcWrap<SetThreadPriority>, "SetThreadPriority"},
    {0x0E, SvcWrap<GetThreadCoreMask>, "GetThreadCoreMask"},
    {0x0F, SvcWrap<SetThreadCoreMask>, "SetThreadCoreMask"},
    {0x10, SvcWrap<GetCurrentProcessorNumber>, "GetCurrentProcessorNumber"},
    {0x11, SvcWrap<SignalEvent>, "SignalEvent"},
    {0x12, SvcWrap<ClearEvent>, "ClearEvent"},
    {0x13, SvcWrap<MapSharedMemory>, "MapSharedMemory"},
    {0x14, SvcWrap<UnmapSharedMemory>, "UnmapSharedMemory"},
    {0x15, SvcWrap<CreateTransferMemory>, "CreateTransferMemory"},
    {0x16, SvcWrap<CloseHandle>, "CloseHandle"},
    {0x17, SvcWrap<ResetSignal>, "ResetSignal"},
    {0x18, SvcWrap<WaitSynchronization>, "WaitSynchronization"},
    {0x19, SvcWrap<CancelSynchronization>, "CancelSynchronization"},
    {0x1A, SvcWrap<ArbitrateLock>, "ArbitrateLock"},
    {0x1B, SvcWrap<ArbitrateUnlock>, "ArbitrateUnlock"},
    {0x1C, SvcWrap<WaitProcessWideKeyAtomic>, "WaitProcessWideKeyAtomic"},
    {0x1D, SvcWrap<SignalProcessWideKey>, "SignalProcessWideKey"},
    {0x1E, SvcWrap<GetSystemTick>, "GetSystemTick"},
    {0x1F, SvcWrap<ConnectToNamedPort>, "ConnectToNamedPort"},
    {0x20, nullptr, "SendSyncRequestLight"},
    {0x21, SvcWrap<SendSyncRequest>, "SendSyncRequest"},
    {0x22, nullptr, "SendSyncRequestWithUserBuffer"},
    {0x23, nullptr, "SendAsyncRequestWithUserBuffer"},
    {0x24, SvcWrap<GetProcessId>, "GetProcessId"},
    {0x25, SvcWrap<GetThreadId>, "GetThreadId"},
    {0x26, SvcWrap<Break>, "Break"},
    {0x27, SvcWrap<OutputDebugString>, "OutputDebugString"},
    {0x28, nullptr, "ReturnFromException"},
    {0x29, SvcWrap<GetInfo>, "GetInfo"},
    {0x2A, nullptr, "FlushEntireDataCache"},
    {0x2B, nullptr, "FlushDataCache"},
    {0x2C, SvcWrap<MapPhysicalMemory>, "MapPhysicalMemory"},
    {0x2D, SvcWrap<UnmapPhysicalMemory>, "UnmapPhysicalMemory"},
    {0x2E, nullptr, "GetFutureThreadInfo"},
    {0x2F, nullptr, "GetLastThreadInfo"},
    {0x30, SvcWrap<GetResourceLimitLimitValue>, "GetResourceLimitLimitValue"},
    {0x31, SvcWrap<GetResourceLimitCurrentValue>, "GetResourceLimitCurrentValue"},
    {0x32, SvcWrap<SetThreadActivity>, "SetThreadActivity"},
    {0x33, SvcWrap<GetThreadContext>, "GetThreadContext"},
    {0x34, SvcWrap<WaitForAddress>, "WaitForAddress"},
    {0x35, SvcWrap<SignalToAddress>, "SignalToAddress"},
    {0x36, nullptr, "SynchronizePreemptionState"},
    {0x37, nullptr, "Unknown"},
    {0x38, nullptr, "Unknown"},
    {0x39, nullptr, "Unknown"},
    {0x3A, nullptr, "Unknown"},
    {0x3B, nullptr, "Unknown"},
    {0x3C, nullptr, "DumpInfo"},
    {0x3D, nullptr, "DumpInfoNew"},
    {0x3E, nullptr, "Unknown"},
    {0x3F, nullptr, "Unknown"},
    {0x40, nullptr, "CreateSession"},
    {0x41, nullptr, "AcceptSession"},
    {0x42, nullptr, "ReplyAndReceiveLight"},
    {0x43, nullptr, "ReplyAndReceive"},
    {0x44, nullptr, "ReplyAndReceiveWithUserBuffer"},
    {0x45, SvcWrap<CreateEvent>, "CreateEvent"},
    {0x46, nullptr, "Unknown"},
    {0x47, nullptr, "Unknown"},
    {0x48, nullptr, "MapPhysicalMemoryUnsafe"},
    {0x49, nullptr, "UnmapPhysicalMemoryUnsafe"},
    {0x4A, nullptr, "SetUnsafeLimit"},
    {0x4B, nullptr, "CreateCodeMemory"},
    {0x4C, nullptr, "ControlCodeMemory"},
    {0x4D, nullptr, "SleepSystem"},
    {0x4E, nullptr, "ReadWriteRegister"},
    {0x4F, nullptr, "SetProcessActivity"},
    {0x50, SvcWrap<CreateSharedMemory>, "CreateSharedMemory"},
    {0x51, SvcWrap<MapTransferMemory>, "MapTransferMemory"},
    {0x52, SvcWrap<UnmapTransferMemory>, "UnmapTransferMemory"},
    {0x53, nullptr, "CreateInterruptEvent"},
    {0x54, nullptr, "QueryPhysicalAddress"},
    {0x55, nullptr, "QueryIoMapping"},
    {0x56, nullptr, "CreateDeviceAddressSpace"},
    {0x57, nullptr, "AttachDeviceAddressSpace"},
    {0x58, nullptr, "DetachDeviceAddressSpace"},
    {0x59, nullptr, "MapDeviceAddressSpaceByForce"},
    {0x5A, nullptr, "MapDeviceAddressSpaceAligned"},
    {0x5B, nullptr, "MapDeviceAddressSpace"},
    {0x5C, nullptr, "UnmapDeviceAddressSpace"},
    {0x5D, nullptr, "InvalidateProcessDataCache"},
    {0x5E, nullptr, "StoreProcessDataCache"},
    {0x5F, nullptr, "FlushProcessDataCache"},
    {0x60, nullptr, "DebugActiveProcess"},
    {0x61, nullptr, "BreakDebugProcess"},
    {0x62, nullptr, "TerminateDebugProcess"},
    {0x63, nullptr, "GetDebugEvent"},
    {0x64, nullptr, "ContinueDebugEvent"},
    {0x65, SvcWrap<GetProcessList>, "GetProcessList"},
    {0x66, SvcWrap<GetThreadList>, "GetThreadList"},
    {0x67, nullptr, "GetDebugThreadContext"},
    {0x68, nullptr, "SetDebugThreadContext"},
    {0x69, nullptr, "QueryDebugProcessMemory"},
    {0x6A, nullptr, "ReadDebugProcessMemory"},
    {0x6B, nullptr, "WriteDebugProcessMemory"},
    {0x6C, nullptr, "SetHardwareBreakPoint"},
    {0x6D, nullptr, "GetDebugThreadParam"},
    {0x6E, nullptr, "Unknown"},
    {0x6F, nullptr, "GetSystemInfo"},
    {0x70, nullptr, "CreatePort"},
    {0x71, nullptr, "ManageNamedPort"},
    {0x72, nullptr, "ConnectToPort"},
    {0x73, nullptr, "SetProcessMemoryPermission"},
    {0x74, nullptr, "MapProcessMemory"},
    {0x75, nullptr, "UnmapProcessMemory"},
    {0x76, SvcWrap<QueryProcessMemory>, "QueryProcessMemory"},
    {0x77, SvcWrap<MapProcessCodeMemory>, "MapProcessCodeMemory"},
    {0x78, SvcWrap<UnmapProcessCodeMemory>, "UnmapProcessCodeMemory"},
    {0x79, nullptr, "CreateProcess"},
    {0x7A, nullptr, "StartProcess"},
    {0x7B, nullptr, "TerminateProcess"},
    {0x7C, SvcWrap<GetProcessInfo>, "GetProcessInfo"},
    {0x7D, SvcWrap<CreateResourceLimit>, "CreateResourceLimit"},
    {0x7E, SvcWrap<SetResourceLimitLimitValue>, "SetResourceLimitLimitValue"},
    {0x7F, nullptr, "CallSecureMonitor"},
};

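// Looks up the table entry for the given SVC number, returning nullptr if it is out of range.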
static const FunctionDef* GetSVCInfo(u32 func_num) {
    if (func_num >= std::size(SVC_Table)) {
        LOG_ERROR(Kernel_SVC, "Unknown svc=0x{:02X}", func_num);
        return nullptr;
    }
    return &SVC_Table[func_num];
}

MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70));

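// Dispatches an SVC by number: looks up its table entry and invokes the handler under the
// global HLE lock.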
void CallSVC(Core::System& system, u32 immediate) {
    MICROPROFILE_SCOPE(Kernel_SVC);

    // Lock the global kernel mutex when we enter the kernel HLE.
    std::lock_guard lock{HLE::g_hle_lock};

    const FunctionDef* info = GetSVCInfo(immediate);
    if (info) {
        if (info->func) {
            info->func(system);
        } else {
            LOG_CRITICAL(Kernel_SVC, "Unimplemented SVC function {}(..)", info->name);
        }
    } else {
        LOG_CRITICAL(Kernel_SVC, "Unknown SVC function 0x{:X}", immediate);
    }
}

} // namespace Kernel