chore: make yuzu REUSE compliant
[REUSE] is a specification that aims to make file copyright
information consistent, so that it is both human- and
machine-readable. It essentially requires that every file carry a
header containing copyright and licensing information. When that isn't
possible, as with binary assets, generated files or embedded
third-party dependencies, the copyright information may instead be
recorded in the `.reuse/dep5` file.
It also requires that all the licenses used in the project are
present in the `LICENSES` folder; that's why the diff is so huge.
Populating that folder can be done automatically with
`reuse download --all`.
The `reuse` tool also provides a handy subcommand, `reuse lint`, that
analyzes the project and reports whether it is (still) compliant.
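For reference, the header REUSE expects at the top of a source file is
just a pair of SPDX comments; for a C++ file in this repository it
looks like this:

// SPDX-FileCopyrightText: 2014 Citra Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
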
Following REUSE has a few advantages over the current approach:
- Copyright information is easy for users and downstream projects to
access
- Files like `dist/license.md` no longer need to exist, as
`.reuse/dep5` is used instead
- `reuse lint` makes it easy to ensure that the copyright information
of files like binary assets / images stays accurate and up to date
To add copyright information to files that didn't have it, I looked up
who committed what and when for each file. As yuzu contributors do not
have to sign a CLA or similar agreement, I couldn't assume that
copyright belongs to the "yuzu Emulator Project", so I used the name
and/or email of the commit author instead.
[REUSE]: https://reuse.software
Follow-up to 01cf05bc75b1e47beb08937439f3ed9339e7b254
2022-05-15 02:06:02 +02:00

// SPDX-FileCopyrightText: 2014 Citra Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <array>
#include <span>
#include <string>
#include <vector>

#include <dynarmic/interface/halt_reason.h>

#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hardware_properties.h"
core/cpu_core_manager: Create threads separately from initialization.
Our initialization process is a little wonkier than one would expect
when it comes to code flow. We initialize the CPU last, as opposed to
real hardware, where the CPU obviously needs to come up first or
nothing else would work, and we have code that adds checks to work
around this.
For example, in the page table setting code, we check to see if the
system is turned on before we even notify the CPU instances of a page
table switch. This results in dead code (at the moment), because the
only time a page table switch will occur is when the system is *not*
running, preventing the emulated CPU instances from being notified of a
page table switch in a convenient manner (technically the code path
could be taken, but we don't emulate the process creation svc handlers
yet).
This moves thread creation into its own member function of the core
manager and restores a little order (and predictability) to our
initialization process.
Previously, in the multi-threaded cases, we'd kick off several threads
before the main kernel process was even created and ready to execute
(gross!).
Now the initialization process is like so:
Initialization:
1. Timers
2. CPU
3. Kernel
4. Filesystem stuff (kind of gross, but can be amended trivially)
5. Applet stuff (ditto in terms of being kind of gross)
6. Main process (will be moved into the loading step in a following
change)
7. Telemetry (this should be initialized last in the future).
8. Services (4 and 5 should ideally be alongside this).
9. GDB (gross. Uses namespace scope state. Needs to be refactored into a
class or booted altogether).
10. Renderer
11. GPU (will also have its threads created in a separate step in a
following change).
Which... isn't *ideal* per se; however, pulling the wonky intertwining
of CPU state initialization out of this mix gets rid of most of the
footguns in our initialization process.
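
Roughly, this is the shape we're going for (an illustrative sketch only;
the names below are placeholders, not the actual yuzu API):

class System;

class CpuCoreManager {
public:
    // Sets up per-core CPU state; no threads are spawned here.
    void Initialize(System& system);

    // Spawns the core threads, called only once the kernel and the main
    // process are ready to run.
    void StartThreads();
};

// Boot code can then make the intended ordering explicit:
//   core_manager.Initialize(system);
//   ... kernel, services, main process setup ...
//   core_manager.StartThreads();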
2019-04-09 19:25:54 +02:00

namespace Common {
struct PageTable;
}

namespace Kernel {
enum class VMAPermission : u8;
enum class DebugWatchpointType : u8;
struct DebugWatchpoint;
} // namespace Kernel

namespace Core {
class System;
class CPUInterruptHandler;

using WatchpointArray = std::array<Kernel::DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>;

/// Generic ARMv8 CPU interface
class ARM_Interface {
public:
    YUZU_NON_COPYABLE(ARM_Interface);
    YUZU_NON_MOVEABLE(ARM_Interface);

    explicit ARM_Interface(System& system_, bool uses_wall_clock_)
        : system{system_}, uses_wall_clock{uses_wall_clock_} {}
    virtual ~ARM_Interface() = default;

    struct ThreadContext32 {
        std::array<u32, 16> cpu_registers{};
        std::array<u32, 64> extension_registers{};
        u32 cpsr{};
        u32 fpscr{};
        u32 fpexc{};
        u32 tpidr{};
    };
    // Internally within the kernel, it expects the AArch32 version of the
    // thread context to be 336 (0x150) bytes in size.
    static_assert(sizeof(ThreadContext32) == 0x150);
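    // Size check: 16 * 4 + 64 * 4 + 4 * 4 (cpsr, fpscr, fpexc, tpidr) = 336 bytes.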

    struct ThreadContext64 {
        std::array<u64, 31> cpu_registers{};
        u64 sp{};
        u64 pc{};
        u32 pstate{};
        std::array<u8, 4> padding{};
        std::array<u128, 32> vector_registers{};
        u32 fpcr{};
        u32 fpsr{};
        u64 tpidr{};
    };
    // Internally within the kernel, it expects the AArch64 version of the
    // thread context to be 800 bytes in size.
    static_assert(sizeof(ThreadContext64) == 0x320);
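    // Size check: 31 * 8 + 8 + 8 + 4 + 4 + 32 * 16 + 4 + 4 + 8 = 800 bytes.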

    /// Runs the CPU until an event happens
    void Run();

    /// Clear all instruction cache
    virtual void ClearInstructionCache() = 0;

    /**
     * Clear instruction cache range
     * @param addr Start address of the cache range to clear
     * @param size Size of the cache range to clear, starting at addr
     */
    virtual void InvalidateCacheRange(u64 addr, std::size_t size) = 0;

    /**
     * Notifies CPU emulation that the current page table has changed.
     * @param new_page_table The new page table.
     * @param new_address_space_size_in_bits The new usable size of the address space in bits.
     * This can be either 32, 36, or 39 on official software.
     */
    virtual void PageTableChanged(Common::PageTable& new_page_table,
                                  std::size_t new_address_space_size_in_bits) = 0;

    /**
     * Set the Program Counter to an address
     * @param addr Address to set PC to
     */
    virtual void SetPC(u64 addr) = 0;

    /**
     * Get the current Program Counter
     * @return Returns current PC
     */
    virtual u64 GetPC() const = 0;

    /**
     * Get the current Stack Pointer
     * @return Returns current SP
     */
    virtual u64 GetSP() const = 0;

    /**
     * Get an ARM register
     * @param index Register index
     * @return Returns the value in the register
     */
    virtual u64 GetReg(int index) const = 0;

    /**
     * Set an ARM register
     * @param index Register index
     * @param value Value to set register to
     */
    virtual void SetReg(int index, u64 value) = 0;

    /**
     * Gets the value of a specified vector register.
     *
     * @param index The index of the vector register.
     * @return the value within the vector register.
     */
    virtual u128 GetVectorReg(int index) const = 0;

    /**
     * Sets a given value into a vector register.
     *
     * @param index The index of the vector register.
     * @param value The new value to place in the register.
     */
    virtual void SetVectorReg(int index, u128 value) = 0;

    /**
     * Get the current PSTATE register
     * @return Returns the value of the PSTATE register
     */
    virtual u32 GetPSTATE() const = 0;

    /**
     * Set the current PSTATE register
     * @param pstate Value to set PSTATE to
     */
    virtual void SetPSTATE(u32 pstate) = 0;

    virtual u64 GetTlsAddress() const = 0;

    virtual void SetTlsAddress(u64 address) = 0;

    /**
     * Gets the value within the TPIDR_EL0 (read/write software thread ID) register.
     *
     * @return the value within the register.
     */
    virtual u64 GetTPIDR_EL0() const = 0;

    /**
     * Sets a new value within the TPIDR_EL0 (read/write software thread ID) register.
     *
     * @param value The new value to place in the register.
     */
    virtual void SetTPIDR_EL0(u64 value) = 0;

    virtual void SaveContext(ThreadContext32& ctx) = 0;
    virtual void SaveContext(ThreadContext64& ctx) = 0;
    virtual void LoadContext(const ThreadContext32& ctx) = 0;
    virtual void LoadContext(const ThreadContext64& ctx) = 0;
    void LoadWatchpointArray(const WatchpointArray& wp);

    /// Clears the exclusive monitor's state.
    virtual void ClearExclusiveState() = 0;

    /// Signal an interrupt and ask the core to halt as soon as possible.
    virtual void SignalInterrupt() = 0;

    /// Clear a previous interrupt.
    virtual void ClearInterrupt() = 0;

    struct BacktraceEntry {
        std::string module;
        u64 address;
        u64 original_address;
        u64 offset;
        std::string name;
    };

    static std::vector<BacktraceEntry> GetBacktraceFromContext(System& system,
                                                               const ThreadContext32& ctx);
    static std::vector<BacktraceEntry> GetBacktraceFromContext(System& system,
                                                               const ThreadContext64& ctx);

    virtual std::vector<BacktraceEntry> GetBacktrace() const = 0;

    void LogBacktrace() const;

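    // Halt reasons reported through Dynarmic::HaltReason (see RunJit()/StepJit() below).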
    static constexpr Dynarmic::HaltReason step_thread = Dynarmic::HaltReason::Step;
    static constexpr Dynarmic::HaltReason break_loop = Dynarmic::HaltReason::UserDefined2;
    static constexpr Dynarmic::HaltReason svc_call = Dynarmic::HaltReason::UserDefined3;
    static constexpr Dynarmic::HaltReason breakpoint = Dynarmic::HaltReason::UserDefined4;
    static constexpr Dynarmic::HaltReason watchpoint = Dynarmic::HaltReason::MemoryAbort;
    static constexpr Dynarmic::HaltReason no_execute = Dynarmic::HaltReason::UserDefined6;

protected:
    /// System context that this ARM interface is running under.
    System& system;
    const WatchpointArray* watchpoints;
    bool uses_wall_clock;

    static void SymbolicateBacktrace(Core::System& system, std::vector<BacktraceEntry>& out);
    const Kernel::DebugWatchpoint* MatchingWatchpoint(
        u64 addr, u64 size, Kernel::DebugWatchpointType access_type) const;

    virtual Dynarmic::HaltReason RunJit() = 0;
    virtual Dynarmic::HaltReason StepJit() = 0;
    virtual u32 GetSvcNumber() const = 0;
    virtual const Kernel::DebugWatchpoint* HaltedWatchpoint() const = 0;
    virtual void RewindBreakpointInstruction() = 0;
};

} // namespace Core
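
For illustration only (not part of the header): a minimal sketch of how a
caller holding a saved ThreadContext64 might print a backtrace through this
interface. The printf formatting and the surrounding setup are assumptions.

#include <cstdio>

// Assumes this header and a live Core::System instance are available.
void DumpBacktrace(Core::System& system, const Core::ARM_Interface::ThreadContext64& ctx) {
    const auto entries = Core::ARM_Interface::GetBacktraceFromContext(system, ctx);
    for (const auto& entry : entries) {
        // Each entry carries the module name, the offset within it, and a symbol name.
        std::printf("%s+0x%llx %s\n", entry.module.c_str(),
                    static_cast<unsigned long long>(entry.offset), entry.name.c_str());
    }
}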