chore: make yuzu REUSE compliant
[REUSE] is a specification that aims to make file copyright information
consistent, so that it is both human- and machine-readable. It requires
that every file carry a header with copyright and licensing information.
When that isn't possible, such as for binary assets, generated files, or
embedded third-party dependencies, the copyright information may instead
be recorded in the `.reuse/dep5` file.
It also requires that every license used in the project be present in
the `LICENSES` folder, which is why the diff is so huge. The licenses
can be fetched automatically with `reuse download --all`.
The `reuse` tool also provides a handy subcommand, `reuse lint`, which
analyzes the project and reports whether it is (still) compliant.
Following REUSE has a few advantages over the current approach:
- Copyright information is easy to access for users / downstream
- Files like `dist/license.md` do not need to exist anymore, as
`.reuse/dep5` is used instead
- `reuse lint` makes it easy to ensure that the copyright information
for files like binary assets / images stays accurate and up to date
To add copyright information to files that didn't have it, I looked up
who committed what and when for each file. As yuzu contributors do not
have to sign a CLA or similar, I couldn't assume that copyright belongs
to the "yuzu Emulator Project", so I used the name and/or email of the
commit author instead.
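For reference, the two-line SPDX header that REUSE expects in each
source file looks like the one at the top of the header below; only the
copyright holder and year vary per file:

// SPDX-FileCopyrightText: 2014 Citra Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later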
[REUSE]: https://reuse.software
Follow-up to 01cf05bc75b1e47beb08937439f3ed9339e7b254
// SPDX-FileCopyrightText: 2014 Citra Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <array>
#include <span>
#include <string>
#include <vector>

#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/svc_types.h"
core/cpu_core_manager: Create threads separately from initialization.
Our initialization process is a little wonkier than one would expect
when it comes to code flow. We initialize the CPU last, whereas on real
hardware the CPU obviously needs to come up first or nothing else would
work, and we have code that adds checks to work around this.
For example, in the page table setting code, we check to see if the
system is turned on before we even notify the CPU instances of a page
table switch. This results in dead code (at the moment), because the
only time a page table switch will occur is when the system is *not*
running, preventing the emulated CPU instances from being notified of a
page table switch in a convenient manner (technically the code path
could be taken, but we don't emulate the process creation svc handlers
yet).
This moves thread creation into its own member function of the core
manager and restores a little order (and predictability) to our
initialization process.
Previously, in the multi-threaded case, we'd kick off several threads
before the main kernel process was even created and ready to execute (gross!).
Now the initialization process is like so:
Initialization:
1. Timers
2. CPU
3. Kernel
4. Filesystem stuff (kind of gross, but can be amended trivially)
5. Applet stuff (ditto in terms of being kind of gross)
6. Main process (will be moved into the loading step in a following
change)
7. Telemetry (this should be initialized last in the future).
8. Services (4 and 5 should ideally be alongside this).
9. GDB (gross. Uses namespace scope state. Needs to be refactored into a
class or booted altogether).
10. Renderer
11. GPU (will also have its threads created in a separate step in a
following change).
Which... isn't *ideal* per se, but untangling CPU state initialization
from this mix gets rid of most of the footguns in our initialization
process.
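Roughly, the core manager ends up with a shape like the sketch below.
The names are illustrative rather than the exact yuzu API; the point is
that initialization and thread creation become two explicitly ordered
steps:

// Illustrative sketch only -- not the actual yuzu interface.
class System; // stand-in forward declaration for the sketch

class CpuCoreManager {
public:
    void Initialize(System& system); // construct core/CPU state, spawn no threads
    void StartThreads();             // create execution threads later, once the
                                     // kernel and main process actually exist
    void Shutdown();                 // join threads, then tear down state
};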

namespace Common {
struct PageTable;
}

namespace Kernel {
enum class DebugWatchpointType : u8;
struct DebugWatchpoint;
class KThread;
class KProcess;
} // namespace Kernel

namespace Core {

using WatchpointArray = std::array<Kernel::DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>;

// NOTE: these values match the HaltReason enum in Dynarmic
enum class HaltReason : u64 {
    StepThread = 0x00000001,
    DataAbort = 0x00000004,
    BreakLoop = 0x02000000,
    SupervisorCall = 0x04000000,
    InstructionBreakpoint = 0x08000000,
    PrefetchAbort = 0x20000000,
};
DECLARE_ENUM_FLAG_OPERATORS(HaltReason);

enum class Architecture {
    AArch64,
    AArch32,
};

/// Generic ARMv8 CPU interface
class ArmInterface {
public:
    YUZU_NON_COPYABLE(ArmInterface);
    YUZU_NON_MOVEABLE(ArmInterface);

    explicit ArmInterface(bool uses_wall_clock) : m_uses_wall_clock{uses_wall_clock} {}
    virtual ~ArmInterface() = default;

    // Perform any backend-specific initialization.
    virtual void Initialize() {}

    // Runs the CPU until an event happens.
    virtual HaltReason RunThread(Kernel::KThread* thread) = 0;

    // Runs the CPU for one instruction or until an event happens.
    virtual HaltReason StepThread(Kernel::KThread* thread) = 0;

    // Admits a backend-specific mechanism to lock the thread context.
    virtual void LockThread(Kernel::KThread* thread) {}
    virtual void UnlockThread(Kernel::KThread* thread) {}

    // Clear the entire instruction cache for this CPU.
    virtual void ClearInstructionCache() = 0;

    // Clear a range of the instruction cache for this CPU.
    virtual void InvalidateCacheRange(u64 addr, std::size_t size) = 0;

    // Get the current architecture.
    // This returns AArch64 when PSTATE.nRW == 0 and AArch32 when PSTATE.nRW == 1.
    virtual Architecture GetArchitecture() const = 0;

    // Context accessors.
    // These should not be called if the CPU is running.
    virtual void GetContext(Kernel::Svc::ThreadContext& ctx) const = 0;
    virtual void SetContext(const Kernel::Svc::ThreadContext& ctx) = 0;
    // Sets TPIDRRO_EL0, the read-only EL0 thread ID register.
    virtual void SetTpidrroEl0(u64 value) = 0;

    // Accessors for the arguments and number of the current supervisor call.
    virtual void GetSvcArguments(std::span<uint64_t, 8> args) const = 0;
    virtual void SetSvcArguments(std::span<const uint64_t, 8> args) = 0;
    virtual u32 GetSvcNumber() const = 0;

    // Registers the array of debug watchpoints for this CPU to check against.
    void SetWatchpointArray(const WatchpointArray* watchpoints) {
        m_watchpoints = watchpoints;
    }

    // Signal an interrupt for execution to halt as soon as possible.
    // It is safe to call this if the CPU is not running.
    virtual void SignalInterrupt(Kernel::KThread* thread) = 0;

    // Stack trace generation.
    void LogBacktrace(Kernel::KProcess* process) const;

    // Debug functionality.
    virtual const Kernel::DebugWatchpoint* HaltedWatchpoint() const = 0;
    virtual void RewindBreakpointInstruction() = 0;

protected:
    const Kernel::DebugWatchpoint* MatchingWatchpoint(
        u64 addr, u64 size, Kernel::DebugWatchpointType access_type) const;

protected:
    const WatchpointArray* m_watchpoints{};
    bool m_uses_wall_clock{};
};

} // namespace Core
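As a usage illustration, the sketch below drives an ArmInterface
instance and reacts to the returned HaltReason flags. The function name
and the surrounding dispatch loop are hypothetical; only the
ArmInterface/HaltReason members come from the header above.

// Illustrative sketch only: runs a thread until it halts for something other
// than a supervisor call, using just the members declared by Core::ArmInterface.
Core::HaltReason RunUntilNonSvcHalt(Core::ArmInterface& cpu, Kernel::KThread* thread) {
    while (true) {
        const Core::HaltReason reason = cpu.RunThread(thread);
        if (static_cast<u64>(reason & Core::HaltReason::SupervisorCall) == 0) {
            // Breakpoints, data aborts, interrupts, etc. are left to the caller.
            return reason;
        }
        std::array<u64, 8> args{};
        cpu.GetSvcArguments(args); // read the guest's SVC arguments
        // ... dispatch the call identified by cpu.GetSvcNumber() here ...
        cpu.SetSvcArguments(args); // write results back before resuming the thread
    }
}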