early-access version 2439

parent 4875565f36
commit 2f3631914f

5 changed files with 33 additions and 36 deletions
@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
 
-This is the source code for early-access 2438.
+This is the source code for early-access 2439.
 
 ## Legal Notice
 
@@ -258,7 +258,7 @@ private:
 
 private:
     constexpr void ClearAffinityBit(u64& affinity, s32 core) {
-        affinity &= ~(u64(1) << core);
+        affinity &= ~(UINT64_C(1) << core);
     }
 
     constexpr s32 GetNextCore(u64& affinity) {
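A note on the hunk above: both `u64(1)` and `UINT64_C(1)` produce a 64-bit constant, so the change is mostly stylistic; the point of either spelling is that a plain `1` would be a 32-bit `int`, and shifting it by a core index of 32 or more would be undefined. A minimal standalone sketch (local aliases stand in for yuzu's `u64`/`s32`; this is not project code):

```cpp
#include <cassert>
#include <cstdint>

// Standalone illustration of the affinity helper above. The u64/s32 aliases
// are local stand-ins for yuzu's common types.
using u64 = std::uint64_t;
using s32 = std::int32_t;

constexpr void ClearAffinityBit(u64& affinity, s32 core) {
    // UINT64_C(1) is a 64-bit literal, so the shift is well defined for any
    // core index in [0, 63].
    affinity &= ~(UINT64_C(1) << core);
}

int main() {
    u64 affinity = 0b1011; // cores 0, 1 and 3 allowed
    ClearAffinityBit(affinity, 1);
    assert(affinity == 0b1001); // core 1 cleared
}
```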
@@ -710,10 +710,7 @@ void KScheduler::Unload(KThread* thread) {
 }
 
 void KScheduler::Reload(KThread* thread) {
-    LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread ? thread->GetName() : "nullptr");
+    LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread->GetName());
 
-    if (thread) {
-        ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
-
     Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
     cpu_core.LoadContext(thread->GetContext32());
@@ -721,12 +718,11 @@ void KScheduler::Reload(KThread* thread) {
     cpu_core.SetTlsAddress(thread->GetTLSAddress());
     cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
     cpu_core.ClearExclusiveState();
-    }
 }
 
 void KScheduler::SwitchContextStep2() {
     // Load context of new thread
-    Reload(current_thread.load());
+    Reload(GetCurrentThread());
 
     RescheduleCurrentCore();
 }
@@ -735,13 +731,17 @@ void KScheduler::ScheduleImpl() {
     KThread* previous_thread = GetCurrentThread();
     KThread* next_thread = state.highest_priority_thread;
 
-    state.needs_scheduling = false;
+    state.needs_scheduling.store(false);
 
     // We never want to schedule a null thread, so use the idle thread if we don't have a next.
     if (next_thread == nullptr) {
         next_thread = idle_thread;
     }
 
+    if (next_thread->GetCurrentCore() != core_id) {
+        next_thread->SetCurrentCore(core_id);
+    }
+
     // We never want to schedule a dummy thread, as these are only used by host threads for locking.
     if (next_thread->GetThreadType() == ThreadType::Dummy) {
         ASSERT_MSG(false, "Dummy threads should never be scheduled!");
@@ -755,14 +755,8 @@ void KScheduler::ScheduleImpl() {
         return;
     }
 
-    if (next_thread->GetCurrentCore() != core_id) {
-        next_thread->SetCurrentCore(core_id);
-    }
-
-    current_thread.store(next_thread);
-
+    // Update the CPU time tracking variables.
     KProcess* const previous_process = system.Kernel().CurrentProcess();
-
     UpdateLastContextSwitchTime(previous_thread, previous_process);
 
     // Save context for previous thread
@@ -770,6 +764,10 @@
 
     std::shared_ptr<Common::Fiber>* old_context;
     old_context = &previous_thread->GetHostContext();
 
+    // Set the new thread.
+    current_thread.store(next_thread);
+
     guard.Unlock();
 
     Common::Fiber::YieldTo(*old_context, *switch_fiber);
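Taken together, the ScheduleImpl hunks do two things: `needs_scheduling` is now cleared through an explicit atomic `store`, and the handoff is reordered so the next thread's core is fixed up before it is published through `current_thread`, just before the guard is released and the fiber switch happens. A rough standalone sketch of that ordering (the `Thread`, `CoreScheduler` and `Schedule` names are made-up stand-ins, not yuzu types):

```cpp
#include <atomic>
#include <cstdint>

// Made-up stand-ins to show the ordering only; none of this is yuzu code.
struct Thread {
    std::int32_t current_core{-1};
};

struct CoreScheduler {
    std::atomic<bool> needs_scheduling{false};
    std::atomic<Thread*> current_thread{nullptr};
    std::int32_t core_id{0};
    Thread idle_thread{};

    void Schedule(Thread* highest_priority) {
        needs_scheduling.store(false);      // consume the pending reschedule request
        Thread* next = highest_priority ? highest_priority : &idle_thread;
        if (next->current_core != core_id) {
            next->current_core = core_id;   // bind the thread to this core first
        }
        // Publish the chosen thread only after it is fully set up, just before
        // the real scheduler would unlock its guard and switch fibers.
        current_thread.store(next);
    }
};
```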
@@ -797,8 +795,8 @@ void KScheduler::SwitchToCurrent() {
         do {
             auto next_thread = current_thread.load();
             if (next_thread != nullptr) {
-                next_thread->context_guard.Lock();
-                if (next_thread->GetRawState() != ThreadState::Runnable) {
+                const auto locked = next_thread->context_guard.TryLock();
+                if (state.needs_scheduling.load()) {
                     next_thread->context_guard.Unlock();
                     break;
                 }
@@ -806,6 +804,9 @@ void KScheduler::SwitchToCurrent() {
                     next_thread->context_guard.Unlock();
                     break;
                 }
+                if (!locked) {
+                    continue;
+                }
             }
             auto thread = next_thread ? next_thread : idle_thread;
             Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext());
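The two SwitchToCurrent hunks replace the blocking `Lock()` on the thread's context guard with a `TryLock()`, re-check the per-core `needs_scheduling` flag instead of the thread's raw state, and retry the loop with `continue` when the lock was not obtained. A rough sketch of that shape using `std::mutex::try_lock` (hypothetical `Thread`/`Scheduler` types; yuzu uses its own spinlock and fiber machinery):

```cpp
#include <atomic>
#include <mutex>

// Hypothetical stand-ins; not yuzu's types or locking primitives.
struct Thread {
    std::mutex context_guard;
};

struct Scheduler {
    std::atomic<bool> needs_scheduling{false};
    std::atomic<Thread*> current_thread{nullptr};
    Thread idle_thread{};

    // Returns the thread whose context should be entered, or nullptr when a
    // new scheduling pass was requested while we were trying to grab the guard.
    Thread* AcquireNextForSwitch() {
        while (true) {
            Thread* next = current_thread.load();
            if (next != nullptr) {
                const bool locked = next->context_guard.try_lock();
                if (needs_scheduling.load()) {
                    if (locked) {
                        next->context_guard.unlock();
                    }
                    return nullptr; // corresponds to the `break` in the hunk above
                }
                if (!locked) {
                    continue; // guard was contended, retry the whole check
                }
            }
            return next != nullptr ? next : &idle_thread;
        }
    }
};
```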
@@ -57,16 +57,6 @@ void TranslatorVisitor::VMNMX(u64 insn) {
     if (vmnmx.sat != 0) {
         throw NotImplementedException("VMNMX SAT");
     }
-    // Selectors were shown to default to 2 in unit tests
-    if (vmnmx.src_a_selector != 2) {
-        throw NotImplementedException("VMNMX Selector {}", vmnmx.src_a_selector.Value());
-    }
-    if (vmnmx.src_b_selector != 2) {
-        throw NotImplementedException("VMNMX Selector {}", vmnmx.src_b_selector.Value());
-    }
-    if (vmnmx.src_a_width != VideoWidth::Word) {
-        throw NotImplementedException("VMNMX Source Width {}", vmnmx.src_a_width.Value());
-    }
 
     const bool is_b_imm{vmnmx.is_src_b_reg == 0};
     const IR::U32 src_a{GetReg8(insn)};
@@ -76,10 +66,14 @@ void TranslatorVisitor::VMNMX(u64 insn) {
     const VideoWidth a_width{vmnmx.src_a_width};
     const VideoWidth b_width{GetVideoSourceWidth(vmnmx.src_b_width, is_b_imm)};
 
+    const u32 a_selector{static_cast<u32>(vmnmx.src_a_selector)};
+    // Immediate values can't have a selector
+    const u32 b_selector{is_b_imm ? 0U : static_cast<u32>(vmnmx.src_b_selector)};
+
     const bool src_a_signed{vmnmx.src_a_sign != 0};
     const bool src_b_signed{vmnmx.src_b_sign != 0};
-    const IR::U32 op_a{ExtractVideoOperandValue(ir, src_a, a_width, 0, src_a_signed)};
-    const IR::U32 op_b{ExtractVideoOperandValue(ir, src_b, b_width, 0, src_b_signed)};
+    const IR::U32 op_a{ExtractVideoOperandValue(ir, src_a, a_width, a_selector, src_a_signed)};
+    const IR::U32 op_b{ExtractVideoOperandValue(ir, src_b, b_width, b_selector, src_b_signed)};
 
     // First operation's sign is only dependent on operand b's sign
     const bool op_1_signed{src_b_signed};
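The VMNMX change stops rejecting non-default selectors and instead forwards them to `ExtractVideoOperandValue`, with immediates forced to selector 0. Conceptually, a selector picks which sub-word lane of the 32-bit source register the video instruction consumes. A rough standalone sketch of that lane extraction (plain C++ stand-in, assuming byte and halfword lanes; the real helper emits IR bitfield extracts rather than computing values directly):

```cpp
#include <cstdint>

enum class VideoWidth { Byte, Short, Word };

// Stand-in for ExtractVideoOperandValue: for sub-word operands the selector
// chooses the byte/halfword lane, which is then sign- or zero-extended.
std::uint32_t ExtractLane(std::uint32_t value, VideoWidth width,
                          std::uint32_t selector, bool is_signed) {
    switch (width) {
    case VideoWidth::Byte: {
        const std::uint32_t lane = (value >> (selector * 8)) & 0xFF;
        return is_signed ? static_cast<std::uint32_t>(static_cast<std::int8_t>(lane)) : lane;
    }
    case VideoWidth::Short: {
        const std::uint32_t lane = (value >> (selector * 16)) & 0xFFFF;
        return is_signed ? static_cast<std::uint32_t>(static_cast<std::int16_t>(lane)) : lane;
    }
    case VideoWidth::Word:
    default:
        return value; // full 32-bit operand, the selector is irrelevant
    }
}
```

For example, `ExtractLane(value, VideoWidth::Byte, 2, false)` returns bits 16-23 of `value`, which is the lane the previously hard-coded "selector 2" checks assumed.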
@@ -1500,6 +1500,8 @@ typename BufferCache<P>::OverlapResult BufferCache<P>::ResolveOverlaps(VAddr cpu
             // When this memory region has been joined a bunch of times, we assume it's being used
             // as a stream buffer. Increase the size to skip constantly recreating buffers.
             has_stream_leap = true;
+            begin -= PAGE_SIZE * 256;
+            cpu_addr = begin;
             end += PAGE_SIZE * 256;
         }
     }
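The buffer cache hunk makes the stream-leap heuristic grow the resolved range downward as well as upward: besides extending `end`, it now pulls `begin` (and the reported `cpu_addr`) back by 256 pages, so a streaming region that grows in either direction keeps hitting the same buffer. A tiny sketch of the arithmetic (placeholder `VAddr` and an assumed 4 KiB page size; the real constants live in the buffer cache):

```cpp
#include <cstdint>

using VAddr = std::uint64_t;        // placeholder address type
constexpr VAddr PAGE_SIZE = 0x1000; // assumed 4 KiB pages for this sketch

// Expand a detected stream-buffer region by 256 pages in both directions, as
// the hunk above now does, instead of only extending the end.
void ApplyStreamLeap(VAddr& begin, VAddr& end, VAddr& cpu_addr) {
    begin -= PAGE_SIZE * 256;
    cpu_addr = begin; // keep the address the caller sees in sync with begin
    end += PAGE_SIZE * 256;
}
```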