// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
|
2018-05-02 20:26:14 -05:00
|
|
|
#include <condition_variable>
|
|
|
|
#include <mutex>
|
|
|
|
|
2018-05-01 21:21:38 -05:00
|
|
|
#include "common/logging/log.h"
|
|
|
|
#ifdef ARCHITECTURE_x86_64
|
|
|
|
#include "core/arm/dynarmic/arm_dynarmic.h"
|
|
|
|
#endif
|
2018-09-17 17:15:09 -05:00
|
|
|
#include "core/arm/exclusive_monitor.h"
|
2018-05-01 21:21:38 -05:00
|
|
|
#include "core/arm/unicorn/arm_unicorn.h"
|
2019-03-04 15:02:59 -06:00
|
|
|
#include "core/core.h"
|
2018-05-01 21:21:38 -05:00
|
|
|
#include "core/core_cpu.h"
|
|
|
|
#include "core/core_timing.h"
|
|
|
|
#include "core/hle/kernel/scheduler.h"
|
|
|
|
#include "core/hle/kernel/thread.h"
|
2018-08-12 17:50:44 -05:00
|
|
|
#include "core/hle/lock.h"
|
2018-05-01 21:21:38 -05:00
|
|
|
#include "core/settings.h"
|
|
|
|
|
|
|
|
namespace Core {
|
|
|
|
|
2018-05-02 23:16:12 -05:00
|
|
|
void CpuBarrier::NotifyEnd() {
|
|
|
|
std::unique_lock<std::mutex> lock(mutex);
|
|
|
|
end = true;
|
|
|
|
condition.notify_all();
|
|
|
|
}
|
|
|
|
|
|
|
|
bool CpuBarrier::Rendezvous() {
|
2018-05-02 23:34:54 -05:00
|
|
|
if (!Settings::values.use_multi_core) {
|
|
|
|
// Meaningless when running in single-core mode
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!end) {
|
2018-05-02 23:16:12 -05:00
|
|
|
std::unique_lock<std::mutex> lock(mutex);
|
|
|
|
|
|
|
|
--cores_waiting;
|
|
|
|
if (!cores_waiting) {
|
|
|
|
cores_waiting = NUM_CPU_CORES;
|
|
|
|
condition.notify_all();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
condition.wait(lock);
|
|
|
|
return true;
|
|
|
|
}
|
2018-05-02 23:34:54 -05:00
|
|
|
|
|
|
|
return false;
|
2018-05-02 23:16:12 -05:00
|
|
|
}
|
|
|
|
|
2019-03-04 15:02:59 -06:00
|
|
|
/// Constructs one emulated CPU core, selecting the Dynarmic JIT when it is
/// both requested and available, otherwise the Unicorn interpreter, and
/// creates the per-core scheduler bound to the chosen ARM backend.
Cpu::Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_barrier,
         std::size_t core_index)
    : cpu_barrier{cpu_barrier}, core_timing{system.CoreTiming()}, core_index{core_index} {
    if (Settings::values.use_cpu_jit) {
#ifdef ARCHITECTURE_x86_64
        arm_interface = std::make_unique<ARM_Dynarmic>(core_timing, exclusive_monitor, core_index);
#else
        // Fall back to the interpreter, passing core_timing exactly as the
        // non-JIT branch below does. The previous call took no arguments and
        // was left stale when ARM_Unicorn gained a CoreTiming parameter,
        // which would break non-x86_64 builds.
        arm_interface = std::make_unique<ARM_Unicorn>(core_timing);
        LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
#endif
    } else {
        arm_interface = std::make_unique<ARM_Unicorn>(core_timing);
    }

    scheduler = std::make_unique<Kernel::Scheduler>(system, *arm_interface);
}

// Defaulted out-of-line: presumably required so the unique_ptr members'
// destructors instantiate here, where their pointees are complete types
// (header not visible from this file — TODO confirm).
Cpu::~Cpu() = default;

std::unique_ptr<ExclusiveMonitor> Cpu::MakeExclusiveMonitor(std::size_t num_cores) {
|
2018-07-03 08:28:46 -05:00
|
|
|
if (Settings::values.use_cpu_jit) {
|
|
|
|
#ifdef ARCHITECTURE_x86_64
|
2018-10-15 07:53:01 -05:00
|
|
|
return std::make_unique<DynarmicExclusiveMonitor>(num_cores);
|
2018-07-03 08:28:46 -05:00
|
|
|
#else
|
|
|
|
return nullptr; // TODO(merry): Passthrough exclusive monitor
|
|
|
|
#endif
|
|
|
|
} else {
|
|
|
|
return nullptr; // TODO(merry): Passthrough exclusive monitor
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-01 21:21:38 -05:00
|
|
|
void Cpu::RunLoop(bool tight_loop) {
|
2018-05-02 20:26:14 -05:00
|
|
|
// Wait for all other CPU cores to complete the previous slice, such that they run in lock-step
|
2018-10-15 07:42:06 -05:00
|
|
|
if (!cpu_barrier.Rendezvous()) {
|
2018-05-02 23:16:12 -05:00
|
|
|
// If rendezvous failed, session has been killed
|
|
|
|
return;
|
|
|
|
}
|
2018-05-02 20:26:14 -05:00
|
|
|
|
2018-05-01 21:21:38 -05:00
|
|
|
// If we don't have a currently active thread then don't execute instructions,
|
|
|
|
// instead advance to the next event and try to yield to the next thread
|
|
|
|
if (Kernel::GetCurrentThread() == nullptr) {
|
2018-07-02 11:13:26 -05:00
|
|
|
LOG_TRACE(Core, "Core-{} idling", core_index);
|
2018-05-02 20:26:14 -05:00
|
|
|
|
|
|
|
if (IsMainCore()) {
|
2018-08-12 20:41:28 -05:00
|
|
|
// TODO(Subv): Only let CoreTiming idle if all 4 cores are idling.
|
2019-02-14 11:42:58 -06:00
|
|
|
core_timing.Idle();
|
|
|
|
core_timing.Advance();
|
2018-05-02 20:26:14 -05:00
|
|
|
}
|
|
|
|
|
2018-05-01 21:21:38 -05:00
|
|
|
PrepareReschedule();
|
|
|
|
} else {
|
2018-05-02 20:26:14 -05:00
|
|
|
if (IsMainCore()) {
|
2019-02-14 11:42:58 -06:00
|
|
|
core_timing.Advance();
|
2018-05-02 20:26:14 -05:00
|
|
|
}
|
|
|
|
|
2018-05-01 21:21:38 -05:00
|
|
|
if (tight_loop) {
|
|
|
|
arm_interface->Run();
|
|
|
|
} else {
|
|
|
|
arm_interface->Step();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
Reschedule();
|
|
|
|
}
|
|
|
|
|
|
|
|
void Cpu::SingleStep() {
|
|
|
|
return RunLoop(false);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Requests a thread switch: asks the ARM backend to stop executing at its
// next safe point, and arms the flag that makes Reschedule() (invoked at the
// end of RunLoop) actually perform the switch.
void Cpu::PrepareReschedule() {
    arm_interface->PrepareReschedule();
    reschedule_pending = true;
}

void Cpu::Reschedule() {
|
|
|
|
if (!reschedule_pending) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
reschedule_pending = false;
|
2018-08-12 17:50:44 -05:00
|
|
|
// Lock the global kernel mutex when we manipulate the HLE state
|
|
|
|
std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
|
2018-05-01 21:21:38 -05:00
|
|
|
scheduler->Reschedule();
|
|
|
|
}
|
|
|
|
|
} // namespace Core