Core timing 2.0 (#4913)
* Core::Timing: Add multiple timers, one for each core
* Revert clang-format; work on tests for CoreTiming
* Kernel: Add support for multiple cores; asserts in HandleSyncRequest because Thread->status == WaitIPC
* Add some TRACE_LOGs
* Fix tests
* Make some adjustments to the Qt debugger, cheats and gdbstub (probably still broken)
* Make ARM_Interface::id private, rework the ARM_Interface ctor
* Re-rename TimingManager to Timing for a smaller diff
* Addressed review comments

Parent commit: e3dbdcbdff
This commit: 55ec7031cc
32 changed files with 760 additions and 535 deletions
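The diff below gives every core its own timer while the system keeps one global tick count. As a rough illustration of that idea, here is a minimal stand-alone C++ sketch (not part of the commit): `Timer` is a cut-down stand-in for the per-core timer class added in core_timing, and the two-phase loop mirrors the new RunLoop (catch up any core that fell behind the global time, then run all cores for a common slice). The values num_cores = 4 and max_slice = 20000 are taken from the diff; everything else is invented for the example.

    #include <cstdint>
    #include <memory>
    #include <vector>

    // Cut-down stand-in for the per-core Timer: every core owns one, and the run
    // loop keeps all of them near a shared global tick count.
    class Timer {
    public:
        void AddTicks(std::int64_t ticks) { executed_ticks += ticks; }
        std::int64_t GetTicks() const { return executed_ticks; }
    private:
        std::int64_t executed_ticks = 0;
    };

    int main() {
        constexpr std::size_t num_cores = 4;      // New 3DS core count used in the diff
        constexpr std::int64_t max_slice = 20000; // mirrors MAX_SLICE_LENGTH
        std::vector<std::shared_ptr<Timer>> timers;
        for (std::size_t i = 0; i < num_cores; ++i)
            timers.push_back(std::make_shared<Timer>());

        std::int64_t global_ticks = 0;
        for (int iteration = 0; iteration < 3; ++iteration) {
            // Phase 1: any core behind the global time is advanced until it catches up.
            for (auto& timer : timers)
                if (timer->GetTicks() < global_ticks)
                    timer->AddTicks(global_ticks - timer->GetTicks());
            // Phase 2: run every core for the same slice, then move the global clock.
            for (auto& timer : timers)
                timer->AddTicks(max_slice);
            global_ticks += max_slice;
        }
        return global_ticks == 3 * max_slice ? 0 : 1;
    }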
@@ -61,13 +61,14 @@ void RegistersWidget::OnDebugModeEntered() {
if (!Core::System::GetInstance().IsPoweredOn())
return;

// Todo: Handle all cores
for (int i = 0; i < core_registers->childCount(); ++i)
core_registers->child(i)->setText(
1, QStringLiteral("0x%1").arg(Core::CPU().GetReg(i), 8, 16, QLatin1Char('0')));
1, QStringLiteral("0x%1").arg(Core::GetCore(0).GetReg(i), 8, 16, QLatin1Char('0')));

for (int i = 0; i < vfp_registers->childCount(); ++i)
vfp_registers->child(i)->setText(
1, QStringLiteral("0x%1").arg(Core::CPU().GetVFPReg(i), 8, 16, QLatin1Char('0')));
1, QStringLiteral("0x%1").arg(Core::GetCore(0).GetVFPReg(i), 8, 16, QLatin1Char('0')));

UpdateCPSRValues();
UpdateVFPSystemRegisterValues();

@@ -127,7 +128,8 @@ void RegistersWidget::CreateCPSRChildren() {
}

void RegistersWidget::UpdateCPSRValues() {
const u32 cpsr_val = Core::CPU().GetCPSR();
// Todo: Handle all cores
const u32 cpsr_val = Core::GetCore(0).GetCPSR();

cpsr->setText(1, QStringLiteral("0x%1").arg(cpsr_val, 8, 16, QLatin1Char('0')));
cpsr->child(0)->setText(

@@ -191,10 +193,11 @@ void RegistersWidget::CreateVFPSystemRegisterChildren() {
}

void RegistersWidget::UpdateVFPSystemRegisterValues() {
const u32 fpscr_val = Core::CPU().GetVFPSystemReg(VFP_FPSCR);
const u32 fpexc_val = Core::CPU().GetVFPSystemReg(VFP_FPEXC);
const u32 fpinst_val = Core::CPU().GetVFPSystemReg(VFP_FPINST);
const u32 fpinst2_val = Core::CPU().GetVFPSystemReg(VFP_FPINST2);
// Todo: handle all cores
const u32 fpscr_val = Core::GetCore(0).GetVFPSystemReg(VFP_FPSCR);
const u32 fpexc_val = Core::GetCore(0).GetVFPSystemReg(VFP_FPEXC);
const u32 fpinst_val = Core::GetCore(0).GetVFPSystemReg(VFP_FPINST);
const u32 fpinst2_val = Core::GetCore(0).GetVFPSystemReg(VFP_FPINST2);

QTreeWidgetItem* const fpscr = vfp_system_registers->child(0);
fpscr->setText(1, QStringLiteral("0x%1").arg(fpscr_val, 8, 16, QLatin1Char('0')));

@@ -12,6 +12,7 @@
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/timer.h"
#include "core/hle/kernel/wait_object.h"
#include "core/settings.h"

WaitTreeItem::~WaitTreeItem() = default;

@@ -51,12 +52,16 @@ std::size_t WaitTreeItem::Row() const {
}

std::vector<std::unique_ptr<WaitTreeThread>> WaitTreeItem::MakeThreadItemList() {
const auto& threads = Core::System::GetInstance().Kernel().GetThreadManager().GetThreadList();
u32 num_cores = Core::GetNumCores();
std::vector<std::unique_ptr<WaitTreeThread>> item_list;
item_list.reserve(threads.size());
for (std::size_t i = 0; i < threads.size(); ++i) {
item_list.push_back(std::make_unique<WaitTreeThread>(*threads[i]));
item_list.back()->row = i;
for (u32 i = 0; i < num_cores; ++i) {
const auto& threads =
Core::System::GetInstance().Kernel().GetThreadManager(i).GetThreadList();
item_list.reserve(item_list.size() + threads.size());
for (std::size_t i = 0; i < threads.size(); ++i) {
item_list.push_back(std::make_unique<WaitTreeThread>(*threads[i]));
item_list.back()->row = i;
}
}
return item_list;
}

@@ -9,10 +9,13 @@
#include "common/common_types.h"
#include "core/arm/skyeye_common/arm_regformat.h"
#include "core/arm/skyeye_common/vfp/asm_vfp.h"
#include "core/core_timing.h"

/// Generic ARM11 CPU interface
class ARM_Interface : NonCopyable {
public:
explicit ARM_Interface(u32 id, std::shared_ptr<Core::Timing::Timer> timer)
: timer(timer), id(id){};
virtual ~ARM_Interface() {}

class ThreadContext {

@@ -172,4 +175,18 @@ public:
/// Prepare core for thread reschedule (if needed to correctly handle state)
virtual void PrepareReschedule() = 0;

std::shared_ptr<Core::Timing::Timer> GetTimer() {
return timer;
}

u32 GetID() const {
return id;
}

protected:
std::shared_ptr<Core::Timing::Timer> timer;

private:
u32 id;
};

@@ -72,8 +72,7 @@ private:
class DynarmicUserCallbacks final : public Dynarmic::A32::UserCallbacks {
public:
explicit DynarmicUserCallbacks(ARM_Dynarmic& parent)
: parent(parent), timing(parent.system.CoreTiming()), svc_context(parent.system),
memory(parent.memory) {}
: parent(parent), svc_context(parent.system), memory(parent.memory) {}
~DynarmicUserCallbacks() = default;

std::uint8_t MemoryRead8(VAddr vaddr) override {

@@ -137,7 +136,7 @@ public:
parent.jit->HaltExecution();
parent.SetPC(pc);
Kernel::Thread* thread =
parent.system.Kernel().GetThreadManager().GetCurrentThread();
parent.system.Kernel().GetCurrentThreadManager().GetCurrentThread();
parent.SaveContext(thread->context);
GDBStub::Break();
GDBStub::SendTrap(thread, 5);

@@ -150,22 +149,23 @@ public:
}

void AddTicks(std::uint64_t ticks) override {
timing.AddTicks(ticks);
parent.GetTimer()->AddTicks(ticks);
}
std::uint64_t GetTicksRemaining() override {
s64 ticks = timing.GetDowncount();
s64 ticks = parent.GetTimer()->GetDowncount();
return static_cast<u64>(ticks <= 0 ? 0 : ticks);
}

ARM_Dynarmic& parent;
Core::Timing& timing;
Kernel::SVCContext svc_context;
Memory::MemorySystem& memory;
};

ARM_Dynarmic::ARM_Dynarmic(Core::System* system, Memory::MemorySystem& memory,
PrivilegeMode initial_mode)
: system(*system), memory(memory), cb(std::make_unique<DynarmicUserCallbacks>(*this)) {
PrivilegeMode initial_mode, u32 id,
std::shared_ptr<Core::Timing::Timer> timer)
: ARM_Interface(id, timer), system(*system), memory(memory),
cb(std::make_unique<DynarmicUserCallbacks>(*this)) {
interpreter_state = std::make_shared<ARMul_State>(system, memory, initial_mode);
PageTableChanged();
}

@@ -24,7 +24,8 @@ class DynarmicUserCallbacks;

class ARM_Dynarmic final : public ARM_Interface {
public:
ARM_Dynarmic(Core::System* system, Memory::MemorySystem& memory, PrivilegeMode initial_mode);
ARM_Dynarmic(Core::System* system, Memory::MemorySystem& memory, PrivilegeMode initial_mode,
u32 id, std::shared_ptr<Core::Timing::Timer> timer);
~ARM_Dynarmic() override;

void Run() override;

@@ -69,8 +69,9 @@ private:
};

ARM_DynCom::ARM_DynCom(Core::System* system, Memory::MemorySystem& memory,
PrivilegeMode initial_mode)
: system(system) {
PrivilegeMode initial_mode, u32 id,
std::shared_ptr<Core::Timing::Timer> timer)
: ARM_Interface(id, timer), system(system) {
state = std::make_unique<ARMul_State>(system, memory, initial_mode);
}

@@ -78,7 +79,7 @@ ARM_DynCom::~ARM_DynCom() {}

void ARM_DynCom::Run() {
DEBUG_ASSERT(system != nullptr);
ExecuteInstructions(std::max<s64>(system->CoreTiming().GetDowncount(), 0));
ExecuteInstructions(std::max<s64>(timer->GetDowncount(), 0));
}

void ARM_DynCom::Step() {

@@ -150,7 +151,7 @@ void ARM_DynCom::ExecuteInstructions(u64 num_instructions) {
state->NumInstrsToExecute = num_instructions;
unsigned ticks_executed = InterpreterMainLoop(state.get());
if (system != nullptr) {
system->CoreTiming().AddTicks(ticks_executed);
timer->AddTicks(ticks_executed);
}
state->ServeBreak();
}

@@ -21,7 +21,8 @@ class MemorySystem;
class ARM_DynCom final : public ARM_Interface {
public:
explicit ARM_DynCom(Core::System* system, Memory::MemorySystem& memory,
PrivilegeMode initial_mode);
PrivilegeMode initial_mode, u32 id,
std::shared_ptr<Core::Timing::Timer> timer);
~ARM_DynCom() override;

void Run() override;

@@ -3865,7 +3865,7 @@ SWI_INST : {
if (inst_base->cond == ConditionCode::AL || CondPassed(cpu, inst_base->cond)) {
DEBUG_ASSERT(cpu->system != nullptr);
swi_inst* const inst_cream = (swi_inst*)inst_base->component;
cpu->system->CoreTiming().AddTicks(num_instrs);
cpu->system->GetRunningCore().GetTimer()->AddTicks(num_instrs);
cpu->NumInstrsToExecute =
num_instrs >= cpu->NumInstrsToExecute ? 0 : cpu->NumInstrsToExecute - num_instrs;
num_instrs = 0;

@@ -607,8 +607,8 @@ void ARMul_State::ServeBreak() {
}

DEBUG_ASSERT(system != nullptr);
Kernel::Thread* thread = system->Kernel().GetThreadManager().GetCurrentThread();
system->CPU().SaveContext(thread->context);
Kernel::Thread* thread = system->Kernel().GetCurrentThreadManager().GetCurrentThread();
system->GetRunningCore().SaveContext(thread->context);

if (last_bkpt_hit || GDBStub::IsMemoryBreak() || GDBStub::GetCpuStepFlag()) {
last_bkpt_hit = false;

@@ -35,7 +35,7 @@ static inline std::enable_if_t<std::is_integral_v<T>> WriteOp(const GatewayCheat
Core::System& system) {
u32 addr = line.address + state.offset;
write_func(addr, static_cast<T>(line.value));
system.CPU().InvalidateCacheRange(addr, sizeof(T));
system.InvalidateCacheRange(addr, sizeof(T));
}

template <typename T, typename ReadFunction, typename CompareFunc>

@@ -105,7 +105,7 @@ static inline std::enable_if_t<std::is_integral_v<T>> IncrementiveWriteOp(
Core::System& system) {
u32 addr = line.value + state.offset;
write_func(addr, static_cast<T>(state.reg));
system.CPU().InvalidateCacheRange(addr, sizeof(T));
system.InvalidateCacheRange(addr, sizeof(T));
state.offset += sizeof(T);
}

@@ -143,7 +143,8 @@ static inline void PatchOp(const GatewayCheat::CheatLine& line, State& state, Co
}
u32 num_bytes = line.value;
u32 addr = line.address + state.offset;
system.CPU().InvalidateCacheRange(addr, num_bytes);
system.InvalidateCacheRange(addr, num_bytes);

bool first = true;
u32 bit_offset = 0;
if (num_bytes > 0)

@@ -44,7 +44,8 @@ namespace Core {

System::ResultStatus System::RunLoop(bool tight_loop) {
status = ResultStatus::Success;
if (!cpu_core) {
if (std::any_of(cpu_cores.begin(), cpu_cores.end(),
[](std::shared_ptr<ARM_Interface> ptr) { return ptr == nullptr; })) {
return ResultStatus::ErrorNotInitialized;
}

@@ -62,22 +63,73 @@ System::ResultStatus System::RunLoop(bool tight_loop) {
}
}

// If we don't have a currently active thread then don't execute instructions,
// instead advance to the next event and try to yield to the next thread
if (kernel->GetThreadManager().GetCurrentThread() == nullptr) {
LOG_TRACE(Core_ARM11, "Idling");
timing->Idle();
timing->Advance();
PrepareReschedule();
} else {
timing->Advance();
if (tight_loop) {
cpu_core->Run();
} else {
cpu_core->Step();
// All cores should have executed the same amount of ticks. If this is not the case an event was
// scheduled with a cycles_into_future smaller then the current downcount.
// So we have to get those cores to the same global time first
u64 global_ticks = timing->GetGlobalTicks();
s64 max_delay = 0;
std::shared_ptr<ARM_Interface> current_core_to_execute = nullptr;
for (auto& cpu_core : cpu_cores) {
if (cpu_core->GetTimer()->GetTicks() < global_ticks) {
s64 delay = global_ticks - cpu_core->GetTimer()->GetTicks();
cpu_core->GetTimer()->Advance(delay);
if (max_delay < delay) {
max_delay = delay;
current_core_to_execute = cpu_core;
}
}
}

if (max_delay > 0) {
LOG_TRACE(Core_ARM11, "Core {} running (delayed) for {} ticks",
current_core_to_execute->GetID(),
current_core_to_execute->GetTimer()->GetDowncount());
running_core = current_core_to_execute.get();
kernel->SetRunningCPU(current_core_to_execute);
if (kernel->GetCurrentThreadManager().GetCurrentThread() == nullptr) {
LOG_TRACE(Core_ARM11, "Core {} idling", current_core_to_execute->GetID());
current_core_to_execute->GetTimer()->Idle();
PrepareReschedule();
} else {
if (tight_loop) {
current_core_to_execute->Run();
} else {
current_core_to_execute->Step();
}
}
} else {
// Now all cores are at the same global time. So we will run them one after the other
// with a max slice that is the minimum of all max slices of all cores
// TODO: Make special check for idle since we can easily revert the time of idle cores
s64 max_slice = Timing::MAX_SLICE_LENGTH;
for (const auto& cpu_core : cpu_cores) {
max_slice = std::min(max_slice, cpu_core->GetTimer()->GetMaxSliceLength());
}
for (auto& cpu_core : cpu_cores) {
cpu_core->GetTimer()->Advance(max_slice);
}
for (auto& cpu_core : cpu_cores) {
LOG_TRACE(Core_ARM11, "Core {} running for {} ticks", cpu_core->GetID(),
cpu_core->GetTimer()->GetDowncount());
running_core = cpu_core.get();
kernel->SetRunningCPU(cpu_core);
// If we don't have a currently active thread then don't execute instructions,
// instead advance to the next event and try to yield to the next thread
if (kernel->GetCurrentThreadManager().GetCurrentThread() == nullptr) {
LOG_TRACE(Core_ARM11, "Core {} idling", cpu_core->GetID());
cpu_core->GetTimer()->Idle();
PrepareReschedule();
} else {
if (tight_loop) {
cpu_core->Run();
} else {
cpu_core->Step();
}
}
}
timing->AddToGlobalTicks(max_slice);
}

if (GDBStub::IsServerEnabled()) {
GDBStub::SetCpuStepFlag(false);
}

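The RunLoop comments above explain that, once all cores share the same global time, each core runs for a common slice bounded by the earliest pending event on any core. A hypothetical worked example (the tick numbers are invented, only MAX_SLICE_LENGTH = 20000 comes from the diff):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Illustration of how the common slice is picked: take the smallest per-core
    // "max slice", so no core may run past its own earliest pending event or past
    // MAX_SLICE_LENGTH.
    int main() {
        constexpr std::int64_t MAX_SLICE_LENGTH = 20000;
        // Ticks until each core's next scheduled event (invented numbers).
        std::vector<std::int64_t> next_event_in = {20000, 4800, 20000, 12000};

        std::int64_t max_slice = MAX_SLICE_LENGTH;
        for (std::int64_t ticks : next_event_in)
            max_slice = std::min(max_slice, ticks);

        // All cores are then advanced by the same 4800 ticks before the next iteration.
        return max_slice == 4800 ? 0 : 1;
    }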
@@ -174,7 +226,7 @@ System::ResultStatus System::Load(Frontend::EmuWindow& emu_window, const std::st
}

void System::PrepareReschedule() {
cpu_core->PrepareReschedule();
running_core->PrepareReschedule();
reschedule_pending = true;
}

@@ -188,31 +240,50 @@ void System::Reschedule() {
}

reschedule_pending = false;
kernel->GetThreadManager().Reschedule();
for (const auto& core : cpu_cores) {
LOG_TRACE(Core_ARM11, "Reschedule core {}", core->GetID());
kernel->GetThreadManager(core->GetID()).Reschedule();
}
}

System::ResultStatus System::Init(Frontend::EmuWindow& emu_window, u32 system_mode) {
LOG_DEBUG(HW_Memory, "initialized OK");

std::size_t num_cores = 2;
if (Settings::values.is_new_3ds) {
num_cores = 4;
}

memory = std::make_unique<Memory::MemorySystem>();

timing = std::make_unique<Timing>();
timing = std::make_unique<Timing>(num_cores);

kernel = std::make_unique<Kernel::KernelSystem>(*memory, *timing,
[this] { PrepareReschedule(); }, system_mode);
kernel = std::make_unique<Kernel::KernelSystem>(
*memory, *timing, [this] { PrepareReschedule(); }, system_mode, num_cores);

if (Settings::values.use_cpu_jit) {
#ifdef ARCHITECTURE_x86_64
cpu_core = std::make_shared<ARM_Dynarmic>(this, *memory, USER32MODE);
for (std::size_t i = 0; i < num_cores; ++i) {
cpu_cores.push_back(
std::make_shared<ARM_Dynarmic>(this, *memory, USER32MODE, i, timing->GetTimer(i)));
}
#else
cpu_core = std::make_shared<ARM_DynCom>(this, *memory, USER32MODE);
for (std::size_t i = 0; i < num_cores; ++i) {
cpu_cores.push_back(
std::make_shared<ARM_DynCom>(this, *memory, USER32MODE, i, timing->GetTimer(i)));
}
LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
#endif
} else {
cpu_core = std::make_shared<ARM_DynCom>(this, *memory, USER32MODE);
for (std::size_t i = 0; i < num_cores; ++i) {
cpu_cores.push_back(
std::make_shared<ARM_DynCom>(this, *memory, USER32MODE, i, timing->GetTimer(i)));
}
}
running_core = cpu_cores[0].get();

kernel->SetCPU(cpu_core);
kernel->SetCPUs(cpu_cores);
kernel->SetRunningCPU(cpu_cores[0]);

if (Settings::values.enable_dsp_lle) {
dsp_core = std::make_unique<AudioCore::DspLle>(*memory,

@@ -257,6 +328,8 @@ System::ResultStatus System::Init(Frontend::EmuWindow& emu_window, u32 system_mo

LOG_DEBUG(Core, "Initialized OK");

initalized = true;

return ResultStatus::Success;
}

@@ -362,7 +435,7 @@ void System::Shutdown() {
cheat_engine.reset();
service_manager.reset();
dsp_core.reset();
cpu_core.reset();
cpu_cores.clear();
kernel.reset();
timing.reset();
app_loader.reset();

@@ -140,7 +140,10 @@
* @returns True if the emulated system is powered on, otherwise false.
*/
bool IsPoweredOn() const {
return cpu_core != nullptr;
return cpu_cores.size() > 0 &&
std::all_of(cpu_cores.begin(), cpu_cores.end(),
[](std::shared_ptr<ARM_Interface> ptr) { return ptr != nullptr; });
;
}

/**

@@ -160,8 +163,29 @@ public:
* Gets a reference to the emulated CPU.
* @returns A reference to the emulated CPU.
*/
ARM_Interface& CPU() {
return *cpu_core;
ARM_Interface& GetRunningCore() {
return *running_core;
};

/**
* Gets a reference to the emulated CPU.
* @param core_id The id of the core requested.
* @returns A reference to the emulated CPU.
*/
ARM_Interface& GetCore(u32 core_id) {
return *cpu_cores[core_id];
};

u32 GetNumCores() const {
return cpu_cores.size();
}

void InvalidateCacheRange(u32 start_address, std::size_t length) {
for (const auto& cpu : cpu_cores) {
cpu->InvalidateCacheRange(start_address, length);
}
}

/**

@@ -288,7 +312,8 @@ private:
std::unique_ptr<Loader::AppLoader> app_loader;

/// ARM11 CPU core
std::shared_ptr<ARM_Interface> cpu_core;
std::vector<std::shared_ptr<ARM_Interface>> cpu_cores;
ARM_Interface* running_core = nullptr;

/// DSP core
std::unique_ptr<AudioCore::DspInterface> dsp_core;

@@ -330,6 +355,8 @@ private:
private:
static System s_instance;

bool initalized = false;

ResultStatus status = ResultStatus::Success;
std::string status_details = "";
/// Saved variables for reset

@@ -340,8 +367,16 @@ private:
std::atomic<bool> shutdown_requested;
};

inline ARM_Interface& CPU() {
return System::GetInstance().CPU();
inline ARM_Interface& GetRunningCore() {
return System::GetInstance().GetRunningCore();
}

inline ARM_Interface& GetCore(u32 core_id) {
return System::GetInstance().GetCore(core_id);
}

inline u32 GetNumCores() {
return System::GetInstance().GetNumCores();
}

inline AudioCore::DspInterface& DSP() {

@@ -12,14 +12,22 @@
namespace Core {

// Sort by time, unless the times are the same, in which case sort by the order added to the queue
bool Timing::Event::operator>(const Event& right) const {
bool Timing::Event::operator>(const Timing::Event& right) const {
return std::tie(time, fifo_order) > std::tie(right.time, right.fifo_order);
}

bool Timing::Event::operator<(const Event& right) const {
bool Timing::Event::operator<(const Timing::Event& right) const {
return std::tie(time, fifo_order) < std::tie(right.time, right.fifo_order);
}

Timing::Timing(std::size_t num_cores) {
timers.resize(num_cores);
for (std::size_t i = 0; i < num_cores; ++i) {
timers[i] = std::make_shared<Timer>();
}
current_timer = timers[0];
}

TimingEventType* Timing::RegisterEvent(const std::string& name, TimedCallback callback) {
// check for existing type with same name.
// we want event type names to remain unique so that we can use them for serialization.

@@ -34,73 +42,102 @@ TimingEventType* Timing::RegisterEvent(const std::string& name, TimedCallback ca
return event_type;
}

Timing::~Timing() {
void Timing::ScheduleEvent(s64 cycles_into_future, const TimingEventType* event_type, u64 userdata,
std::size_t core_id) {
ASSERT(event_type != nullptr);
std::shared_ptr<Timing::Timer> timer;
if (core_id == std::numeric_limits<std::size_t>::max()) {
timer = current_timer;
} else {
ASSERT(core_id < timers.size());
timer = timers.at(core_id);
}

s64 timeout = timer->GetTicks() + cycles_into_future;
if (current_timer == timer) {
// If this event needs to be scheduled before the next advance(), force one early
if (!timer->is_timer_sane)
timer->ForceExceptionCheck(cycles_into_future);

timer->event_queue.emplace_back(
Event{timeout, timer->event_fifo_id++, userdata, event_type});
std::push_heap(timer->event_queue.begin(), timer->event_queue.end(), std::greater<>());
} else {
timer->ts_queue.Push(Event{static_cast<s64>(timer->GetTicks() + cycles_into_future), 0,
userdata, event_type});
}
}

void Timing::UnscheduleEvent(const TimingEventType* event_type, u64 userdata) {
for (auto timer : timers) {
auto itr = std::remove_if(
timer->event_queue.begin(), timer->event_queue.end(),
[&](const Event& e) { return e.type == event_type && e.userdata == userdata; });

// Removing random items breaks the invariant so we have to re-establish it.
if (itr != timer->event_queue.end()) {
timer->event_queue.erase(itr, timer->event_queue.end());
std::make_heap(timer->event_queue.begin(), timer->event_queue.end(), std::greater<>());
}
}
// TODO:remove events from ts_queue
}

void Timing::RemoveEvent(const TimingEventType* event_type) {
for (auto timer : timers) {
auto itr = std::remove_if(timer->event_queue.begin(), timer->event_queue.end(),
[&](const Event& e) { return e.type == event_type; });

// Removing random items breaks the invariant so we have to re-establish it.
if (itr != timer->event_queue.end()) {
timer->event_queue.erase(itr, timer->event_queue.end());
std::make_heap(timer->event_queue.begin(), timer->event_queue.end(), std::greater<>());
}
}
// TODO:remove events from ts_queue
}

void Timing::SetCurrentTimer(std::size_t core_id) {
current_timer = timers[core_id];
}

s64 Timing::GetTicks() const {
return current_timer->GetTicks();
}

s64 Timing::GetGlobalTicks() const {
return global_timer;
}

std::chrono::microseconds Timing::GetGlobalTimeUs() const {
return std::chrono::microseconds{GetTicks() * 1000000 / BASE_CLOCK_RATE_ARM11};
}

std::shared_ptr<Timing::Timer> Timing::GetTimer(std::size_t cpu_id) {
return timers[cpu_id];
}

Timing::Timer::~Timer() {
MoveEvents();
}

u64 Timing::GetTicks() const {
u64 ticks = static_cast<u64>(global_timer);
if (!is_global_timer_sane) {
u64 Timing::Timer::GetTicks() const {
u64 ticks = static_cast<u64>(executed_ticks);
if (!is_timer_sane) {
ticks += slice_length - downcount;
}
return ticks;
}

void Timing::AddTicks(u64 ticks) {
void Timing::Timer::AddTicks(u64 ticks) {
downcount -= ticks;
}

u64 Timing::GetIdleTicks() const {
u64 Timing::Timer::GetIdleTicks() const {
return static_cast<u64>(idled_cycles);
}

void Timing::ScheduleEvent(s64 cycles_into_future, const TimingEventType* event_type,
u64 userdata) {
ASSERT(event_type != nullptr);
s64 timeout = GetTicks() + cycles_into_future;

// If this event needs to be scheduled before the next advance(), force one early
if (!is_global_timer_sane)
ForceExceptionCheck(cycles_into_future);

event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type});
std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
}

void Timing::ScheduleEventThreadsafe(s64 cycles_into_future, const TimingEventType* event_type,
u64 userdata) {
ts_queue.Push(Event{global_timer + cycles_into_future, 0, userdata, event_type});
}

void Timing::UnscheduleEvent(const TimingEventType* event_type, u64 userdata) {
auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
return e.type == event_type && e.userdata == userdata;
});

// Removing random items breaks the invariant so we have to re-establish it.
if (itr != event_queue.end()) {
event_queue.erase(itr, event_queue.end());
std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
}
}

void Timing::RemoveEvent(const TimingEventType* event_type) {
auto itr = std::remove_if(event_queue.begin(), event_queue.end(),
[&](const Event& e) { return e.type == event_type; });

// Removing random items breaks the invariant so we have to re-establish it.
if (itr != event_queue.end()) {
event_queue.erase(itr, event_queue.end());
std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
}
}

void Timing::RemoveNormalAndThreadsafeEvent(const TimingEventType* event_type) {
MoveEvents();
RemoveEvent(event_type);
}

void Timing::ForceExceptionCheck(s64 cycles) {
void Timing::Timer::ForceExceptionCheck(s64 cycles) {
cycles = std::max<s64>(0, cycles);
if (downcount > cycles) {
slice_length -= downcount - cycles;

@@ -108,7 +145,7 @@ void Timing::ForceExceptionCheck(s64 cycles) {
}
}

void Timing::MoveEvents() {
void Timing::Timer::MoveEvents() {
for (Event ev; ts_queue.Pop(ev);) {
ev.fifo_order = event_fifo_id++;
event_queue.emplace_back(std::move(ev));

@@ -116,43 +153,49 @@ void Timing::MoveEvents() {
}
}

void Timing::Advance() {
s64 Timing::Timer::GetMaxSliceLength() const {
auto next_event = std::find_if(event_queue.begin(), event_queue.end(),
[&](const Event& e) { return e.time - executed_ticks > 0; });
if (next_event != event_queue.end()) {
return next_event->time - executed_ticks;
}
return MAX_SLICE_LENGTH;
}

void Timing::Timer::Advance(s64 max_slice_length) {
MoveEvents();

s64 cycles_executed = slice_length - downcount;
global_timer += cycles_executed;
slice_length = MAX_SLICE_LENGTH;
idled_cycles = 0;
executed_ticks += cycles_executed;
slice_length = max_slice_length;

is_global_timer_sane = true;
is_timer_sane = true;

while (!event_queue.empty() && event_queue.front().time <= global_timer) {
while (!event_queue.empty() && event_queue.front().time <= executed_ticks) {
Event evt = std::move(event_queue.front());
std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
event_queue.pop_back();
evt.type->callback(evt.userdata, global_timer - evt.time);
evt.type->callback(evt.userdata, executed_ticks - evt.time);
}

is_global_timer_sane = false;
is_timer_sane = false;

// Still events left (scheduled in the future)
if (!event_queue.empty()) {
slice_length = static_cast<int>(
std::min<s64>(event_queue.front().time - global_timer, MAX_SLICE_LENGTH));
std::min<s64>(event_queue.front().time - executed_ticks, max_slice_length));
}

downcount = slice_length;
}

void Timing::Idle() {
void Timing::Timer::Idle() {
idled_cycles += downcount;
downcount = 0;
}

std::chrono::microseconds Timing::GetGlobalTimeUs() const {
return std::chrono::microseconds{GetTicks() * 1000000 / BASE_CLOCK_RATE_ARM11};
}

s64 Timing::GetDowncount() const {
s64 Timing::Timer::GetDowncount() const {
return downcount;
}

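The Timer keeps its pending events in a std::vector maintained as a min-heap ordered by (time, fifo_order), exactly as the operator> above suggests, so two events scheduled for the same tick fire in scheduling order. A stand-alone sketch of that discipline (Event is reduced to the two fields the ordering needs; nothing here is copied from the commit beyond the heap calls it uses):

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <tuple>
    #include <vector>

    struct Event {
        std::int64_t time;
        std::uint64_t fifo_order;
        bool operator>(const Event& right) const {
            return std::tie(time, fifo_order) > std::tie(right.time, right.fifo_order);
        }
    };

    int main() {
        std::vector<Event> event_queue;
        std::uint64_t event_fifo_id = 0;

        auto schedule = [&](std::int64_t time) {
            event_queue.push_back(Event{time, event_fifo_id++});
            std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
        };

        schedule(500);
        schedule(100);
        schedule(100); // same timestamp: fifo_order breaks the tie

        while (!event_queue.empty()) {
            std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
            Event evt = event_queue.back();
            event_queue.pop_back();
            std::cout << "fire t=" << evt.time << " order=" << evt.fifo_order << '\n';
        }
        return 0;
    }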
@@ -134,62 +134,6 @@ struct TimingEventType {

class Timing {
public:
~Timing();

/**
* This should only be called from the emu thread, if you are calling it any other thread, you
* are doing something evil
*/
u64 GetTicks() const;
u64 GetIdleTicks() const;
void AddTicks(u64 ticks);

/**
* Returns the event_type identifier. if name is not unique, it will assert.
*/
TimingEventType* RegisterEvent(const std::string& name, TimedCallback callback);

/**
* After the first Advance, the slice lengths and the downcount will be reduced whenever an
* event is scheduled earlier than the current values. Scheduling from a callback will not
* update the downcount until the Advance() completes.
*/
void ScheduleEvent(s64 cycles_into_future, const TimingEventType* event_type, u64 userdata = 0);

/**
* This is to be called when outside of hle threads, such as the graphics thread, wants to
* schedule things to be executed on the main thread.
* Not that this doesn't change slice_length and thus events scheduled by this might be called
* with a delay of up to MAX_SLICE_LENGTH
*/
void ScheduleEventThreadsafe(s64 cycles_into_future, const TimingEventType* event_type,
u64 userdata);

void UnscheduleEvent(const TimingEventType* event_type, u64 userdata);

/// We only permit one event of each type in the queue at a time.
void RemoveEvent(const TimingEventType* event_type);
void RemoveNormalAndThreadsafeEvent(const TimingEventType* event_type);

/** Advance must be called at the beginning of dispatcher loops, not the end. Advance() ends
* the previous timing slice and begins the next one, you must Advance from the previous
* slice to the current one before executing any cycles. CoreTiming starts in slice -1 so an
* Advance() is required to initialize the slice length before the first cycle of emulated
* instructions is executed.
*/
void Advance();
void MoveEvents();

/// Pretend that the main CPU has executed enough cycles to reach the next event.
void Idle();

void ForceExceptionCheck(s64 cycles);

std::chrono::microseconds GetGlobalTimeUs() const;

s64 GetDowncount() const;

private:
struct Event {
s64 time;
u64 fifo_order;

@@ -202,33 +146,93 @@ private:

static constexpr int MAX_SLICE_LENGTH = 20000;

class Timer {
public:
~Timer();

s64 GetMaxSliceLength() const;

void Advance(s64 max_slice_length = MAX_SLICE_LENGTH);

void Idle();

u64 GetTicks() const;
u64 GetIdleTicks() const;

void AddTicks(u64 ticks);

s64 GetDowncount() const;

void ForceExceptionCheck(s64 cycles);

void MoveEvents();

private:
friend class Timing;
// The queue is a min-heap using std::make_heap/push_heap/pop_heap.
// We don't use std::priority_queue because we need to be able to serialize, unserialize and
// erase arbitrary events (RemoveEvent()) regardless of the queue order. These aren't
// accomodated by the standard adaptor class.
std::vector<Event> event_queue;
u64 event_fifo_id = 0;
// the queue for storing the events from other threads threadsafe until they will be added
// to the event_queue by the emu thread
Common::MPSCQueue<Event> ts_queue;
// Are we in a function that has been called from Advance()
// If events are sheduled from a function that gets called from Advance(),
// don't change slice_length and downcount.
// The time between CoreTiming being intialized and the first call to Advance() is
// considered the slice boundary between slice -1 and slice 0. Dispatcher loops must call
// Advance() before executing the first cycle of each slice to prepare the slice length and
// downcount for that slice.
bool is_timer_sane = true;

s64 slice_length = MAX_SLICE_LENGTH;
s64 downcount = MAX_SLICE_LENGTH;
s64 executed_ticks = 0;
u64 idled_cycles;
};

explicit Timing(std::size_t num_cores);

~Timing(){};

/**
* Returns the event_type identifier. if name is not unique, it will assert.
*/
TimingEventType* RegisterEvent(const std::string& name, TimedCallback callback);

void ScheduleEvent(s64 cycles_into_future, const TimingEventType* event_type, u64 userdata = 0,
std::size_t core_id = std::numeric_limits<std::size_t>::max());

void UnscheduleEvent(const TimingEventType* event_type, u64 userdata);

/// We only permit one event of each type in the queue at a time.
void RemoveEvent(const TimingEventType* event_type);

void SetCurrentTimer(std::size_t core_id);

s64 GetTicks() const;

s64 GetGlobalTicks() const;

void AddToGlobalTicks(s64 ticks) {
global_timer += ticks;
}

std::chrono::microseconds GetGlobalTimeUs() const;

std::shared_ptr<Timer> GetTimer(std::size_t cpu_id);

private:
s64 global_timer = 0;
s64 slice_length = MAX_SLICE_LENGTH;
s64 downcount = MAX_SLICE_LENGTH;

// unordered_map stores each element separately as a linked list node so pointers to
// elements remain stable regardless of rehashes/resizing.
std::unordered_map<std::string, TimingEventType> event_types;

// The queue is a min-heap using std::make_heap/push_heap/pop_heap.
// We don't use std::priority_queue because we need to be able to serialize, unserialize and
// erase arbitrary events (RemoveEvent()) regardless of the queue order. These aren't
// accomodated by the standard adaptor class.
std::vector<Event> event_queue;
u64 event_fifo_id = 0;
// the queue for storing the events from other threads threadsafe until they will be added
// to the event_queue by the emu thread
Common::MPSCQueue<Event> ts_queue;
s64 idled_cycles = 0;

// Are we in a function that has been called from Advance()
// If events are sheduled from a function that gets called from Advance(),
// don't change slice_length and downcount.
// The time between CoreTiming being intialized and the first call to Advance() is considered
// the slice boundary between slice -1 and slice 0. Dispatcher loops must call Advance() before
// executing the first cycle of each slice to prepare the slice length and downcount for
// that slice.
bool is_global_timer_sane = true;
std::vector<std::shared_ptr<Timer>> timers;
std::shared_ptr<Timer> current_timer;
};

} // namespace Core

@@ -160,10 +160,14 @@ BreakpointMap breakpoints_write;
} // Anonymous namespace

static Kernel::Thread* FindThreadById(int id) {
const auto& threads = Core::System::GetInstance().Kernel().GetThreadManager().GetThreadList();
for (auto& thread : threads) {
if (thread->GetThreadId() == static_cast<u32>(id)) {
return thread.get();
u32 num_cores = Core::GetNumCores();
for (u32 i = 0; i < num_cores; ++i) {
const auto& threads =
Core::System::GetInstance().Kernel().GetThreadManager(i).GetThreadList();
for (auto& thread : threads) {
if (thread->GetThreadId() == static_cast<u32>(id)) {
return thread.get();
}
}
}
return nullptr;

@@ -414,7 +418,10 @@ static void RemoveBreakpoint(BreakpointType type, VAddr addr) {
Core::System::GetInstance().Memory().WriteBlock(
*Core::System::GetInstance().Kernel().GetCurrentProcess(), bp->second.addr,
bp->second.inst.data(), bp->second.inst.size());
Core::CPU().ClearInstructionCache();
u32 num_cores = Core::GetNumCores();
for (u32 i = 0; i < num_cores; ++i) {
Core::GetCore(i).ClearInstructionCache();
}
}
p.erase(addr);
}

@@ -540,10 +547,13 @@ static void HandleQuery() {
SendReply(target_xml);
} else if (strncmp(query, "fThreadInfo", strlen("fThreadInfo")) == 0) {
std::string val = "m";
const auto& threads =
Core::System::GetInstance().Kernel().GetThreadManager().GetThreadList();
for (const auto& thread : threads) {
val += fmt::format("{:x},", thread->GetThreadId());
u32 num_cores = Core::GetNumCores();
for (u32 i = 0; i < num_cores; ++i) {
const auto& threads =
Core::System::GetInstance().Kernel().GetThreadManager(i).GetThreadList();
for (const auto& thread : threads) {
val += fmt::format("{:x},", thread->GetThreadId());
}
}
val.pop_back();
SendReply(val.c_str());

@@ -553,11 +563,14 @@ static void HandleQuery() {
std::string buffer;
buffer += "l<?xml version=\"1.0\"?>";
buffer += "<threads>";
const auto& threads =
Core::System::GetInstance().Kernel().GetThreadManager().GetThreadList();
for (const auto& thread : threads) {
buffer += fmt::format(R"*(<thread id="{:x}" name="Thread {:x}"></thread>)*",
thread->GetThreadId(), thread->GetThreadId());
u32 num_cores = Core::GetNumCores();
for (u32 i = 0; i < num_cores; ++i) {
const auto& threads =
Core::System::GetInstance().Kernel().GetThreadManager(i).GetThreadList();
for (const auto& thread : threads) {
buffer += fmt::format(R"*(<thread id="{:x}" name="Thread {:x}"></thread>)*",
thread->GetThreadId(), thread->GetThreadId());
}
}
buffer += "</threads>";
SendReply(buffer.c_str());

@@ -619,9 +632,9 @@ static void SendSignal(Kernel::Thread* thread, u32 signal, bool full = true) {
if (full) {

buffer = fmt::format("T{:02x}{:02x}:{:08x};{:02x}:{:08x};{:02x}:{:08x}", latest_signal,
PC_REGISTER, htonl(Core::CPU().GetPC()), SP_REGISTER,
htonl(Core::CPU().GetReg(SP_REGISTER)), LR_REGISTER,
htonl(Core::CPU().GetReg(LR_REGISTER)));
PC_REGISTER, htonl(Core::GetRunningCore().GetPC()), SP_REGISTER,
htonl(Core::GetRunningCore().GetReg(SP_REGISTER)), LR_REGISTER,
htonl(Core::GetRunningCore().GetReg(LR_REGISTER)));
} else {
buffer = fmt::format("T{:02x}", latest_signal);
}

@@ -782,7 +795,7 @@ static void WriteRegister() {
return SendReply("E01");
}

Core::CPU().LoadContext(current_thread->context);
Core::GetRunningCore().LoadContext(current_thread->context);

SendReply("OK");
}

@@ -812,7 +825,7 @@ static void WriteRegisters() {
}
}

Core::CPU().LoadContext(current_thread->context);
Core::GetRunningCore().LoadContext(current_thread->context);

SendReply("OK");
}

@@ -869,7 +882,7 @@ static void WriteMemory() {
GdbHexToMem(data.data(), len_pos + 1, len);
Core::System::GetInstance().Memory().WriteBlock(
*Core::System::GetInstance().Kernel().GetCurrentProcess(), addr, data.data(), len);
Core::CPU().ClearInstructionCache();
Core::GetRunningCore().ClearInstructionCache();
SendReply("OK");
}

@@ -883,12 +896,12 @@ void Break(bool is_memory_break) {
static void Step() {
if (command_length > 1) {
RegWrite(PC_REGISTER, GdbHexToInt(command_buffer + 1), current_thread);
Core::CPU().LoadContext(current_thread->context);
Core::GetRunningCore().LoadContext(current_thread->context);
}
step_loop = true;
halt_loop = true;
send_trap = true;
Core::CPU().ClearInstructionCache();
Core::GetRunningCore().ClearInstructionCache();
}

bool IsMemoryBreak() {

@@ -904,7 +917,7 @@ static void Continue() {
memory_break = false;
step_loop = false;
halt_loop = false;
Core::CPU().ClearInstructionCache();
Core::GetRunningCore().ClearInstructionCache();
}

/**

@@ -930,7 +943,7 @@ static bool CommitBreakpoint(BreakpointType type, VAddr addr, u32 len) {
Core::System::GetInstance().Memory().WriteBlock(
*Core::System::GetInstance().Kernel().GetCurrentProcess(), addr, btrap.data(),
btrap.size());
Core::CPU().ClearInstructionCache();
Core::GetRunningCore().ClearInstructionCache();
}
p.insert({addr, breakpoint});

@@ -83,7 +83,7 @@ bool HandleTable::IsValid(Handle handle) const {

std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
if (handle == CurrentThread) {
return SharedFrom(kernel.GetThreadManager().GetCurrentThread());
return SharedFrom(kernel.GetCurrentThreadManager().GetCurrentThread());
} else if (handle == CurrentProcess) {
return kernel.GetCurrentProcess();
}

@@ -18,19 +18,27 @@ namespace Kernel {

/// Initialize the kernel
KernelSystem::KernelSystem(Memory::MemorySystem& memory, Core::Timing& timing,
std::function<void()> prepare_reschedule_callback, u32 system_mode)
std::function<void()> prepare_reschedule_callback, u32 system_mode,
u32 num_cores)
: memory(memory), timing(timing),
prepare_reschedule_callback(std::move(prepare_reschedule_callback)) {
MemoryInit(system_mode);

resource_limits = std::make_unique<ResourceLimitList>(*this);
thread_manager = std::make_unique<ThreadManager>(*this);
for (u32 core_id = 0; core_id < num_cores; ++core_id) {
thread_managers.push_back(std::make_unique<ThreadManager>(*this, core_id));
}
timer_manager = std::make_unique<TimerManager>(timing);
ipc_recorder = std::make_unique<IPCDebugger::Recorder>();
stored_processes.assign(num_cores, nullptr);

next_thread_id = 1;
}

/// Shutdown the kernel
KernelSystem::~KernelSystem() = default;
KernelSystem::~KernelSystem() {
ResetThreadIDs();
};

ResourceLimitList& KernelSystem::ResourceLimit() {
return *resource_limits;

@@ -53,6 +61,15 @@ void KernelSystem::SetCurrentProcess(std::shared_ptr<Process> process) {
SetCurrentMemoryPageTable(&process->vm_manager.page_table);
}

void KernelSystem::SetCurrentProcessForCPU(std::shared_ptr<Process> process, u32 core_id) {
if (current_cpu->GetID() == core_id) {
current_process = process;
SetCurrentMemoryPageTable(&process->vm_manager.page_table);
} else {
stored_processes[core_id] = process;
}
}

void KernelSystem::SetCurrentMemoryPageTable(Memory::PageTable* page_table) {
memory.SetCurrentPageTable(page_table);
if (current_cpu != nullptr) {

@@ -60,17 +77,39 @@ void KernelSystem::SetCurrentMemoryPageTable(Memory::PageTable* page_table) {
}
}

void KernelSystem::SetCPU(std::shared_ptr<ARM_Interface> cpu) {
void KernelSystem::SetCPUs(std::vector<std::shared_ptr<ARM_Interface>> cpus) {
ASSERT(cpus.size() == thread_managers.size());
u32 i = 0;
for (const auto& cpu : cpus) {
thread_managers[i++]->SetCPU(*cpu);
}
}

void KernelSystem::SetRunningCPU(std::shared_ptr<ARM_Interface> cpu) {
if (current_process) {
stored_processes[current_cpu->GetID()] = current_process;
}
current_cpu = cpu;
thread_manager->SetCPU(*cpu);
timing.SetCurrentTimer(cpu->GetID());
if (stored_processes[current_cpu->GetID()]) {
SetCurrentProcess(stored_processes[current_cpu->GetID()]);
}
}

ThreadManager& KernelSystem::GetThreadManager() {
return *thread_manager;
ThreadManager& KernelSystem::GetThreadManager(u32 core_id) {
return *thread_managers[core_id];
}

const ThreadManager& KernelSystem::GetThreadManager() const {
return *thread_manager;
const ThreadManager& KernelSystem::GetThreadManager(u32 core_id) const {
return *thread_managers[core_id];
}

ThreadManager& KernelSystem::GetCurrentThreadManager() {
return *thread_managers[current_cpu->GetID()];
}

const ThreadManager& KernelSystem::GetCurrentThreadManager() const {
return *thread_managers[current_cpu->GetID()];
}

TimerManager& KernelSystem::GetTimerManager() {

@@ -101,4 +140,12 @@ void KernelSystem::AddNamedPort(std::string name, std::shared_ptr<ClientPort> po
named_ports.emplace(std::move(name), std::move(port));
}

u32 KernelSystem::NewThreadId() {
return next_thread_id++;
}

void KernelSystem::ResetThreadIDs() {
next_thread_id = 0;
}

} // namespace Kernel

@@ -85,7 +85,8 @@ enum class MemoryRegion : u16 {
class KernelSystem {
public:
explicit KernelSystem(Memory::MemorySystem& memory, Core::Timing& timing,
std::function<void()> prepare_reschedule_callback, u32 system_mode);
std::function<void()> prepare_reschedule_callback, u32 system_mode,
u32 num_cores);
~KernelSystem();

using PortPair = std::pair<std::shared_ptr<ServerPort>, std::shared_ptr<ClientPort>>;

@@ -210,13 +211,19 @@ public:

std::shared_ptr<Process> GetCurrentProcess() const;
void SetCurrentProcess(std::shared_ptr<Process> process);
void SetCurrentProcessForCPU(std::shared_ptr<Process> process, u32 core_id);

void SetCurrentMemoryPageTable(Memory::PageTable* page_table);

void SetCPU(std::shared_ptr<ARM_Interface> cpu);
void SetCPUs(std::vector<std::shared_ptr<ARM_Interface>> cpu);

ThreadManager& GetThreadManager();
const ThreadManager& GetThreadManager() const;
void SetRunningCPU(std::shared_ptr<ARM_Interface> cpu);

ThreadManager& GetThreadManager(u32 core_id);
const ThreadManager& GetThreadManager(u32 core_id) const;

ThreadManager& GetCurrentThreadManager();
const ThreadManager& GetCurrentThreadManager() const;

TimerManager& GetTimerManager();
const TimerManager& GetTimerManager() const;

@@ -242,6 +249,10 @@ public:
prepare_reschedule_callback();
}

u32 NewThreadId();

void ResetThreadIDs();

/// Map of named ports managed by the kernel, which can be retrieved using the ConnectToPort
std::unordered_map<std::string, std::shared_ptr<ClientPort>> named_ports;

@@ -276,13 +287,16 @@ private:
std::vector<std::shared_ptr<Process>> process_list;

std::shared_ptr<Process> current_process;
std::vector<std::shared_ptr<Process>> stored_processes;

std::unique_ptr<ThreadManager> thread_manager;
std::vector<std::unique_ptr<ThreadManager>> thread_managers;

std::unique_ptr<ConfigMem::Handler> config_mem_handler;
std::unique_ptr<SharedPage::Handler> shared_page_handler;

std::unique_ptr<IPCDebugger::Recorder> ipc_recorder;

u32 next_thread_id;
};

} // namespace Kernel

@@ -35,7 +35,7 @@ std::shared_ptr<Mutex> KernelSystem::CreateMutex(bool initial_locked, std::strin

// Acquire mutex with current thread if initialized as locked
if (initial_locked)
mutex->Acquire(thread_manager->GetCurrentThread());
mutex->Acquire(thread_managers[current_cpu->GetID()]->GetCurrentThread());

return mutex;
}

@@ -56,7 +56,7 @@ Handler::Handler(Core::Timing& timing) : timing(timing) {
using namespace std::placeholders;
update_time_event = timing.RegisterEvent("SharedPage::UpdateTimeCallback",
std::bind(&Handler::UpdateTimeCallback, this, _1, _2));
timing.ScheduleEvent(0, update_time_event);
timing.ScheduleEvent(0, update_time_event, 0, 0);

float slidestate = Settings::values.factor_3d / 100.0f;
shared_page.sliderstate_3d = static_cast<float_le>(slidestate);

@ -280,12 +280,12 @@ void SVC::ExitProcess() {
|
|||
current_process->status = ProcessStatus::Exited;
|
||||
|
||||
// Stop all the process threads that are currently waiting for objects.
|
||||
auto& thread_list = kernel.GetThreadManager().GetThreadList();
|
||||
auto& thread_list = kernel.GetCurrentThreadManager().GetThreadList();
|
||||
for (auto& thread : thread_list) {
|
||||
if (thread->owner_process != current_process.get())
|
||||
continue;
|
||||
|
||||
if (thread.get() == kernel.GetThreadManager().GetCurrentThread())
|
||||
if (thread.get() == kernel.GetCurrentThreadManager().GetCurrentThread())
|
||||
continue;
|
||||
|
||||
// TODO(Subv): When are the other running/ready threads terminated?
|
||||
|
@ -297,7 +297,7 @@ void SVC::ExitProcess() {
|
|||
}
|
||||
|
||||
// Kill the current thread
|
||||
kernel.GetThreadManager().GetCurrentThread()->Stop();
|
||||
kernel.GetCurrentThreadManager().GetCurrentThread()->Stop();
|
||||
|
||||
system.PrepareReschedule();
|
||||
}
|
||||
|
@ -388,7 +388,7 @@ ResultCode SVC::SendSyncRequest(Handle handle) {
|
|||
|
||||
system.PrepareReschedule();
|
||||
|
||||
auto thread = SharedFrom(kernel.GetThreadManager().GetCurrentThread());
|
||||
auto thread = SharedFrom(kernel.GetCurrentThreadManager().GetCurrentThread());
|
||||
|
||||
if (kernel.GetIPCRecorder().IsEnabled()) {
|
||||
kernel.GetIPCRecorder().RegisterRequest(session, thread);
|
||||
|
@ -406,7 +406,7 @@ ResultCode SVC::CloseHandle(Handle handle) {
|
|||
/// Wait for a handle to synchronize, timeout after the specified nanoseconds
|
||||
ResultCode SVC::WaitSynchronization1(Handle handle, s64 nano_seconds) {
|
||||
auto object = kernel.GetCurrentProcess()->handle_table.Get<WaitObject>(handle);
|
||||
Thread* thread = kernel.GetThreadManager().GetCurrentThread();
|
||||
Thread* thread = kernel.GetCurrentThreadManager().GetCurrentThread();
|
||||
|
||||
if (object == nullptr)
|
||||
return ERR_INVALID_HANDLE;
|
||||
|
@ -458,7 +458,7 @@ ResultCode SVC::WaitSynchronization1(Handle handle, s64 nano_seconds) {
|
|||
/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
|
||||
ResultCode SVC::WaitSynchronizationN(s32* out, VAddr handles_address, s32 handle_count,
|
||||
bool wait_all, s64 nano_seconds) {
|
||||
Thread* thread = kernel.GetThreadManager().GetCurrentThread();
|
||||
Thread* thread = kernel.GetCurrentThreadManager().GetCurrentThread();
|
||||
|
||||
if (!Memory::IsValidVirtualAddress(*kernel.GetCurrentProcess(), handles_address))
|
||||
return ERR_INVALID_POINTER;
|
||||
|
@ -654,7 +654,7 @@ ResultCode SVC::ReplyAndReceive(s32* index, VAddr handles_address, s32 handle_co
|
|||
|
||||
// We are also sending a command reply.
|
||||
// Do not send a reply if the command id in the command buffer is 0xFFFF.
|
||||
Thread* thread = kernel.GetThreadManager().GetCurrentThread();
|
||||
Thread* thread = kernel.GetCurrentThreadManager().GetCurrentThread();
|
||||
u32 cmd_buff_header = memory.Read32(thread->GetCommandBufferAddress());
|
||||
IPC::Header header{cmd_buff_header};
|
||||
if (reply_target != 0 && header.command_id != 0xFFFF) {
|
||||
|
@ -776,7 +776,7 @@ ResultCode SVC::ArbitrateAddress(Handle handle, u32 address, u32 type, u32 value
|
|||
return ERR_INVALID_HANDLE;
|
||||
|
||||
auto res =
|
||||
arbiter->ArbitrateAddress(SharedFrom(kernel.GetThreadManager().GetCurrentThread()),
|
||||
arbiter->ArbitrateAddress(SharedFrom(kernel.GetCurrentThreadManager().GetCurrentThread()),
|
||||
static_cast<ArbitrationType>(type), address, value, nanoseconds);
|
||||
|
||||
// TODO(Subv): Identify in which specific cases this call should cause a reschedule.
|
||||
|
@ -897,14 +897,19 @@ ResultCode SVC::CreateThread(Handle* out_handle, u32 entry_point, u32 arg, VAddr
|
|||
break;
|
||||
case ThreadProcessorIdAll:
|
||||
LOG_INFO(Kernel_SVC,
|
||||
"Newly created thread is allowed to be run in any Core, unimplemented.");
|
||||
"Newly created thread is allowed to be run in any Core, for now run in core 0.");
|
||||
processor_id = ThreadProcessorId0;
|
||||
break;
|
||||
case ThreadProcessorId1:
|
||||
LOG_ERROR(Kernel_SVC,
|
||||
"Newly created thread must run in the SysCore (Core1), unimplemented.");
|
||||
case ThreadProcessorId2:
|
||||
case ThreadProcessorId3:
|
||||
// TODO: Check and log for: When processorid==0x2 and the process is not a BASE mem-region
|
||||
// process, exheader kernel-flags bitmask 0x2000 must be set (otherwise error 0xD9001BEA is
|
||||
// returned). When processorid==0x3 and the process is not a BASE mem-region process, error
|
||||
// 0xD9001BEA is returned. These are the only restriction checks done by the kernel for
|
||||
// processorid.
|
||||
break;
|
||||
default:
|
||||
// TODO(bunnei): Implement support for other processor IDs
|
||||
ASSERT_MSG(false, "Unsupported thread processor ID: {}", processor_id);
|
||||
break;
|
||||
}
|
||||
|
@ -930,9 +935,9 @@ ResultCode SVC::CreateThread(Handle* out_handle, u32 entry_point, u32 arg, VAddr
|
|||
|
||||
/// Called when a thread exits
|
||||
void SVC::ExitThread() {
|
||||
LOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", system.CPU().GetPC());
|
||||
LOG_TRACE(Kernel_SVC, "called, pc=0x{:08X}", system.GetRunningCore().GetPC());
|
||||
|
||||
kernel.GetThreadManager().ExitCurrentThread();
|
||||
kernel.GetCurrentThreadManager().ExitCurrentThread();
|
||||
system.PrepareReschedule();
|
||||
}
|
||||
|
||||
|
@ -978,7 +983,7 @@ ResultCode SVC::SetThreadPriority(Handle handle, u32 priority) {
 /// Create a mutex
 ResultCode SVC::CreateMutex(Handle* out_handle, u32 initial_locked) {
     std::shared_ptr<Mutex> mutex = kernel.CreateMutex(initial_locked != 0);
-    mutex->name = fmt::format("mutex-{:08x}", system.CPU().GetReg(14));
+    mutex->name = fmt::format("mutex-{:08x}", system.GetRunningCore().GetReg(14));
     CASCADE_RESULT(*out_handle, kernel.GetCurrentProcess()->handle_table.Create(std::move(mutex)));
 
     LOG_TRACE(Kernel_SVC, "called initial_locked={} : created handle=0x{:08X}",
@ -995,7 +1000,7 @@ ResultCode SVC::ReleaseMutex(Handle handle) {
     if (mutex == nullptr)
         return ERR_INVALID_HANDLE;
 
-    return mutex->Release(kernel.GetThreadManager().GetCurrentThread());
+    return mutex->Release(kernel.GetCurrentThreadManager().GetCurrentThread());
 }
 
 /// Get the ID of the specified process
@ -1045,7 +1050,7 @@ ResultCode SVC::GetThreadId(u32* thread_id, Handle handle) {
 ResultCode SVC::CreateSemaphore(Handle* out_handle, s32 initial_count, s32 max_count) {
     CASCADE_RESULT(std::shared_ptr<Semaphore> semaphore,
                    kernel.CreateSemaphore(initial_count, max_count));
-    semaphore->name = fmt::format("semaphore-{:08x}", system.CPU().GetReg(14));
+    semaphore->name = fmt::format("semaphore-{:08x}", system.GetRunningCore().GetReg(14));
     CASCADE_RESULT(*out_handle,
                    kernel.GetCurrentProcess()->handle_table.Create(std::move(semaphore)));
 
@ -1115,8 +1120,9 @@ ResultCode SVC::QueryMemory(MemoryInfo* memory_info, PageInfo* page_info, u32 ad
 
 /// Create an event
 ResultCode SVC::CreateEvent(Handle* out_handle, u32 reset_type) {
-    std::shared_ptr<Event> evt = kernel.CreateEvent(
-        static_cast<ResetType>(reset_type), fmt::format("event-{:08x}", system.CPU().GetReg(14)));
+    std::shared_ptr<Event> evt =
+        kernel.CreateEvent(static_cast<ResetType>(reset_type),
+                           fmt::format("event-{:08x}", system.GetRunningCore().GetReg(14)));
     CASCADE_RESULT(*out_handle, kernel.GetCurrentProcess()->handle_table.Create(std::move(evt)));
 
     LOG_TRACE(Kernel_SVC, "called reset_type=0x{:08X} : created handle=0x{:08X}", reset_type,
@ -1158,8 +1164,9 @@ ResultCode SVC::ClearEvent(Handle handle) {
 
 /// Creates a timer
 ResultCode SVC::CreateTimer(Handle* out_handle, u32 reset_type) {
-    std::shared_ptr<Timer> timer = kernel.CreateTimer(
-        static_cast<ResetType>(reset_type), fmt ::format("timer-{:08x}", system.CPU().GetReg(14)));
+    std::shared_ptr<Timer> timer =
+        kernel.CreateTimer(static_cast<ResetType>(reset_type),
+                           fmt ::format("timer-{:08x}", system.GetRunningCore().GetReg(14)));
     CASCADE_RESULT(*out_handle, kernel.GetCurrentProcess()->handle_table.Create(std::move(timer)));
 
     LOG_TRACE(Kernel_SVC, "called reset_type=0x{:08X} : created handle=0x{:08X}", reset_type,
@ -1213,7 +1220,7 @@ ResultCode SVC::CancelTimer(Handle handle) {
 void SVC::SleepThread(s64 nanoseconds) {
     LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds);
 
-    ThreadManager& thread_manager = kernel.GetThreadManager();
+    ThreadManager& thread_manager = kernel.GetCurrentThreadManager();
 
     // Don't attempt to yield execution if there are no available threads to run,
     // this way we avoid a useless reschedule to the idle thread.
@ -1231,10 +1238,11 @@ void SVC::SleepThread(s64 nanoseconds) {
 
 /// This returns the total CPU ticks elapsed since the CPU was powered-on
 s64 SVC::GetSystemTick() {
-    s64 result = system.CoreTiming().GetTicks();
+    // TODO: Use globalTicks here?
+    s64 result = system.GetRunningCore().GetTimer()->GetTicks();
     // Advance time to defeat dumb games (like Cubic Ninja) that busy-wait for the frame to end.
     // Measured time between two calls on a 9.2 o3DS with Ninjhax 1.1b
-    system.CoreTiming().AddTicks(150);
+    system.GetRunningCore().GetTimer()->AddTicks(150);
     return result;
 }
 
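Note on the hunk above: svcGetSystemTick now reads from the timer of whichever core is currently executing rather than one global CoreTiming instance, and it still bumps that timer by 150 ticks so games that busy-wait on the tick (the Cubic Ninja case in the comment) observe time moving forward. The snippet below is a self-contained sketch of that per-core accounting, not the emulator's real Timer/Timing classes; only the GetTicks/AddTicks names and the 150-tick bump come from the diff, everything else is assumed.

// Sketch only: per-core tick counters in the spirit of the change above.
// Not the emulator's real implementation.
#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>

struct Timer {
    std::int64_t ticks = 0;
    std::int64_t GetTicks() const { return ticks; }
    void AddTicks(std::int64_t n) { ticks += n; }
};

int main() {
    std::array<Timer, 4> core_timers{}; // one timer per emulated core (assumed layout)
    std::size_t running_core = 0;       // index of the core currently executing

    // Equivalent of reading the system tick on the running core:
    std::int64_t result = core_timers[running_core].GetTicks();
    // Advance a little so guest code busy-waiting on the tick sees progress.
    core_timers[running_core].AddTicks(150);

    std::cout << "tick read: " << result << ", timer is now at "
              << core_timers[running_core].GetTicks() << '\n';
}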
@ -1596,11 +1604,11 @@ void SVC::CallSVC(u32 immediate) {
 SVC::SVC(Core::System& system) : system(system), kernel(system.Kernel()), memory(system.Memory()) {}
 
 u32 SVC::GetReg(std::size_t n) {
-    return system.CPU().GetReg(static_cast<int>(n));
+    return system.GetRunningCore().GetReg(static_cast<int>(n));
 }
 
 void SVC::SetReg(std::size_t n, u32 value) {
-    system.CPU().SetReg(static_cast<int>(n), value);
+    system.GetRunningCore().SetReg(static_cast<int>(n), value);
 }
 
 SVCContext::SVCContext(Core::System& system) : impl(std::make_unique<SVC>(system)) {}
@ -33,13 +33,9 @@ void Thread::Acquire(Thread* thread) {
     ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
 }
 
-u32 ThreadManager::NewThreadId() {
-    return next_thread_id++;
-}
-
-Thread::Thread(KernelSystem& kernel)
-    : WaitObject(kernel), context(kernel.GetThreadManager().NewContext()),
-      thread_manager(kernel.GetThreadManager()) {}
+Thread::Thread(KernelSystem& kernel, u32 core_id)
+    : WaitObject(kernel), context(kernel.GetThreadManager(core_id).NewContext()),
+      thread_manager(kernel.GetThreadManager(core_id)) {}
 Thread::~Thread() {}
 
 Thread* ThreadManager::GetCurrentThread() const {
@ -84,7 +80,7 @@ void ThreadManager::SwitchContext(Thread* new_thread) {
|
|||
|
||||
// Save context for previous thread
|
||||
if (previous_thread) {
|
||||
previous_thread->last_running_ticks = timing.GetTicks();
|
||||
previous_thread->last_running_ticks = timing.GetGlobalTicks();
|
||||
cpu->SaveContext(previous_thread->context);
|
||||
|
||||
if (previous_thread->status == ThreadStatus::Running) {
|
||||
|
@ -111,7 +107,7 @@ void ThreadManager::SwitchContext(Thread* new_thread) {
|
|||
new_thread->status = ThreadStatus::Running;
|
||||
|
||||
if (previous_process.get() != current_thread->owner_process) {
|
||||
kernel.SetCurrentProcess(SharedFrom(current_thread->owner_process));
|
||||
kernel.SetCurrentProcessForCPU(SharedFrom(current_thread->owner_process), cpu->GetID());
|
||||
}
|
||||
|
||||
cpu->LoadContext(new_thread->context);
|
||||
|
@ -124,7 +120,7 @@ void ThreadManager::SwitchContext(Thread* new_thread) {
|
|||
}
|
||||
|
||||
Thread* ThreadManager::PopNextReadyThread() {
|
||||
Thread* next;
|
||||
Thread* next = nullptr;
|
||||
Thread* thread = GetCurrentThread();
|
||||
|
||||
if (thread && thread->status == ThreadStatus::Running) {
|
||||
|
@ -309,22 +305,22 @@ ResultVal<std::shared_ptr<Thread>> KernelSystem::CreateThread(std::string name,
|
|||
ErrorSummary::InvalidArgument, ErrorLevel::Permanent);
|
||||
}
|
||||
|
||||
auto thread{std::make_shared<Thread>(*this)};
|
||||
auto thread{std::make_shared<Thread>(*this, processor_id)};
|
||||
|
||||
thread_manager->thread_list.push_back(thread);
|
||||
thread_manager->ready_queue.prepare(priority);
|
||||
thread_managers[processor_id]->thread_list.push_back(thread);
|
||||
thread_managers[processor_id]->ready_queue.prepare(priority);
|
||||
|
||||
thread->thread_id = thread_manager->NewThreadId();
|
||||
thread->thread_id = NewThreadId();
|
||||
thread->status = ThreadStatus::Dormant;
|
||||
thread->entry_point = entry_point;
|
||||
thread->stack_top = stack_top;
|
||||
thread->nominal_priority = thread->current_priority = priority;
|
||||
thread->last_running_ticks = timing.GetTicks();
|
||||
thread->last_running_ticks = timing.GetGlobalTicks();
|
||||
thread->processor_id = processor_id;
|
||||
thread->wait_objects.clear();
|
||||
thread->wait_address = 0;
|
||||
thread->name = std::move(name);
|
||||
thread_manager->wakeup_callback_table[thread->thread_id] = thread.get();
|
||||
thread_managers[processor_id]->wakeup_callback_table[thread->thread_id] = thread.get();
|
||||
thread->owner_process = &owner_process;
|
||||
|
||||
// Find the next available TLS index, and mark it as used
|
||||
|
@ -369,7 +365,7 @@ ResultVal<std::shared_ptr<Thread>> KernelSystem::CreateThread(std::string name,
|
|||
// to initialize the context
|
||||
ResetThreadContext(thread->context, stack_top, entry_point, arg);
|
||||
|
||||
thread_manager->ready_queue.push_back(thread->current_priority, thread.get());
|
||||
thread_managers[processor_id]->ready_queue.push_back(thread->current_priority, thread.get());
|
||||
thread->status = ThreadStatus::Ready;
|
||||
|
||||
return MakeResult<std::shared_ptr<Thread>>(std::move(thread));
|
||||
|
@ -435,6 +431,9 @@ void ThreadManager::Reschedule() {
|
|||
LOG_TRACE(Kernel, "context switch {} -> idle", cur->GetObjectId());
|
||||
} else if (next) {
|
||||
LOG_TRACE(Kernel, "context switch idle -> {}", next->GetObjectId());
|
||||
} else {
|
||||
LOG_TRACE(Kernel, "context switch idle -> idle, do nothing");
|
||||
return;
|
||||
}
|
||||
|
||||
SwitchContext(next);
|
||||
|
@ -461,11 +460,10 @@ VAddr Thread::GetCommandBufferAddress() const {
     return GetTLSAddress() + command_header_offset;
 }
 
-ThreadManager::ThreadManager(Kernel::KernelSystem& kernel) : kernel(kernel) {
-    ThreadWakeupEventType =
-        kernel.timing.RegisterEvent("ThreadWakeupCallback", [this](u64 thread_id, s64 cycle_late) {
-            ThreadWakeupCallback(thread_id, cycle_late);
-        });
+ThreadManager::ThreadManager(Kernel::KernelSystem& kernel, u32 core_id) : kernel(kernel) {
+    ThreadWakeupEventType = kernel.timing.RegisterEvent(
+        "ThreadWakeupCallback_" + std::to_string(core_id),
+        [this](u64 thread_id, s64 cycle_late) { ThreadWakeupCallback(thread_id, cycle_late); });
 }
 
 ThreadManager::~ThreadManager() {
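One detail worth calling out in the constructor change above: with one ThreadManager per core, each manager registers its wakeup callback under a name suffixed with its core id. The sketch below shows why distinct names matter when the event registry is keyed by name; FakeTiming and its map are stand-ins assumed for illustration, only the "ThreadWakeupCallback_<core>" naming comes from the diff.

// Illustrative stand-in for a name-keyed event registry; not the real Core::Timing.
#include <cstdint>
#include <functional>
#include <iostream>
#include <map>
#include <string>

using EventCallback = std::function<void(std::uint64_t, std::int64_t)>;

struct FakeTiming {
    std::map<std::string, EventCallback> events; // keyed by name, so names must stay unique
    void RegisterEvent(const std::string& name, EventCallback cb) {
        events.emplace(name, std::move(cb));
    }
};

int main() {
    FakeTiming timing;
    for (unsigned core_id = 0; core_id < 4; ++core_id) {
        timing.RegisterEvent("ThreadWakeupCallback_" + std::to_string(core_id),
                             [core_id](std::uint64_t thread_id, std::int64_t /*cycles_late*/) {
                                 std::cout << "wake thread " << thread_id << " on core "
                                           << core_id << '\n';
                             });
    }
    std::cout << timing.events.size() << " distinct wakeup events registered\n";
}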
@ -34,7 +34,9 @@ enum ThreadProcessorId : s32 {
     ThreadProcessorIdAll = -1,  ///< Run thread on either core
     ThreadProcessorId0 = 0,     ///< Run thread on core 0 (AppCore)
     ThreadProcessorId1 = 1,     ///< Run thread on core 1 (SysCore)
-    ThreadProcessorIdMax = 2,   ///< Processor ID must be less than this
+    ThreadProcessorId2 = 2,     ///< Run thread on core 2 (additional n3ds core)
+    ThreadProcessorId3 = 3,     ///< Run thread on core 3 (additional n3ds core)
+    ThreadProcessorIdMax = 4,   ///< Processor ID must be less than this
 };
 
 enum class ThreadStatus {
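Since this commit also indexes per-core state with the processor id (thread_managers[processor_id] in thread.cpp above), raising ThreadProcessorIdMax to 4 is what keeps those lookups in bounds. A small sketch of that relationship follows, with container and type names assumed for illustration.

// Sketch: processor ids double as indices into per-core containers, so the
// container size should equal ThreadProcessorIdMax. Names are illustrative.
#include <array>
#include <cassert>
#include <cstddef>
#include <memory>

enum ThreadProcessorId : int {
    ThreadProcessorIdAll = -1,
    ThreadProcessorId0 = 0,
    ThreadProcessorId1 = 1,
    ThreadProcessorId2 = 2,
    ThreadProcessorId3 = 3,
    ThreadProcessorIdMax = 4,
};

struct ThreadManager {}; // stand-in for the real class

int main() {
    std::array<std::unique_ptr<ThreadManager>, ThreadProcessorIdMax> thread_managers;
    for (auto& manager : thread_managers)
        manager = std::make_unique<ThreadManager>();

    int processor_id = ThreadProcessorId2;
    assert(processor_id >= ThreadProcessorId0 && processor_id < ThreadProcessorIdMax);
    ThreadManager& manager = *thread_managers[static_cast<std::size_t>(processor_id)];
    (void)manager; // per-core lookup by processor id
}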
@ -57,15 +59,9 @@ enum class ThreadWakeupReason {
|
|||
|
||||
class ThreadManager {
|
||||
public:
|
||||
explicit ThreadManager(Kernel::KernelSystem& kernel);
|
||||
explicit ThreadManager(Kernel::KernelSystem& kernel, u32 core_id);
|
||||
~ThreadManager();
|
||||
|
||||
/**
|
||||
* Creates a new thread ID
|
||||
* @return The new thread ID
|
||||
*/
|
||||
u32 NewThreadId();
|
||||
|
||||
/**
|
||||
* Gets the current thread
|
||||
*/
|
||||
|
@ -132,7 +128,6 @@ private:
|
|||
Kernel::KernelSystem& kernel;
|
||||
ARM_Interface* cpu;
|
||||
|
||||
u32 next_thread_id = 1;
|
||||
std::shared_ptr<Thread> current_thread;
|
||||
Common::ThreadQueueList<Thread*, ThreadPrioLowest + 1> ready_queue;
|
||||
std::unordered_map<u64, Thread*> wakeup_callback_table;
|
||||
|
@ -149,7 +144,7 @@ private:
|
|||
|
||||
class Thread final : public WaitObject {
|
||||
public:
|
||||
explicit Thread(KernelSystem&);
|
||||
explicit Thread(KernelSystem&, u32 core_id);
|
||||
~Thread() override;
|
||||
|
||||
std::string GetName() const override {
|
||||
|
|
|
@ -55,7 +55,7 @@ VAddr CROHelper::SegmentTagToAddress(SegmentTag segment_tag) const {
|
|||
return 0;
|
||||
|
||||
SegmentEntry entry;
|
||||
GetEntry(memory, segment_tag.segment_index, entry);
|
||||
GetEntry(system.Memory(), segment_tag.segment_index, entry);
|
||||
|
||||
if (segment_tag.offset_into_segment >= entry.size)
|
||||
return 0;
|
||||
|
@ -71,12 +71,12 @@ ResultCode CROHelper::ApplyRelocation(VAddr target_address, RelocationType reloc
|
|||
break;
|
||||
case RelocationType::AbsoluteAddress:
|
||||
case RelocationType::AbsoluteAddress2:
|
||||
memory.Write32(target_address, symbol_address + addend);
|
||||
cpu.InvalidateCacheRange(target_address, sizeof(u32));
|
||||
system.Memory().Write32(target_address, symbol_address + addend);
|
||||
system.InvalidateCacheRange(target_address, sizeof(u32));
|
||||
break;
|
||||
case RelocationType::RelativeAddress:
|
||||
memory.Write32(target_address, symbol_address + addend - target_future_address);
|
||||
cpu.InvalidateCacheRange(target_address, sizeof(u32));
|
||||
system.Memory().Write32(target_address, symbol_address + addend - target_future_address);
|
||||
system.InvalidateCacheRange(target_address, sizeof(u32));
|
||||
break;
|
||||
case RelocationType::ThumbBranch:
|
||||
case RelocationType::ArmBranch:
|
||||
|
@ -98,8 +98,8 @@ ResultCode CROHelper::ClearRelocation(VAddr target_address, RelocationType reloc
|
|||
case RelocationType::AbsoluteAddress:
|
||||
case RelocationType::AbsoluteAddress2:
|
||||
case RelocationType::RelativeAddress:
|
||||
memory.Write32(target_address, 0);
|
||||
cpu.InvalidateCacheRange(target_address, sizeof(u32));
|
||||
system.Memory().Write32(target_address, 0);
|
||||
system.InvalidateCacheRange(target_address, sizeof(u32));
|
||||
break;
|
||||
case RelocationType::ThumbBranch:
|
||||
case RelocationType::ArmBranch:
|
||||
|
@ -121,7 +121,8 @@ ResultCode CROHelper::ApplyRelocationBatch(VAddr batch, u32 symbol_address, bool
|
|||
VAddr relocation_address = batch;
|
||||
while (true) {
|
||||
RelocationEntry relocation;
|
||||
memory.ReadBlock(process, relocation_address, &relocation, sizeof(RelocationEntry));
|
||||
system.Memory().ReadBlock(process, relocation_address, &relocation,
|
||||
sizeof(RelocationEntry));
|
||||
|
||||
VAddr relocation_target = SegmentTagToAddress(relocation.target_position);
|
||||
if (relocation_target == 0) {
|
||||
|
@ -142,9 +143,9 @@ ResultCode CROHelper::ApplyRelocationBatch(VAddr batch, u32 symbol_address, bool
|
|||
}
|
||||
|
||||
RelocationEntry relocation;
|
||||
memory.ReadBlock(process, batch, &relocation, sizeof(RelocationEntry));
|
||||
system.Memory().ReadBlock(process, batch, &relocation, sizeof(RelocationEntry));
|
||||
relocation.is_batch_resolved = reset ? 0 : 1;
|
||||
memory.WriteBlock(process, batch, &relocation, sizeof(RelocationEntry));
|
||||
system.Memory().WriteBlock(process, batch, &relocation, sizeof(RelocationEntry));
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -154,13 +155,13 @@ VAddr CROHelper::FindExportNamedSymbol(const std::string& name) const {
|
|||
|
||||
std::size_t len = name.size();
|
||||
ExportTreeEntry entry;
|
||||
GetEntry(memory, 0, entry);
|
||||
GetEntry(system.Memory(), 0, entry);
|
||||
ExportTreeEntry::Child next;
|
||||
next.raw = entry.left.raw;
|
||||
u32 found_id;
|
||||
|
||||
while (true) {
|
||||
GetEntry(memory, next.next_index, entry);
|
||||
GetEntry(system.Memory(), next.next_index, entry);
|
||||
|
||||
if (next.is_end) {
|
||||
found_id = entry.export_table_index;
|
||||
|
@ -186,9 +187,9 @@ VAddr CROHelper::FindExportNamedSymbol(const std::string& name) const {
|
|||
|
||||
u32 export_strings_size = GetField(ExportStringsSize);
|
||||
ExportNamedSymbolEntry symbol_entry;
|
||||
GetEntry(memory, found_id, symbol_entry);
|
||||
GetEntry(system.Memory(), found_id, symbol_entry);
|
||||
|
||||
if (memory.ReadCString(symbol_entry.name_offset, export_strings_size) != name)
|
||||
if (system.Memory().ReadCString(symbol_entry.name_offset, export_strings_size) != name)
|
||||
return 0;
|
||||
|
||||
return SegmentTagToAddress(symbol_entry.symbol_position);
|
||||
|
@ -279,7 +280,7 @@ ResultVal<VAddr> CROHelper::RebaseSegmentTable(u32 cro_size, VAddr data_segment_
|
|||
u32 segment_num = GetField(SegmentNum);
|
||||
for (u32 i = 0; i < segment_num; ++i) {
|
||||
SegmentEntry segment;
|
||||
GetEntry(memory, i, segment);
|
||||
GetEntry(system.Memory(), i, segment);
|
||||
if (segment.type == SegmentType::Data) {
|
||||
if (segment.size != 0) {
|
||||
if (segment.size > data_segment_size)
|
||||
|
@ -298,7 +299,7 @@ ResultVal<VAddr> CROHelper::RebaseSegmentTable(u32 cro_size, VAddr data_segment_
|
|||
if (segment.offset > module_address + cro_size)
|
||||
return CROFormatError(0x19);
|
||||
}
|
||||
SetEntry(memory, i, segment);
|
||||
SetEntry(system.Memory(), i, segment);
|
||||
}
|
||||
return MakeResult<u32>(prev_data_segment + module_address);
|
||||
}
|
||||
|
@ -310,7 +311,7 @@ ResultCode CROHelper::RebaseExportNamedSymbolTable() {
|
|||
u32 export_named_symbol_num = GetField(ExportNamedSymbolNum);
|
||||
for (u32 i = 0; i < export_named_symbol_num; ++i) {
|
||||
ExportNamedSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (entry.name_offset != 0) {
|
||||
entry.name_offset += module_address;
|
||||
|
@ -320,7 +321,7 @@ ResultCode CROHelper::RebaseExportNamedSymbolTable() {
|
|||
}
|
||||
}
|
||||
|
||||
SetEntry(memory, i, entry);
|
||||
SetEntry(system.Memory(), i, entry);
|
||||
}
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
@ -329,7 +330,7 @@ ResultCode CROHelper::VerifyExportTreeTable() const {
|
|||
u32 tree_num = GetField(ExportTreeNum);
|
||||
for (u32 i = 0; i < tree_num; ++i) {
|
||||
ExportTreeEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (entry.left.next_index >= tree_num || entry.right.next_index >= tree_num) {
|
||||
return CROFormatError(0x11);
|
||||
|
@ -353,7 +354,7 @@ ResultCode CROHelper::RebaseImportModuleTable() {
|
|||
u32 module_num = GetField(ImportModuleNum);
|
||||
for (u32 i = 0; i < module_num; ++i) {
|
||||
ImportModuleEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (entry.name_offset != 0) {
|
||||
entry.name_offset += module_address;
|
||||
|
@ -379,7 +380,7 @@ ResultCode CROHelper::RebaseImportModuleTable() {
|
|||
}
|
||||
}
|
||||
|
||||
SetEntry(memory, i, entry);
|
||||
SetEntry(system.Memory(), i, entry);
|
||||
}
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
@ -395,7 +396,7 @@ ResultCode CROHelper::RebaseImportNamedSymbolTable() {
|
|||
u32 num = GetField(ImportNamedSymbolNum);
|
||||
for (u32 i = 0; i < num; ++i) {
|
||||
ImportNamedSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (entry.name_offset != 0) {
|
||||
entry.name_offset += module_address;
|
||||
|
@ -413,7 +414,7 @@ ResultCode CROHelper::RebaseImportNamedSymbolTable() {
|
|||
}
|
||||
}
|
||||
|
||||
SetEntry(memory, i, entry);
|
||||
SetEntry(system.Memory(), i, entry);
|
||||
}
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
@ -427,7 +428,7 @@ ResultCode CROHelper::RebaseImportIndexedSymbolTable() {
|
|||
u32 num = GetField(ImportIndexedSymbolNum);
|
||||
for (u32 i = 0; i < num; ++i) {
|
||||
ImportIndexedSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (entry.relocation_batch_offset != 0) {
|
||||
entry.relocation_batch_offset += module_address;
|
||||
|
@ -437,7 +438,7 @@ ResultCode CROHelper::RebaseImportIndexedSymbolTable() {
|
|||
}
|
||||
}
|
||||
|
||||
SetEntry(memory, i, entry);
|
||||
SetEntry(system.Memory(), i, entry);
|
||||
}
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
@ -451,7 +452,7 @@ ResultCode CROHelper::RebaseImportAnonymousSymbolTable() {
|
|||
u32 num = GetField(ImportAnonymousSymbolNum);
|
||||
for (u32 i = 0; i < num; ++i) {
|
||||
ImportAnonymousSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (entry.relocation_batch_offset != 0) {
|
||||
entry.relocation_batch_offset += module_address;
|
||||
|
@ -461,7 +462,7 @@ ResultCode CROHelper::RebaseImportAnonymousSymbolTable() {
|
|||
}
|
||||
}
|
||||
|
||||
SetEntry(memory, i, entry);
|
||||
SetEntry(system.Memory(), i, entry);
|
||||
}
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
@ -476,14 +477,14 @@ ResultCode CROHelper::ResetExternalRelocations() {
|
|||
ExternalRelocationEntry relocation;
|
||||
|
||||
// Verifies that the last relocation is the end of a batch
|
||||
GetEntry(memory, external_relocation_num - 1, relocation);
|
||||
GetEntry(system.Memory(), external_relocation_num - 1, relocation);
|
||||
if (!relocation.is_batch_end) {
|
||||
return CROFormatError(0x12);
|
||||
}
|
||||
|
||||
bool batch_begin = true;
|
||||
for (u32 i = 0; i < external_relocation_num; ++i) {
|
||||
GetEntry(memory, i, relocation);
|
||||
GetEntry(system.Memory(), i, relocation);
|
||||
VAddr relocation_target = SegmentTagToAddress(relocation.target_position);
|
||||
|
||||
if (relocation_target == 0) {
|
||||
|
@ -500,7 +501,7 @@ ResultCode CROHelper::ResetExternalRelocations() {
|
|||
if (batch_begin) {
|
||||
// resets to unresolved state
|
||||
relocation.is_batch_resolved = 0;
|
||||
SetEntry(memory, i, relocation);
|
||||
SetEntry(system.Memory(), i, relocation);
|
||||
}
|
||||
|
||||
// if current is an end, then the next is a beginning
|
||||
|
@ -516,7 +517,7 @@ ResultCode CROHelper::ClearExternalRelocations() {
|
|||
|
||||
bool batch_begin = true;
|
||||
for (u32 i = 0; i < external_relocation_num; ++i) {
|
||||
GetEntry(memory, i, relocation);
|
||||
GetEntry(system.Memory(), i, relocation);
|
||||
VAddr relocation_target = SegmentTagToAddress(relocation.target_position);
|
||||
|
||||
if (relocation_target == 0) {
|
||||
|
@ -532,7 +533,7 @@ ResultCode CROHelper::ClearExternalRelocations() {
|
|||
if (batch_begin) {
|
||||
// resets to unresolved state
|
||||
relocation.is_batch_resolved = 0;
|
||||
SetEntry(memory, i, relocation);
|
||||
SetEntry(system.Memory(), i, relocation);
|
||||
}
|
||||
|
||||
// if current is an end, then the next is a beginning
|
||||
|
@ -548,13 +549,13 @@ ResultCode CROHelper::ApplyStaticAnonymousSymbolToCRS(VAddr crs_address) {
|
|||
static_relocation_table_offset +
|
||||
GetField(StaticRelocationNum) * sizeof(StaticRelocationEntry);
|
||||
|
||||
CROHelper crs(crs_address, process, memory, cpu);
|
||||
CROHelper crs(crs_address, process, system);
|
||||
u32 offset_export_num = GetField(StaticAnonymousSymbolNum);
|
||||
LOG_INFO(Service_LDR, "CRO \"{}\" exports {} static anonymous symbols", ModuleName(),
|
||||
offset_export_num);
|
||||
for (u32 i = 0; i < offset_export_num; ++i) {
|
||||
StaticAnonymousSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
u32 batch_address = entry.relocation_batch_offset + module_address;
|
||||
|
||||
if (batch_address < static_relocation_table_offset ||
|
||||
|
@ -579,7 +580,7 @@ ResultCode CROHelper::ApplyInternalRelocations(u32 old_data_segment_address) {
|
|||
u32 internal_relocation_num = GetField(InternalRelocationNum);
|
||||
for (u32 i = 0; i < internal_relocation_num; ++i) {
|
||||
InternalRelocationEntry relocation;
|
||||
GetEntry(memory, i, relocation);
|
||||
GetEntry(system.Memory(), i, relocation);
|
||||
VAddr target_addressB = SegmentTagToAddress(relocation.target_position);
|
||||
if (target_addressB == 0) {
|
||||
return CROFormatError(0x15);
|
||||
|
@ -587,7 +588,7 @@ ResultCode CROHelper::ApplyInternalRelocations(u32 old_data_segment_address) {
|
|||
|
||||
VAddr target_address;
|
||||
SegmentEntry target_segment;
|
||||
GetEntry(memory, relocation.target_position.segment_index, target_segment);
|
||||
GetEntry(system.Memory(), relocation.target_position.segment_index, target_segment);
|
||||
|
||||
if (target_segment.type == SegmentType::Data) {
|
||||
// If the relocation is to the .data segment, we need to relocate it in the old buffer
|
||||
|
@ -602,7 +603,7 @@ ResultCode CROHelper::ApplyInternalRelocations(u32 old_data_segment_address) {
|
|||
}
|
||||
|
||||
SegmentEntry symbol_segment;
|
||||
GetEntry(memory, relocation.symbol_segment, symbol_segment);
|
||||
GetEntry(system.Memory(), relocation.symbol_segment, symbol_segment);
|
||||
LOG_TRACE(Service_LDR, "Internally relocates 0x{:08X} with 0x{:08X}", target_address,
|
||||
symbol_segment.offset);
|
||||
ResultCode result = ApplyRelocation(target_address, relocation.type, relocation.addend,
|
||||
|
@ -619,7 +620,7 @@ ResultCode CROHelper::ClearInternalRelocations() {
|
|||
u32 internal_relocation_num = GetField(InternalRelocationNum);
|
||||
for (u32 i = 0; i < internal_relocation_num; ++i) {
|
||||
InternalRelocationEntry relocation;
|
||||
GetEntry(memory, i, relocation);
|
||||
GetEntry(system.Memory(), i, relocation);
|
||||
VAddr target_address = SegmentTagToAddress(relocation.target_position);
|
||||
|
||||
if (target_address == 0) {
|
||||
|
@ -639,13 +640,13 @@ void CROHelper::UnrebaseImportAnonymousSymbolTable() {
|
|||
u32 num = GetField(ImportAnonymousSymbolNum);
|
||||
for (u32 i = 0; i < num; ++i) {
|
||||
ImportAnonymousSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (entry.relocation_batch_offset != 0) {
|
||||
entry.relocation_batch_offset -= module_address;
|
||||
}
|
||||
|
||||
SetEntry(memory, i, entry);
|
||||
SetEntry(system.Memory(), i, entry);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -653,13 +654,13 @@ void CROHelper::UnrebaseImportIndexedSymbolTable() {
|
|||
u32 num = GetField(ImportIndexedSymbolNum);
|
||||
for (u32 i = 0; i < num; ++i) {
|
||||
ImportIndexedSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (entry.relocation_batch_offset != 0) {
|
||||
entry.relocation_batch_offset -= module_address;
|
||||
}
|
||||
|
||||
SetEntry(memory, i, entry);
|
||||
SetEntry(system.Memory(), i, entry);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -667,7 +668,7 @@ void CROHelper::UnrebaseImportNamedSymbolTable() {
|
|||
u32 num = GetField(ImportNamedSymbolNum);
|
||||
for (u32 i = 0; i < num; ++i) {
|
||||
ImportNamedSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (entry.name_offset != 0) {
|
||||
entry.name_offset -= module_address;
|
||||
|
@ -677,7 +678,7 @@ void CROHelper::UnrebaseImportNamedSymbolTable() {
|
|||
entry.relocation_batch_offset -= module_address;
|
||||
}
|
||||
|
||||
SetEntry(memory, i, entry);
|
||||
SetEntry(system.Memory(), i, entry);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -685,7 +686,7 @@ void CROHelper::UnrebaseImportModuleTable() {
|
|||
u32 module_num = GetField(ImportModuleNum);
|
||||
for (u32 i = 0; i < module_num; ++i) {
|
||||
ImportModuleEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (entry.name_offset != 0) {
|
||||
entry.name_offset -= module_address;
|
||||
|
@ -699,7 +700,7 @@ void CROHelper::UnrebaseImportModuleTable() {
|
|||
entry.import_anonymous_symbol_table_offset -= module_address;
|
||||
}
|
||||
|
||||
SetEntry(memory, i, entry);
|
||||
SetEntry(system.Memory(), i, entry);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -707,13 +708,13 @@ void CROHelper::UnrebaseExportNamedSymbolTable() {
|
|||
u32 export_named_symbol_num = GetField(ExportNamedSymbolNum);
|
||||
for (u32 i = 0; i < export_named_symbol_num; ++i) {
|
||||
ExportNamedSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (entry.name_offset != 0) {
|
||||
entry.name_offset -= module_address;
|
||||
}
|
||||
|
||||
SetEntry(memory, i, entry);
|
||||
SetEntry(system.Memory(), i, entry);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -721,7 +722,7 @@ void CROHelper::UnrebaseSegmentTable() {
|
|||
u32 segment_num = GetField(SegmentNum);
|
||||
for (u32 i = 0; i < segment_num; ++i) {
|
||||
SegmentEntry segment;
|
||||
GetEntry(memory, i, segment);
|
||||
GetEntry(system.Memory(), i, segment);
|
||||
|
||||
if (segment.type == SegmentType::BSS) {
|
||||
segment.offset = 0;
|
||||
|
@ -729,7 +730,7 @@ void CROHelper::UnrebaseSegmentTable() {
|
|||
segment.offset -= module_address;
|
||||
}
|
||||
|
||||
SetEntry(memory, i, segment);
|
||||
SetEntry(system.Memory(), i, segment);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -751,17 +752,17 @@ ResultCode CROHelper::ApplyImportNamedSymbol(VAddr crs_address) {
|
|||
u32 symbol_import_num = GetField(ImportNamedSymbolNum);
|
||||
for (u32 i = 0; i < symbol_import_num; ++i) {
|
||||
ImportNamedSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
VAddr relocation_addr = entry.relocation_batch_offset;
|
||||
ExternalRelocationEntry relocation_entry;
|
||||
memory.ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
sizeof(ExternalRelocationEntry));
|
||||
system.Memory().ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
sizeof(ExternalRelocationEntry));
|
||||
|
||||
if (!relocation_entry.is_batch_resolved) {
|
||||
ResultCode result = ForEachAutoLinkCRO(
|
||||
process, memory, cpu, crs_address, [&](CROHelper source) -> ResultVal<bool> {
|
||||
process, system, crs_address, [&](CROHelper source) -> ResultVal<bool> {
|
||||
std::string symbol_name =
|
||||
memory.ReadCString(entry.name_offset, import_strings_size);
|
||||
system.Memory().ReadCString(entry.name_offset, import_strings_size);
|
||||
u32 symbol_address = source.FindExportNamedSymbol(symbol_name);
|
||||
|
||||
if (symbol_address != 0) {
|
||||
|
@ -794,11 +795,11 @@ ResultCode CROHelper::ResetImportNamedSymbol() {
|
|||
u32 symbol_import_num = GetField(ImportNamedSymbolNum);
|
||||
for (u32 i = 0; i < symbol_import_num; ++i) {
|
||||
ImportNamedSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
VAddr relocation_addr = entry.relocation_batch_offset;
|
||||
ExternalRelocationEntry relocation_entry;
|
||||
memory.ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
sizeof(ExternalRelocationEntry));
|
||||
system.Memory().ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
sizeof(ExternalRelocationEntry));
|
||||
|
||||
ResultCode result = ApplyRelocationBatch(relocation_addr, unresolved_symbol, true);
|
||||
if (result.IsError()) {
|
||||
|
@ -815,11 +816,11 @@ ResultCode CROHelper::ResetImportIndexedSymbol() {
|
|||
u32 import_num = GetField(ImportIndexedSymbolNum);
|
||||
for (u32 i = 0; i < import_num; ++i) {
|
||||
ImportIndexedSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
VAddr relocation_addr = entry.relocation_batch_offset;
|
||||
ExternalRelocationEntry relocation_entry;
|
||||
memory.ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
sizeof(ExternalRelocationEntry));
|
||||
system.Memory().ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
sizeof(ExternalRelocationEntry));
|
||||
|
||||
ResultCode result = ApplyRelocationBatch(relocation_addr, unresolved_symbol, true);
|
||||
if (result.IsError()) {
|
||||
|
@ -836,11 +837,11 @@ ResultCode CROHelper::ResetImportAnonymousSymbol() {
|
|||
u32 import_num = GetField(ImportAnonymousSymbolNum);
|
||||
for (u32 i = 0; i < import_num; ++i) {
|
||||
ImportAnonymousSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
VAddr relocation_addr = entry.relocation_batch_offset;
|
||||
ExternalRelocationEntry relocation_entry;
|
||||
memory.ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
sizeof(ExternalRelocationEntry));
|
||||
system.Memory().ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
sizeof(ExternalRelocationEntry));
|
||||
|
||||
ResultCode result = ApplyRelocationBatch(relocation_addr, unresolved_symbol, true);
|
||||
if (result.IsError()) {
|
||||
|
@ -857,19 +858,20 @@ ResultCode CROHelper::ApplyModuleImport(VAddr crs_address) {
|
|||
u32 import_module_num = GetField(ImportModuleNum);
|
||||
for (u32 i = 0; i < import_module_num; ++i) {
|
||||
ImportModuleEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
std::string want_cro_name = memory.ReadCString(entry.name_offset, import_strings_size);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
std::string want_cro_name =
|
||||
system.Memory().ReadCString(entry.name_offset, import_strings_size);
|
||||
|
||||
ResultCode result = ForEachAutoLinkCRO(
|
||||
process, memory, cpu, crs_address, [&](CROHelper source) -> ResultVal<bool> {
|
||||
process, system, crs_address, [&](CROHelper source) -> ResultVal<bool> {
|
||||
if (want_cro_name == source.ModuleName()) {
|
||||
LOG_INFO(Service_LDR, "CRO \"{}\" imports {} indexed symbols from \"{}\"",
|
||||
ModuleName(), entry.import_indexed_symbol_num, source.ModuleName());
|
||||
for (u32 j = 0; j < entry.import_indexed_symbol_num; ++j) {
|
||||
ImportIndexedSymbolEntry im;
|
||||
entry.GetImportIndexedSymbolEntry(process, memory, j, im);
|
||||
entry.GetImportIndexedSymbolEntry(process, system.Memory(), j, im);
|
||||
ExportIndexedSymbolEntry ex;
|
||||
source.GetEntry(memory, im.index, ex);
|
||||
source.GetEntry(system.Memory(), im.index, ex);
|
||||
u32 symbol_address = source.SegmentTagToAddress(ex.symbol_position);
|
||||
LOG_TRACE(Service_LDR, " Imports 0x{:08X}", symbol_address);
|
||||
ResultCode result =
|
||||
|
@ -884,7 +886,7 @@ ResultCode CROHelper::ApplyModuleImport(VAddr crs_address) {
|
|||
ModuleName(), entry.import_anonymous_symbol_num, source.ModuleName());
|
||||
for (u32 j = 0; j < entry.import_anonymous_symbol_num; ++j) {
|
||||
ImportAnonymousSymbolEntry im;
|
||||
entry.GetImportAnonymousSymbolEntry(process, memory, j, im);
|
||||
entry.GetImportAnonymousSymbolEntry(process, system.Memory(), j, im);
|
||||
u32 symbol_address = source.SegmentTagToAddress(im.symbol_position);
|
||||
LOG_TRACE(Service_LDR, " Imports 0x{:08X}", symbol_address);
|
||||
ResultCode result =
|
||||
|
@ -913,15 +915,15 @@ ResultCode CROHelper::ApplyExportNamedSymbol(CROHelper target) {
|
|||
u32 target_symbol_import_num = target.GetField(ImportNamedSymbolNum);
|
||||
for (u32 i = 0; i < target_symbol_import_num; ++i) {
|
||||
ImportNamedSymbolEntry entry;
|
||||
target.GetEntry(memory, i, entry);
|
||||
target.GetEntry(system.Memory(), i, entry);
|
||||
VAddr relocation_addr = entry.relocation_batch_offset;
|
||||
ExternalRelocationEntry relocation_entry;
|
||||
memory.ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
sizeof(ExternalRelocationEntry));
|
||||
system.Memory().ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
sizeof(ExternalRelocationEntry));
|
||||
|
||||
if (!relocation_entry.is_batch_resolved) {
|
||||
std::string symbol_name =
|
||||
memory.ReadCString(entry.name_offset, target_import_strings_size);
|
||||
system.Memory().ReadCString(entry.name_offset, target_import_strings_size);
|
||||
u32 symbol_address = FindExportNamedSymbol(symbol_name);
|
||||
if (symbol_address != 0) {
|
||||
LOG_TRACE(Service_LDR, " exports symbol \"{}\"", symbol_name);
|
||||
|
@ -944,15 +946,15 @@ ResultCode CROHelper::ResetExportNamedSymbol(CROHelper target) {
|
|||
u32 target_symbol_import_num = target.GetField(ImportNamedSymbolNum);
|
||||
for (u32 i = 0; i < target_symbol_import_num; ++i) {
|
||||
ImportNamedSymbolEntry entry;
|
||||
target.GetEntry(memory, i, entry);
|
||||
target.GetEntry(system.Memory(), i, entry);
|
||||
VAddr relocation_addr = entry.relocation_batch_offset;
|
||||
ExternalRelocationEntry relocation_entry;
|
||||
memory.ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
sizeof(ExternalRelocationEntry));
|
||||
system.Memory().ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
sizeof(ExternalRelocationEntry));
|
||||
|
||||
if (relocation_entry.is_batch_resolved) {
|
||||
std::string symbol_name =
|
||||
memory.ReadCString(entry.name_offset, target_import_strings_size);
|
||||
system.Memory().ReadCString(entry.name_offset, target_import_strings_size);
|
||||
u32 symbol_address = FindExportNamedSymbol(symbol_name);
|
||||
if (symbol_address != 0) {
|
||||
LOG_TRACE(Service_LDR, " unexports symbol \"{}\"", symbol_name);
|
||||
|
@ -974,18 +976,19 @@ ResultCode CROHelper::ApplyModuleExport(CROHelper target) {
|
|||
u32 target_import_module_num = target.GetField(ImportModuleNum);
|
||||
for (u32 i = 0; i < target_import_module_num; ++i) {
|
||||
ImportModuleEntry entry;
|
||||
target.GetEntry(memory, i, entry);
|
||||
target.GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (memory.ReadCString(entry.name_offset, target_import_string_size) != module_name)
|
||||
if (system.Memory().ReadCString(entry.name_offset, target_import_string_size) !=
|
||||
module_name)
|
||||
continue;
|
||||
|
||||
LOG_INFO(Service_LDR, "CRO \"{}\" exports {} indexed symbols to \"{}\"", module_name,
|
||||
entry.import_indexed_symbol_num, target.ModuleName());
|
||||
for (u32 j = 0; j < entry.import_indexed_symbol_num; ++j) {
|
||||
ImportIndexedSymbolEntry im;
|
||||
entry.GetImportIndexedSymbolEntry(process, memory, j, im);
|
||||
entry.GetImportIndexedSymbolEntry(process, system.Memory(), j, im);
|
||||
ExportIndexedSymbolEntry ex;
|
||||
GetEntry(memory, im.index, ex);
|
||||
GetEntry(system.Memory(), im.index, ex);
|
||||
u32 symbol_address = SegmentTagToAddress(ex.symbol_position);
|
||||
LOG_TRACE(Service_LDR, " exports symbol 0x{:08X}", symbol_address);
|
||||
ResultCode result =
|
||||
|
@ -1000,7 +1003,7 @@ ResultCode CROHelper::ApplyModuleExport(CROHelper target) {
|
|||
entry.import_anonymous_symbol_num, target.ModuleName());
|
||||
for (u32 j = 0; j < entry.import_anonymous_symbol_num; ++j) {
|
||||
ImportAnonymousSymbolEntry im;
|
||||
entry.GetImportAnonymousSymbolEntry(process, memory, j, im);
|
||||
entry.GetImportAnonymousSymbolEntry(process, system.Memory(), j, im);
|
||||
u32 symbol_address = SegmentTagToAddress(im.symbol_position);
|
||||
LOG_TRACE(Service_LDR, " exports symbol 0x{:08X}", symbol_address);
|
||||
ResultCode result =
|
||||
|
@ -1023,16 +1026,17 @@ ResultCode CROHelper::ResetModuleExport(CROHelper target) {
|
|||
u32 target_import_module_num = target.GetField(ImportModuleNum);
|
||||
for (u32 i = 0; i < target_import_module_num; ++i) {
|
||||
ImportModuleEntry entry;
|
||||
target.GetEntry(memory, i, entry);
|
||||
target.GetEntry(system.Memory(), i, entry);
|
||||
|
||||
if (memory.ReadCString(entry.name_offset, target_import_string_size) != module_name)
|
||||
if (system.Memory().ReadCString(entry.name_offset, target_import_string_size) !=
|
||||
module_name)
|
||||
continue;
|
||||
|
||||
LOG_DEBUG(Service_LDR, "CRO \"{}\" unexports indexed symbols to \"{}\"", module_name,
|
||||
target.ModuleName());
|
||||
for (u32 j = 0; j < entry.import_indexed_symbol_num; ++j) {
|
||||
ImportIndexedSymbolEntry im;
|
||||
entry.GetImportIndexedSymbolEntry(process, memory, j, im);
|
||||
entry.GetImportIndexedSymbolEntry(process, system.Memory(), j, im);
|
||||
ResultCode result =
|
||||
target.ApplyRelocationBatch(im.relocation_batch_offset, unresolved_symbol, true);
|
||||
if (result.IsError()) {
|
||||
|
@ -1045,7 +1049,7 @@ ResultCode CROHelper::ResetModuleExport(CROHelper target) {
|
|||
target.ModuleName());
|
||||
for (u32 j = 0; j < entry.import_anonymous_symbol_num; ++j) {
|
||||
ImportAnonymousSymbolEntry im;
|
||||
entry.GetImportAnonymousSymbolEntry(process, memory, j, im);
|
||||
entry.GetImportAnonymousSymbolEntry(process, system.Memory(), j, im);
|
||||
ResultCode result =
|
||||
target.ApplyRelocationBatch(im.relocation_batch_offset, unresolved_symbol, true);
|
||||
if (result.IsError()) {
|
||||
|
@ -1063,15 +1067,16 @@ ResultCode CROHelper::ApplyExitRelocations(VAddr crs_address) {
|
|||
u32 symbol_import_num = GetField(ImportNamedSymbolNum);
|
||||
for (u32 i = 0; i < symbol_import_num; ++i) {
|
||||
ImportNamedSymbolEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
VAddr relocation_addr = entry.relocation_batch_offset;
|
||||
ExternalRelocationEntry relocation_entry;
|
||||
memory.ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
sizeof(ExternalRelocationEntry));
|
||||
system.Memory().ReadBlock(process, relocation_addr, &relocation_entry,
|
||||
sizeof(ExternalRelocationEntry));
|
||||
|
||||
if (memory.ReadCString(entry.name_offset, import_strings_size) == "__aeabi_atexit") {
|
||||
if (system.Memory().ReadCString(entry.name_offset, import_strings_size) ==
|
||||
"__aeabi_atexit") {
|
||||
ResultCode result = ForEachAutoLinkCRO(
|
||||
process, memory, cpu, crs_address, [&](CROHelper source) -> ResultVal<bool> {
|
||||
process, system, crs_address, [&](CROHelper source) -> ResultVal<bool> {
|
||||
u32 symbol_address = source.FindExportNamedSymbol("nnroAeabiAtexit_");
|
||||
|
||||
if (symbol_address != 0) {
|
||||
|
@ -1126,7 +1131,8 @@ ResultCode CROHelper::Rebase(VAddr crs_address, u32 cro_size, VAddr data_segment
|
|||
return result;
|
||||
}
|
||||
|
||||
result = VerifyStringTableLength(memory, GetField(ModuleNameOffset), GetField(ModuleNameSize));
|
||||
result = VerifyStringTableLength(system.Memory(), GetField(ModuleNameOffset),
|
||||
GetField(ModuleNameSize));
|
||||
if (result.IsError()) {
|
||||
LOG_ERROR(Service_LDR, "Error verifying module name {:08X}", result.raw);
|
||||
return result;
|
||||
|
@ -1155,8 +1161,8 @@ ResultCode CROHelper::Rebase(VAddr crs_address, u32 cro_size, VAddr data_segment
|
|||
return result;
|
||||
}
|
||||
|
||||
result =
|
||||
VerifyStringTableLength(memory, GetField(ExportStringsOffset), GetField(ExportStringsSize));
|
||||
result = VerifyStringTableLength(system.Memory(), GetField(ExportStringsOffset),
|
||||
GetField(ExportStringsSize));
|
||||
if (result.IsError()) {
|
||||
LOG_ERROR(Service_LDR, "Error verifying export strings {:08X}", result.raw);
|
||||
return result;
|
||||
|
@ -1192,8 +1198,8 @@ ResultCode CROHelper::Rebase(VAddr crs_address, u32 cro_size, VAddr data_segment
|
|||
return result;
|
||||
}
|
||||
|
||||
result =
|
||||
VerifyStringTableLength(memory, GetField(ImportStringsOffset), GetField(ImportStringsSize));
|
||||
result = VerifyStringTableLength(system.Memory(), GetField(ImportStringsOffset),
|
||||
GetField(ImportStringsSize));
|
||||
if (result.IsError()) {
|
||||
LOG_ERROR(Service_LDR, "Error verifying import strings {:08X}", result.raw);
|
||||
return result;
|
||||
|
@ -1266,11 +1272,11 @@ ResultCode CROHelper::Link(VAddr crs_address, bool link_on_load_bug_fix) {
|
|||
// so we do the same
|
||||
if (GetField(SegmentNum) >= 2) { // means we have .data segment
|
||||
SegmentEntry entry;
|
||||
GetEntry(memory, 2, entry);
|
||||
GetEntry(system.Memory(), 2, entry);
|
||||
ASSERT(entry.type == SegmentType::Data);
|
||||
data_segment_address = entry.offset;
|
||||
entry.offset = GetField(DataOffset);
|
||||
SetEntry(memory, 2, entry);
|
||||
SetEntry(system.Memory(), 2, entry);
|
||||
}
|
||||
}
|
||||
SCOPE_EXIT({
|
||||
|
@ -1278,9 +1284,9 @@ ResultCode CROHelper::Link(VAddr crs_address, bool link_on_load_bug_fix) {
|
|||
if (link_on_load_bug_fix) {
|
||||
if (GetField(SegmentNum) >= 2) {
|
||||
SegmentEntry entry;
|
||||
GetEntry(memory, 2, entry);
|
||||
GetEntry(system.Memory(), 2, entry);
|
||||
entry.offset = data_segment_address;
|
||||
SetEntry(memory, 2, entry);
|
||||
SetEntry(system.Memory(), 2, entry);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
@ -1301,7 +1307,7 @@ ResultCode CROHelper::Link(VAddr crs_address, bool link_on_load_bug_fix) {
|
|||
}
|
||||
|
||||
// Exports symbols to other modules
|
||||
result = ForEachAutoLinkCRO(process, memory, cpu, crs_address,
|
||||
result = ForEachAutoLinkCRO(process, system, crs_address,
|
||||
[this](CROHelper target) -> ResultVal<bool> {
|
||||
ResultCode result = ApplyExportNamedSymbol(target);
|
||||
if (result.IsError())
|
||||
|
@ -1346,7 +1352,7 @@ ResultCode CROHelper::Unlink(VAddr crs_address) {
|
|||
|
||||
// Resets all symbols in other modules imported from this module
|
||||
// Note: the RO service seems only searching in auto-link modules
|
||||
result = ForEachAutoLinkCRO(process, memory, cpu, crs_address,
|
||||
result = ForEachAutoLinkCRO(process, system, crs_address,
|
||||
[this](CROHelper target) -> ResultVal<bool> {
|
||||
ResultCode result = ResetExportNamedSymbol(target);
|
||||
if (result.IsError())
|
||||
|
@ -1387,13 +1393,13 @@ void CROHelper::InitCRS() {
|
|||
}
|
||||
|
||||
void CROHelper::Register(VAddr crs_address, bool auto_link) {
|
||||
CROHelper crs(crs_address, process, memory, cpu);
|
||||
CROHelper head(auto_link ? crs.NextModule() : crs.PreviousModule(), process, memory, cpu);
|
||||
CROHelper crs(crs_address, process, system);
|
||||
CROHelper head(auto_link ? crs.NextModule() : crs.PreviousModule(), process, system);
|
||||
|
||||
if (head.module_address) {
|
||||
// there are already CROs registered
|
||||
// register as the new tail
|
||||
CROHelper tail(head.PreviousModule(), process, memory, cpu);
|
||||
CROHelper tail(head.PreviousModule(), process, system);
|
||||
|
||||
// link with the old tail
|
||||
ASSERT(tail.NextModule() == 0);
|
||||
|
@ -1419,11 +1425,11 @@ void CROHelper::Register(VAddr crs_address, bool auto_link) {
|
|||
}
|
||||
|
||||
void CROHelper::Unregister(VAddr crs_address) {
|
||||
CROHelper crs(crs_address, process, memory, cpu);
|
||||
CROHelper next_head(crs.NextModule(), process, memory, cpu);
|
||||
CROHelper previous_head(crs.PreviousModule(), process, memory, cpu);
|
||||
CROHelper next(NextModule(), process, memory, cpu);
|
||||
CROHelper previous(PreviousModule(), process, memory, cpu);
|
||||
CROHelper crs(crs_address, process, system);
|
||||
CROHelper next_head(crs.NextModule(), process, system);
|
||||
CROHelper previous_head(crs.PreviousModule(), process, system);
|
||||
CROHelper next(NextModule(), process, system);
|
||||
CROHelper previous(PreviousModule(), process, system);
|
||||
|
||||
if (module_address == next_head.module_address ||
|
||||
module_address == previous_head.module_address) {
|
||||
|
@ -1517,7 +1523,7 @@ std::tuple<VAddr, u32> CROHelper::GetExecutablePages() const {
|
|||
u32 segment_num = GetField(SegmentNum);
|
||||
for (u32 i = 0; i < segment_num; ++i) {
|
||||
SegmentEntry entry;
|
||||
GetEntry(memory, i, entry);
|
||||
GetEntry(system.Memory(), i, entry);
|
||||
if (entry.type == SegmentType::Code && entry.size != 0) {
|
||||
VAddr begin = Common::AlignDown(entry.offset, Memory::PAGE_SIZE);
|
||||
VAddr end = Common::AlignUp(entry.offset + entry.size, Memory::PAGE_SIZE);
@ -33,12 +33,11 @@ static constexpr u32 CRO_HASH_SIZE = 0x80;
 class CROHelper final {
 public:
     // TODO (wwylele): pass in the process handle for memory access
-    explicit CROHelper(VAddr cro_address, Kernel::Process& process, Memory::MemorySystem& memory,
-                       ARM_Interface& cpu)
-        : module_address(cro_address), process(process), memory(memory), cpu(cpu) {}
+    explicit CROHelper(VAddr cro_address, Kernel::Process& process, Core::System& system)
+        : module_address(cro_address), process(process), system(system) {}
 
     std::string ModuleName() const {
-        return memory.ReadCString(GetField(ModuleNameOffset), GetField(ModuleNameSize));
+        return system.Memory().ReadCString(GetField(ModuleNameOffset), GetField(ModuleNameSize));
     }
 
     u32 GetFileSize() const {
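The constructor change above swaps the MemorySystem/ARM_Interface pair for a single Core::System reference, so the helper reaches memory through system.Memory() and flushes caches through system.InvalidateCacheRange(), which is no longer tied to one CPU. Below is a self-contained sketch of that "hold the system facade" pattern; every name in it is illustrative, not the emulator's real classes.

// Sketch of the facade pattern used by the refactor above. Illustrative only.
#include <cstddef>
#include <cstdint>
#include <iostream>

struct MemorySystem {
    std::uint32_t Read32(std::uint32_t addr) const { return addr * 2; } // dummy read
};

struct System {
    MemorySystem& Memory() { return memory; }
    void InvalidateCacheRange(std::uint32_t addr, std::size_t size) {
        // In the real emulator this would be forwarded to every core's CPU.
        std::cout << "invalidate " << size << " bytes at 0x" << std::hex << addr << std::dec << '\n';
    }
    MemorySystem memory;
};

class Helper {
public:
    explicit Helper(System& system) : system(system) {}
    std::uint32_t Patch(std::uint32_t target) {
        std::uint32_t value = system.Memory().Read32(target); // memory access via the facade
        system.InvalidateCacheRange(target, sizeof(std::uint32_t));
        return value;
    }

private:
    System& system;
};

int main() {
    System system;
    Helper helper(system);
    std::cout << helper.Patch(0x1000) << '\n';
}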
@ -144,8 +143,7 @@ public:
|
|||
private:
|
||||
const VAddr module_address; ///< the virtual address of this module
|
||||
Kernel::Process& process; ///< the owner process of this module
|
||||
Memory::MemorySystem& memory;
|
||||
ARM_Interface& cpu;
|
||||
Core::System& system;
|
||||
|
||||
/**
|
||||
* Each item in this enum represents a u32 field in the header begin from address+0x80,
|
||||
|
@ -403,11 +401,11 @@ private:
|
|||
}
|
||||
|
||||
u32 GetField(HeaderField field) const {
|
||||
return memory.Read32(Field(field));
|
||||
return system.Memory().Read32(Field(field));
|
||||
}
|
||||
|
||||
void SetField(HeaderField field, u32 value) {
|
||||
memory.Write32(Field(field), value);
|
||||
system.Memory().Write32(Field(field), value);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -474,12 +472,11 @@ private:
|
|||
* otherwise error code of the last iteration.
|
||||
*/
|
||||
template <typename FunctionObject>
|
||||
static ResultCode ForEachAutoLinkCRO(Kernel::Process& process, Memory::MemorySystem& memory,
|
||||
ARM_Interface& cpu, VAddr crs_address,
|
||||
FunctionObject func) {
|
||||
static ResultCode ForEachAutoLinkCRO(Kernel::Process& process, Core::System& system,
|
||||
VAddr crs_address, FunctionObject func) {
|
||||
VAddr current = crs_address;
|
||||
while (current != 0) {
|
||||
CROHelper cro(current, process, memory, cpu);
|
||||
CROHelper cro(current, process, system);
|
||||
CASCADE_RESULT(bool next, func(cro));
|
||||
if (!next)
|
||||
break;
|
||||
|
|
|
@ -115,7 +115,7 @@ void RO::Initialize(Kernel::HLERequestContext& ctx) {
|
|||
return;
|
||||
}
|
||||
|
||||
CROHelper crs(crs_address, *process, system.Memory(), system.CPU());
|
||||
CROHelper crs(crs_address, *process, system);
|
||||
crs.InitCRS();
|
||||
|
||||
result = crs.Rebase(0, crs_size, 0, 0, 0, 0, true);
|
||||
|
@ -249,7 +249,7 @@ void RO::LoadCRO(Kernel::HLERequestContext& ctx, bool link_on_load_bug_fix) {
|
|||
return;
|
||||
}
|
||||
|
||||
CROHelper cro(cro_address, *process, system.Memory(), system.CPU());
|
||||
CROHelper cro(cro_address, *process, system);
|
||||
|
||||
result = cro.VerifyHash(cro_size, crr_address);
|
||||
if (result.IsError()) {
|
||||
|
@ -313,7 +313,7 @@ void RO::LoadCRO(Kernel::HLERequestContext& ctx, bool link_on_load_bug_fix) {
|
|||
}
|
||||
}
|
||||
|
||||
system.CPU().InvalidateCacheRange(cro_address, cro_size);
|
||||
system.InvalidateCacheRange(cro_address, cro_size);
|
||||
|
||||
LOG_INFO(Service_LDR, "CRO \"{}\" loaded at 0x{:08X}, fixed_end=0x{:08X}", cro.ModuleName(),
|
||||
cro_address, cro_address + fix_size);
|
||||
|
@ -331,7 +331,7 @@ void RO::UnloadCRO(Kernel::HLERequestContext& ctx) {
|
|||
LOG_DEBUG(Service_LDR, "called, cro_address=0x{:08X}, zero={}, cro_buffer_ptr=0x{:08X}",
|
||||
cro_address, zero, cro_buffer_ptr);
|
||||
|
||||
CROHelper cro(cro_address, *process, system.Memory(), system.CPU());
|
||||
CROHelper cro(cro_address, *process, system);
|
||||
|
||||
IPC::RequestBuilder rb = rp.MakeBuilder(1, 0);
|
||||
|
||||
|
@ -386,7 +386,7 @@ void RO::UnloadCRO(Kernel::HLERequestContext& ctx) {
|
|||
LOG_ERROR(Service_LDR, "Error unmapping CRO {:08X}", result.raw);
|
||||
}
|
||||
|
||||
system.CPU().InvalidateCacheRange(cro_address, fixed_size);
|
||||
system.InvalidateCacheRange(cro_address, fixed_size);
|
||||
|
||||
rb.Push(result);
|
||||
}
|
||||
|
@ -398,7 +398,7 @@ void RO::LinkCRO(Kernel::HLERequestContext& ctx) {
|
|||
|
||||
LOG_DEBUG(Service_LDR, "called, cro_address=0x{:08X}", cro_address);
|
||||
|
||||
CROHelper cro(cro_address, *process, system.Memory(), system.CPU());
|
||||
CROHelper cro(cro_address, *process, system);
|
||||
|
||||
IPC::RequestBuilder rb = rp.MakeBuilder(1, 0);
|
||||
|
||||
|
@ -438,7 +438,7 @@ void RO::UnlinkCRO(Kernel::HLERequestContext& ctx) {
|
|||
|
||||
LOG_DEBUG(Service_LDR, "called, cro_address=0x{:08X}", cro_address);
|
||||
|
||||
CROHelper cro(cro_address, *process, system.Memory(), system.CPU());
|
||||
CROHelper cro(cro_address, *process, system);
|
||||
|
||||
IPC::RequestBuilder rb = rp.MakeBuilder(1, 0);
|
||||
|
||||
|
@ -487,7 +487,7 @@ void RO::Shutdown(Kernel::HLERequestContext& ctx) {
|
|||
return;
|
||||
}
|
||||
|
||||
CROHelper crs(slot->loaded_crs, *process, system.Memory(), system.CPU());
|
||||
CROHelper crs(slot->loaded_crs, *process, system);
|
||||
crs.Unrebase(true);
|
||||
|
||||
ResultCode result = RESULT_SUCCESS;
|
||||
|
|
|
@ -46,7 +46,9 @@ void RPCServer::HandleWriteMemory(Packet& packet, u32 address, const u8* data, u
         Core::System::GetInstance().Memory().WriteBlock(
             *Core::System::GetInstance().Kernel().GetCurrentProcess(), address, data, data_size);
         // If the memory happens to be executable code, make sure the changes become visible
-        Core::CPU().InvalidateCacheRange(address, data_size);
+
+        // Is current core correct here?
+        Core::System::GetInstance().InvalidateCacheRange(address, data_size);
     }
     packet.SetPacketDataSize(0);
     packet.SendReply();
@ -15,9 +15,9 @@ static Memory::PageTable* page_table = nullptr;
 TestEnvironment::TestEnvironment(bool mutable_memory_)
     : mutable_memory(mutable_memory_), test_memory(std::make_shared<TestMemory>(this)) {
 
-    timing = std::make_unique<Core::Timing>();
+    timing = std::make_unique<Core::Timing>(1);
     memory = std::make_unique<Memory::MemorySystem>();
-    kernel = std::make_unique<Kernel::KernelSystem>(*memory, *timing, [] {}, 0);
+    kernel = std::make_unique<Kernel::KernelSystem>(*memory, *timing, [] {}, 0, 1);
 
     kernel->SetCurrentProcess(kernel->CreateProcess(kernel->CreateCodeSet("", 0)));
     page_table = &kernel->GetCurrentProcess()->vm_manager.page_table;
@ -23,7 +23,7 @@ TEST_CASE("ARM_DynCom (vfp): vadd", "[arm_dyncom]") {
     test_env.SetMemory32(0, 0xEE321A03); // vadd.f32 s2, s4, s6
     test_env.SetMemory32(4, 0xEAFFFFFE); // b +#0
 
-    ARM_DynCom dyncom(nullptr, test_env.GetMemory(), USER32MODE);
+    ARM_DynCom dyncom(nullptr, test_env.GetMemory(), USER32MODE, 0, nullptr);
 
     std::vector<VfpTestCase> test_cases{{
 #include "vfp_vadd_f32.inc"
@ -34,16 +34,16 @@ static void AdvanceAndCheck(Core::Timing& timing, u32 idx, int downcount, int ex
     expected_callback = CB_IDS[idx];
     lateness = expected_lateness;
 
-    timing.AddTicks(timing.GetDowncount() -
-                    cpu_downcount); // Pretend we executed X cycles of instructions.
-    timing.Advance();
+    timing.GetTimer(0)->AddTicks(timing.GetTimer(0)->GetDowncount() -
+                                 cpu_downcount); // Pretend we executed X cycles of instructions.
+    timing.GetTimer(0)->Advance();
 
     REQUIRE(decltype(callbacks_ran_flags)().set(idx) == callbacks_ran_flags);
-    REQUIRE(downcount == timing.GetDowncount());
+    REQUIRE(downcount == timing.GetTimer(0)->GetDowncount());
 }
 
 TEST_CASE("CoreTiming[BasicOrder]", "[core]") {
-    Core::Timing timing;
+    Core::Timing timing(1);
 
     Core::TimingEventType* cb_a = timing.RegisterEvent("callbackA", CallbackTemplate<0>);
     Core::TimingEventType* cb_b = timing.RegisterEvent("callbackB", CallbackTemplate<1>);
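For readers following the test changes: Core::Timing is now constructed with the number of timers to create, events are scheduled onto a specific core's timer (the extra trailing 0 passed to ScheduleEvent further down), and the tests advance and inspect that timer through timing.GetTimer(0). The snippet below is a toy model of that shape, not the real Core::Timing; the method names mirror the diff while the bodies and the downcount bookkeeping are assumptions made for illustration.

// Toy model of per-core timers with a downcount, in the spirit of the updated tests.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

struct Timer {
    std::int64_t downcount = 0;
    std::int64_t ticks = 0;
    void AddTicks(std::int64_t n) {
        ticks += n;
        downcount -= n;
    }
    std::int64_t GetDowncount() const { return downcount; }
};

class Timing {
public:
    explicit Timing(std::size_t num_cores) {
        for (std::size_t i = 0; i < num_cores; ++i)
            timers.push_back(std::make_unique<Timer>());
    }
    // The core index picks which timer the pending work lands on.
    void ScheduleEvent(std::int64_t cycles_into_future, std::size_t core_id) {
        Timer& timer = *timers.at(core_id);
        if (timer.downcount == 0 || cycles_into_future < timer.downcount)
            timer.downcount = cycles_into_future;
    }
    Timer* GetTimer(std::size_t core_id) { return timers.at(core_id).get(); }

private:
    std::vector<std::unique_ptr<Timer>> timers;
};

int main() {
    Timing timing(1); // single-core setup, like the tests
    timing.ScheduleEvent(1000, 0);
    timing.ScheduleEvent(500, 0);
    std::cout << "downcount: " << timing.GetTimer(0)->GetDowncount() << '\n';      // 500
    timing.GetTimer(0)->AddTicks(timing.GetTimer(0)->GetDowncount());              // run up to it
    std::cout << "downcount after ticks: " << timing.GetTimer(0)->GetDowncount() << '\n'; // 0
}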
@@ -52,60 +52,19 @@ TEST_CASE("CoreTiming[BasicOrder]", "[core]") {
    Core::TimingEventType* cb_e = timing.RegisterEvent("callbackE", CallbackTemplate<4>);

    // Enter slice 0
    timing.Advance();
    timing.GetTimer(0)->Advance();

    // D -> B -> C -> A -> E
    timing.ScheduleEvent(1000, cb_a, CB_IDS[0]);
    REQUIRE(1000 == timing.GetDowncount());
    timing.ScheduleEvent(500, cb_b, CB_IDS[1]);
    REQUIRE(500 == timing.GetDowncount());
    timing.ScheduleEvent(800, cb_c, CB_IDS[2]);
    REQUIRE(500 == timing.GetDowncount());
    timing.ScheduleEvent(100, cb_d, CB_IDS[3]);
    REQUIRE(100 == timing.GetDowncount());
    timing.ScheduleEvent(1200, cb_e, CB_IDS[4]);
    REQUIRE(100 == timing.GetDowncount());

    AdvanceAndCheck(timing, 3, 400);
    AdvanceAndCheck(timing, 1, 300);
    AdvanceAndCheck(timing, 2, 200);
    AdvanceAndCheck(timing, 0, 200);
    AdvanceAndCheck(timing, 4, MAX_SLICE_LENGTH);
}

TEST_CASE("CoreTiming[Threadsave]", "[core]") {
    Core::Timing timing;

    Core::TimingEventType* cb_a = timing.RegisterEvent("callbackA", CallbackTemplate<0>);
    Core::TimingEventType* cb_b = timing.RegisterEvent("callbackB", CallbackTemplate<1>);
    Core::TimingEventType* cb_c = timing.RegisterEvent("callbackC", CallbackTemplate<2>);
    Core::TimingEventType* cb_d = timing.RegisterEvent("callbackD", CallbackTemplate<3>);
    Core::TimingEventType* cb_e = timing.RegisterEvent("callbackE", CallbackTemplate<4>);

    // Enter slice 0
    timing.Advance();

    // D -> B -> C -> A -> E
    timing.ScheduleEventThreadsafe(1000, cb_a, CB_IDS[0]);
    // Manually force since ScheduleEventThreadsafe doesn't call it
    timing.ForceExceptionCheck(1000);
    REQUIRE(1000 == timing.GetDowncount());
    timing.ScheduleEventThreadsafe(500, cb_b, CB_IDS[1]);
    // Manually force since ScheduleEventThreadsafe doesn't call it
    timing.ForceExceptionCheck(500);
    REQUIRE(500 == timing.GetDowncount());
    timing.ScheduleEventThreadsafe(800, cb_c, CB_IDS[2]);
    // Manually force since ScheduleEventThreadsafe doesn't call it
    timing.ForceExceptionCheck(800);
    REQUIRE(500 == timing.GetDowncount());
    timing.ScheduleEventThreadsafe(100, cb_d, CB_IDS[3]);
    // Manually force since ScheduleEventThreadsafe doesn't call it
    timing.ForceExceptionCheck(100);
    REQUIRE(100 == timing.GetDowncount());
    timing.ScheduleEventThreadsafe(1200, cb_e, CB_IDS[4]);
    // Manually force since ScheduleEventThreadsafe doesn't call it
    timing.ForceExceptionCheck(1200);
    REQUIRE(100 == timing.GetDowncount());
    timing.ScheduleEvent(1000, cb_a, CB_IDS[0], 0);
    REQUIRE(1000 == timing.GetTimer(0)->GetDowncount());
    timing.ScheduleEvent(500, cb_b, CB_IDS[1], 0);
    REQUIRE(500 == timing.GetTimer(0)->GetDowncount());
    timing.ScheduleEvent(800, cb_c, CB_IDS[2], 0);
    REQUIRE(500 == timing.GetTimer(0)->GetDowncount());
    timing.ScheduleEvent(100, cb_d, CB_IDS[3], 0);
    REQUIRE(100 == timing.GetTimer(0)->GetDowncount());
    timing.ScheduleEvent(1200, cb_e, CB_IDS[4], 0);
    REQUIRE(100 == timing.GetTimer(0)->GetDowncount());

    AdvanceAndCheck(timing, 3, 400);
    AdvanceAndCheck(timing, 1, 300);

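The replaced block above is the clearest picture of the new scheduling API in this diff: ScheduleEventThreadsafe and ForceExceptionCheck disappear from the test, ScheduleEvent gains a trailing index, and the downcount is read through GetTimer(idx). A minimal sketch of that usage follows, assuming the extra argument simply selects which core's timer receives the event; only the argument order is confirmed by the calls visible here.

// Sketch only; names match the calls visible in this diff, but the exact
// ScheduleEvent signature (e.g. any defaulted core index) is an assumption.
Core::Timing timing(1); // one timer, i.e. a single emulated core

Core::TimingEventType* cb = timing.RegisterEvent("callback", CallbackTemplate<0>);
timing.GetTimer(0)->Advance();                       // enter slice 0
timing.ScheduleEvent(1000, cb, CB_IDS[0], 0);        // schedule on core/timer 0
REQUIRE(1000 == timing.GetTimer(0)->GetDowncount()); // 1000 cycles until it fires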
@@ -131,7 +90,7 @@ void FifoCallback(u64 userdata, s64 cycles_late) {
TEST_CASE("CoreTiming[SharedSlot]", "[core]") {
    using namespace SharedSlotTest;

    Core::Timing timing;
    Core::Timing timing(1);

    Core::TimingEventType* cb_a = timing.RegisterEvent("callbackA", FifoCallback<0>);
    Core::TimingEventType* cb_b = timing.RegisterEvent("callbackB", FifoCallback<1>);

@@ -139,36 +98,36 @@ TEST_CASE("CoreTiming[SharedSlot]", "[core]") {
    Core::TimingEventType* cb_d = timing.RegisterEvent("callbackD", FifoCallback<3>);
    Core::TimingEventType* cb_e = timing.RegisterEvent("callbackE", FifoCallback<4>);

    timing.ScheduleEvent(1000, cb_a, CB_IDS[0]);
    timing.ScheduleEvent(1000, cb_b, CB_IDS[1]);
    timing.ScheduleEvent(1000, cb_c, CB_IDS[2]);
    timing.ScheduleEvent(1000, cb_d, CB_IDS[3]);
    timing.ScheduleEvent(1000, cb_e, CB_IDS[4]);
    timing.ScheduleEvent(1000, cb_a, CB_IDS[0], 0);
    timing.ScheduleEvent(1000, cb_b, CB_IDS[1], 0);
    timing.ScheduleEvent(1000, cb_c, CB_IDS[2], 0);
    timing.ScheduleEvent(1000, cb_d, CB_IDS[3], 0);
    timing.ScheduleEvent(1000, cb_e, CB_IDS[4], 0);

    // Enter slice 0
    timing.Advance();
    REQUIRE(1000 == timing.GetDowncount());
    timing.GetTimer(0)->Advance();
    REQUIRE(1000 == timing.GetTimer(0)->GetDowncount());

    callbacks_ran_flags = 0;
    counter = 0;
    lateness = 0;
    timing.AddTicks(timing.GetDowncount());
    timing.Advance();
    REQUIRE(MAX_SLICE_LENGTH == timing.GetDowncount());
    timing.GetTimer(0)->AddTicks(timing.GetTimer(0)->GetDowncount());
    timing.GetTimer(0)->Advance();
    REQUIRE(MAX_SLICE_LENGTH == timing.GetTimer(0)->GetDowncount());
    REQUIRE(0x1FULL == callbacks_ran_flags.to_ullong());
}
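FifoCallback is referenced here but its body falls outside the hunk. Judging from how the test drives it (five events sharing one cycle, with flags 0 through 4 ending up as 0x1F), it plausibly looks like the sketch below; the exact body is an assumption built on the file-scope globals these tests already use (CB_IDS, counter, lateness, callbacks_ran_flags).

// Plausible shape of the callback used by the SharedSlot test; not quoted
// from the file. Events scheduled for the same cycle must fire in FIFO order.
template <unsigned int IDX>
void FifoCallback(u64 userdata, s64 cycles_late) {
    REQUIRE(CB_IDS[IDX] == userdata); // the right event fired
    REQUIRE(IDX == counter);          // and in scheduling order: a, b, c, d, e
    REQUIRE(lateness == cycles_late);
    callbacks_ran_flags.set(IDX);
    ++counter;
}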

TEST_CASE("CoreTiming[PredictableLateness]", "[core]") {
    Core::Timing timing;
    Core::Timing timing(1);

    Core::TimingEventType* cb_a = timing.RegisterEvent("callbackA", CallbackTemplate<0>);
    Core::TimingEventType* cb_b = timing.RegisterEvent("callbackB", CallbackTemplate<1>);

    // Enter slice 0
    timing.Advance();
    timing.GetTimer(0)->Advance();

    timing.ScheduleEvent(100, cb_a, CB_IDS[0]);
    timing.ScheduleEvent(200, cb_b, CB_IDS[1]);
    timing.ScheduleEvent(100, cb_a, CB_IDS[0], 0);
    timing.ScheduleEvent(200, cb_b, CB_IDS[1], 0);

    AdvanceAndCheck(timing, 0, 90, 10, -10); // (100 - 10)
    AdvanceAndCheck(timing, 1, MAX_SLICE_LENGTH, 50, -50);

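Assuming AdvanceAndCheck's parameters are (idx, expected downcount, expected lateness, cpu_downcount), as the helper hunk at the top of this file suggests, the numbers here check out by hand: the first call pretends the CPU ran 100 - (-10) = 110 cycles, so cb_a (due at cycle 100) fires 10 cycles late and cb_b (due at 200) is 90 cycles away, hence the downcount of 90; the second call runs another 90 + 50 = 140 cycles, so cb_b fires 50 cycles late and, with nothing left scheduled, the downcount falls back to MAX_SLICE_LENGTH.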
@@ -190,7 +149,7 @@ static void RescheduleCallback(Core::Timing& timing, u64 userdata, s64 cycles_la
TEST_CASE("CoreTiming[ChainScheduling]", "[core]") {
    using namespace ChainSchedulingTest;

    Core::Timing timing;
    Core::Timing timing(1);

    Core::TimingEventType* cb_a = timing.RegisterEvent("callbackA", CallbackTemplate<0>);
    Core::TimingEventType* cb_b = timing.RegisterEvent("callbackB", CallbackTemplate<1>);

@@ -201,28 +160,30 @@ TEST_CASE("CoreTiming[ChainScheduling]", "[core]") {
    });

    // Enter slice 0
    timing.Advance();
    timing.GetTimer(0)->Advance();

    timing.ScheduleEvent(800, cb_a, CB_IDS[0]);
    timing.ScheduleEvent(1000, cb_b, CB_IDS[1]);
    timing.ScheduleEvent(2200, cb_c, CB_IDS[2]);
    timing.ScheduleEvent(1000, cb_rs, reinterpret_cast<u64>(cb_rs));
    REQUIRE(800 == timing.GetDowncount());
    timing.ScheduleEvent(800, cb_a, CB_IDS[0], 0);
    timing.ScheduleEvent(1000, cb_b, CB_IDS[1], 0);
    timing.ScheduleEvent(2200, cb_c, CB_IDS[2], 0);
    timing.ScheduleEvent(1000, cb_rs, reinterpret_cast<u64>(cb_rs), 0);
    REQUIRE(800 == timing.GetTimer(0)->GetDowncount());

    reschedules = 3;
    AdvanceAndCheck(timing, 0, 200);  // cb_a
    AdvanceAndCheck(timing, 1, 1000); // cb_b, cb_rs
    REQUIRE(2 == reschedules);

    timing.AddTicks(timing.GetDowncount());
    timing.Advance(); // cb_rs
    timing.GetTimer(0)->AddTicks(timing.GetTimer(0)->GetDowncount());
    timing.GetTimer(0)->Advance(); // cb_rs
    REQUIRE(1 == reschedules);
    REQUIRE(200 == timing.GetDowncount());
    REQUIRE(200 == timing.GetTimer(0)->GetDowncount());

    AdvanceAndCheck(timing, 2, 800); // cb_c

    timing.AddTicks(timing.GetDowncount());
    timing.Advance(); // cb_rs
    timing.GetTimer(0)->AddTicks(timing.GetTimer(0)->GetDowncount());
    timing.GetTimer(0)->Advance(); // cb_rs
    REQUIRE(0 == reschedules);
    REQUIRE(MAX_SLICE_LENGTH == timing.GetDowncount());
    REQUIRE(MAX_SLICE_LENGTH == timing.GetTimer(0)->GetDowncount());
}

// TODO: Add tests for multiple timers
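The TODO above is the natural next step: nothing in this file yet exercises more than one timer. A plausible shape for such a test, assuming the constructor argument is the number of timers and the final ScheduleEvent argument selects which one (both of which the calls above imply, but neither of which this commit tests), might be:

// Hypothetical sketch for the TODO above, not part of this commit.
TEST_CASE("CoreTiming[MultipleTimers]", "[core]") {
    Core::Timing timing(2); // two per-core timers

    Core::TimingEventType* cb_a = timing.RegisterEvent("callbackA", CallbackTemplate<0>);
    Core::TimingEventType* cb_b = timing.RegisterEvent("callbackB", CallbackTemplate<1>);

    timing.GetTimer(0)->Advance();
    timing.GetTimer(1)->Advance();

    // Each event should only affect the downcount of the timer it was scheduled on.
    timing.ScheduleEvent(1000, cb_a, CB_IDS[0], 0);
    timing.ScheduleEvent(500, cb_b, CB_IDS[1], 1);
    REQUIRE(1000 == timing.GetTimer(0)->GetDowncount());
    REQUIRE(500 == timing.GetTimer(1)->GetDowncount());
}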
@@ -21,9 +21,9 @@ static std::shared_ptr<Object> MakeObject(Kernel::KernelSystem& kernel) {
}

TEST_CASE("HLERequestContext::PopulateFromIncomingCommandBuffer", "[core][kernel]") {
    Core::Timing timing;
    Core::Timing timing(1);
    Memory::MemorySystem memory;
    Kernel::KernelSystem kernel(memory, timing, [] {}, 0);
    Kernel::KernelSystem kernel(memory, timing, [] {}, 0, 1);
    auto [server, client] = kernel.CreateSessionPair();
    HLERequestContext context(kernel, std::move(server), nullptr);

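Every test fixture that builds a kernel picks up the same two adjustments: the Timing is constructed with an explicit timer count, and KernelSystem takes one extra trailing argument, which from these call sites looks like the number of emulated cores. A minimal single-core fixture under that assumption (the parameter names in the comments are guesses; only the argument order is visible in the diff):

// Sketch of the common test fixture implied by these hunks; the meaning of
// the trailing arguments (system mode = 0, core count = 1) is an assumption.
Core::Timing timing(1);
Memory::MemorySystem memory;
Kernel::KernelSystem kernel(memory, timing, [] {}, /*system_mode=*/0, /*num_cores=*/1);
auto [server, client] = kernel.CreateSessionPair();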
@@ -233,9 +233,9 @@ TEST_CASE("HLERequestContext::PopulateFromIncomingCommandBuffer", "[core][kernel
}

TEST_CASE("HLERequestContext::WriteToOutgoingCommandBuffer", "[core][kernel]") {
    Core::Timing timing;
    Core::Timing timing(1);
    Memory::MemorySystem memory;
    Kernel::KernelSystem kernel(memory, timing, [] {}, 0);
    Kernel::KernelSystem kernel(memory, timing, [] {}, 0, 1);
    auto [server, client] = kernel.CreateSessionPair();
    HLERequestContext context(kernel, std::move(server), nullptr);

@@ -11,9 +11,9 @@
#include "core/memory.h"

TEST_CASE("Memory::IsValidVirtualAddress", "[core][memory]") {
    Core::Timing timing;
    Core::Timing timing(1);
    Memory::MemorySystem memory;
    Kernel::KernelSystem kernel(memory, timing, [] {}, 0);
    Kernel::KernelSystem kernel(memory, timing, [] {}, 0, 1);
    SECTION("these regions should not be mapped on an empty process") {
        auto process = kernel.CreateProcess(kernel.CreateCodeSet("", 0));
        CHECK(Memory::IsValidVirtualAddress(*process, Memory::PROCESS_IMAGE_VADDR) == false);