hle: kernel: Migrate idle threads.
parent 479bd50b96
commit 0eeecde67c
2 changed files with 9 additions and 13 deletions
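
In short: each core's idle thread, previously created through KThread::CreateThread and held as a raw KThread*, is now constructed and exclusively owned by its KScheduler as a std::unique_ptr<KThread>; call sites that hand the pointer around switch to idle_thread.get().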
@@ -623,7 +623,7 @@ KThread* KScheduler::GetCurrentThread() const {
     if (auto result = current_thread.load(); result) {
         return result;
     }
-    return idle_thread;
+    return idle_thread.get();
 }
 
 u64 KScheduler::GetLastContextSwitchTicks() const {
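
This first hunk is the simplest instance of the new ownership model: the scheduler owns the idle thread exclusively and hands out non-owning raw pointers. A minimal sketch of the pattern, using illustrative Thread/Scheduler stand-ins rather than the real KThread/KScheduler:

    #include <atomic>
    #include <memory>

    // Minimal stand-in for the kernel's KThread; illustrative only.
    struct Thread {};

    class Scheduler {
    public:
        // Callers get a non-owning pointer; the scheduler keeps ownership.
        Thread* GetCurrentThread() const {
            if (auto* result = current_thread.load(); result) {
                return result;
            }
            // unique_ptr::get() hands out the raw pointer without
            // releasing ownership.
            return idle_thread.get();
        }

    private:
        std::atomic<Thread*> current_thread{};
        std::unique_ptr<Thread> idle_thread = std::make_unique<Thread>();
    };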
@@ -708,7 +708,7 @@ void KScheduler::ScheduleImpl() {
 
     // We never want to schedule a null thread, so use the idle thread if we don't have a next.
     if (next_thread == nullptr) {
-        next_thread = idle_thread;
+        next_thread = idle_thread.get();
     }
 
     // If we're not actually switching thread, there's nothing to do.
@@ -769,7 +769,7 @@ void KScheduler::SwitchToCurrent() {
                     break;
                 }
             }
-            auto thread = next_thread ? next_thread : idle_thread;
+            auto thread = next_thread ? next_thread : idle_thread.get();
             Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext());
         } while (!is_switch_pending());
     }
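
Both ScheduleImpl and SwitchToCurrent apply the same rule: never dispatch a null thread, fall back to the idle thread. The .get() is not cosmetic here; a KThread* and a std::unique_ptr<KThread> have no common type, so the old ternary would no longer compile once the member changed. A minimal sketch of the idiom (Thread and ChooseThreadToRun are illustrative names, not from the codebase):

    #include <memory>

    struct Thread {};

    // Hypothetical helper; 'next_thread' may be null after selection.
    Thread* ChooseThreadToRun(Thread* next_thread,
                              const std::unique_ptr<Thread>& idle_thread) {
        // Never dispatch a null thread: fall back to the idle thread.
        return next_thread ? next_thread : idle_thread.get();
    }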
@@ -792,13 +792,9 @@ void KScheduler::UpdateLastContextSwitchTime(KThread* thread, Process* process)
 }
 
 void KScheduler::Initialize() {
-    std::string name = "Idle Thread Id:" + std::to_string(core_id);
-    std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
-    void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
-    auto thread_res = KThread::CreateThread(
-        system, ThreadType::Main, name, 0, KThread::IdleThreadPriority, 0,
-        static_cast<u32>(core_id), 0, nullptr, std::move(init_func), init_func_parameter);
-    idle_thread = thread_res.Unwrap().get();
+    idle_thread = std::make_unique<KThread>(system.Kernel());
+    ASSERT(KThread::InitializeIdleThread(system, idle_thread.get(), core_id).IsSuccess());
+    idle_thread->SetName(fmt::format("IdleThread:{}", core_id));
 }
 
 KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)
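
Initialize() shrinks from a CreateThread call carrying a start function and parameter to direct ownership: the scheduler constructs the KThread itself, finishes it with KThread::InitializeIdleThread (asserting success), and names it via fmt::format. The remaining hunks below are in the scheduler's class declaration. A sketch of the construct-then-initialize shape; everything here other than the pattern itself is an illustrative stand-in:

    #include <cassert>
    #include <memory>
    #include <string>

    // Illustrative stand-ins; the real KThread/KernelCore live in the
    // suyu kernel.
    struct Kernel {};

    struct Thread {
        explicit Thread(Kernel&) {}
        void SetName(std::string new_name) { name = std::move(new_name); }
        std::string name;
    };

    // Hypothetical stand-in for KThread::InitializeIdleThread: a fallible
    // second phase of setup that runs after construction.
    bool InitializeIdleThread(Thread& thread, int core_id) {
        (void)thread;
        (void)core_id;
        return true;
    }

    void InitializeScheduler(Kernel& kernel,
                             std::unique_ptr<Thread>& idle_thread,
                             int core_id) {
        // Two-phase init: construct, run the fallible setup step, then
        // name the thread.
        idle_thread = std::make_unique<Thread>(kernel);
        [[maybe_unused]] const bool ok =
            InitializeIdleThread(*idle_thread, core_id);
        assert(ok);
        idle_thread->SetName("IdleThread:" + std::to_string(core_id));
    }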
@@ -51,7 +51,7 @@ public:
 
     /// Returns true if the scheduler is idle
    [[nodiscard]] bool IsIdle() const {
-        return GetCurrentThread() == idle_thread;
+        return GetCurrentThread() == idle_thread.get();
     }
 
     /// Gets the timestamp for the last context switch in ticks.
@@ -173,12 +173,12 @@ private:
     KThread* prev_thread{};
     std::atomic<KThread*> current_thread{};
 
-    KThread* idle_thread;
+    std::unique_ptr<KThread> idle_thread;
 
     std::shared_ptr<Common::Fiber> switch_fiber{};
 
     struct SchedulingState {
-        std::atomic<bool> needs_scheduling;
+        std::atomic<bool> needs_scheduling{};
         bool interrupt_task_thread_runnable{};
         bool should_count_idle{};
         u64 idle_count{};
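
One line in this final hunk is independent of the unique_ptr migration: std::atomic<bool> needs_scheduling{} gains braces. That matters because, before C++20 changed std::atomic's default constructor (P0883), a default-constructed atomic held an indeterminate value. A self-contained illustration (this SchedulingState mirrors the struct above, with std::uint64_t standing in for the codebase's u64 alias):

    #include <atomic>
    #include <cstdint>

    struct SchedulingState {
        // Before C++20 (P0883), 'std::atomic<bool> flag;' left the value
        // indeterminate; the braces value-initialize it to false.
        std::atomic<bool> needs_scheduling{};
        bool interrupt_task_thread_runnable{};
        bool should_count_idle{};
        std::uint64_t idle_count{};
    };

    int main() {
        SchedulingState state;
        // Well-defined: reads false thanks to the braced initializer.
        return state.needs_scheduling.load() ? 1 : 0;
    }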