kern: implement dpc + skeleton rest of main

This commit is contained in:
Michael Scire 2020-02-07 19:16:09 -08:00
parent e9e949ec36
commit 1224ed8abe
14 changed files with 559 additions and 40 deletions

View file

@ -0,0 +1,151 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
/* A deferred procedure call: a single task that every core must service. */
/* Cores wait for their request bit, run the task, then clear their bit.  */
class KDpcTask {
    private:
        /* All static state below is shared by every core and protected by s_lock. */
        static inline KLightLock s_lock;
        static inline KLightConditionVariable s_cond_var;
        /* Bitmask with bit N set while core N still has to service the pending task. */
        static inline u64 s_core_mask;
        /* The pending task. NOTE(review): only one outstanding task is representable at a time. */
        static inline KDpcTask *s_task;
    private:
        /* Whether core_id still has a pending request. Caller must hold s_lock. */
        static bool HasRequest(s32 core_id) {
            return (s_core_mask & (1ull << core_id)) != 0;
        }
        /* Mark core_id as having a pending request. Caller must hold s_lock. */
        static void SetRequest(s32 core_id) {
            s_core_mask |= (1ull << core_id);
        }
        /* Clear core_id's pending request. Caller must hold s_lock. */
        static void ClearRequest(s32 core_id) {
            s_core_mask &= ~(1ull << core_id);
        }
    public:
        /* The deferred procedure itself; concrete tasks override this. */
        virtual void DoTask() { /* ... */ }

        /* Block the calling core until a request for it arrives. */
        static void WaitForRequest() {
            /* Wait for a request to come in. */
            const auto core_id = GetCurrentCoreId();
            KScopedLightLock lk(s_lock);
            while (!HasRequest(core_id)) {
                s_cond_var.Wait(&s_lock, -1ll);
            }
        }

        /* Like WaitForRequest, but gives up once the hardware timer passes `timeout`.  */
        /* `timeout` is an absolute tick (callers compute GetTick() + interval), not a  */
        /* duration. Returns true if a request arrived, false if the deadline passed.   */
        static bool TimedWaitForRequest(s64 timeout) {
            /* Wait for a request to come in. */
            const auto core_id = GetCurrentCoreId();
            KScopedLightLock lk(s_lock);
            while (!HasRequest(core_id)) {
                s_cond_var.Wait(&s_lock, timeout);
                if (KHardwareTimer::GetTick() >= timeout) {
                    return false;
                }
            }
            return true;
        }

        /* Run the pending task on this core, then clear this core's request bit. */
        static void HandleRequest() {
            /* Perform the request. */
            /* NOTE: DoTask() is invoked without holding s_lock. */
            s_task->DoTask();

            /* Clear the request. */
            const auto core_id = GetCurrentCoreId();
            KScopedLightLock lk(s_lock);
            ClearRequest(core_id);
            /* When the last core finishes, wake anyone waiting for completion. */
            if (s_core_mask == 0) {
                s_cond_var.Broadcast();
            }
        }
};
/* Convenience definitions. */
constexpr s32 DpcManagerThreadPriority = 3;
constexpr s64 DpcManagerTimeout = 192'000ll; /* TODO: Constexpr conversion from 10ms */
/* Globals. */
s64 g_preemption_priorities[cpu::NumCores];
/* Manager thread functions. */
/* Entry point for the per-core DPC servicing thread: block until this core */
/* has a pending request, handle it, and repeat forever.                    */
void DpcManagerNormalThreadFunction(uintptr_t arg) {
    for (;;) {
        KDpcTask::WaitForRequest();
        KDpcTask::HandleRequest();
    }
}
/* Entry point for the preemption thread: services DPCs like the normal   */
/* thread, but with a deadline. When the deadline passes with no request, */
/* it rotates every core's scheduled queue to enforce round-robin         */
/* preemption, then arms the next deadline.                               */
void DpcManagerPreemptionThreadFunction(uintptr_t arg) {
    /* Arm the first preemption deadline (absolute tick). */
    s64 timeout = KHardwareTimer::GetTick() + DpcManagerTimeout;
    for (;;) {
        /* If a request arrives before the deadline, just service it. */
        if (KDpcTask::TimedWaitForRequest(timeout)) {
            KDpcTask::HandleRequest();
            continue;
        }

        /* Deadline passed: rotate the scheduler queue for each core. */
        KScopedSchedulerLock lk;
        for (size_t which = 0; which < cpu::NumCores; which++) {
            const s32 rotate_prio = g_preemption_priorities[which];
            if (rotate_prio > DpcManagerThreadPriority) {
                KScheduler::RotateScheduledQueue(static_cast<s32>(which), rotate_prio);
            }
        }

        /* Arm the next deadline (still under the scheduler lock, as before). */
        timeout = KHardwareTimer::GetTick() + DpcManagerTimeout;
    }
}
}
/* Create and launch the DPC manager thread for the given core.                        */
/* `priority` is recorded as the priority at which this core's queue is rotated        */
/* by the preemption thread.                                                           */
void KDpcManager::Initialize(s32 core_id, s32 priority) {
    /* Reserve a thread from the system limit. */
    MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_ThreadCountMax, 1));

    /* Create a new thread. */
    KThread *new_thread = KThread::Create();
    MESOSPHERE_ABORT_UNLESS(new_thread != nullptr);

    /* Launch the new thread. */
    g_preemption_priorities[core_id] = priority;
    if (core_id == cpu::NumCores - 1) {
        /* BUGFIX: exactly one core -- the last -- runs the preemption thread. That thread   */
        /* rotates the scheduled queues of *every* core on timeout, so it must be unique;    */
        /* the previous code inverted this, giving N-1 cores the preemption loop (redundant  */
        /* rotation of all queues) and leaving only one core with a normal DPC servicer.     */
        MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeKernelThread(new_thread, DpcManagerPreemptionThreadFunction, 0, DpcManagerThreadPriority, core_id));
    } else {
        /* Every other core runs the plain DPC servicing loop. */
        MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeKernelThread(new_thread, DpcManagerNormalThreadFunction, 0, DpcManagerThreadPriority, core_id));
    }

    /* Register the new thread. */
    KThread::Register(new_thread);

    /* Run the thread. */
    new_thread->Run();
}
void KDpcManager::HandleDpc() {
/* The only deferred procedure supported by Horizon is thread termination. */
/* Check if we need to terminate the current thread. */
KThread *cur_thread = GetCurrentThreadPointer();
if (cur_thread->IsTerminationRequested()) {
KScopedInterruptEnable ei;
cur_thread->Exit();
}
}
}

View file

@ -300,5 +300,89 @@ namespace ams::kern {
}
}
/* Rotate core_id's scheduled queue at the given priority (round-robin preemption), */
/* then try to migrate suitable suggested threads from other cores onto this one.   */
/* Caller must hold the scheduler lock.                                             */
void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
    MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());

    /* Get a reference to the priority queue. */
    auto &priority_queue = GetPriorityQueue();

    /* Rotate the front of the queue to the end. */
    KThread *top_thread = priority_queue.GetScheduledFront(core_id, priority);
    KThread *next_thread = nullptr;
    if (top_thread != nullptr) {
        next_thread = priority_queue.MoveToScheduledBack(top_thread);
        if (next_thread != top_thread) {
            IncrementScheduledCount(top_thread);
            IncrementScheduledCount(next_thread);
        }
    }

    /* While we have a suggested thread, try to migrate it! */
    {
        KThread *suggested = priority_queue.GetSuggestedFront(core_id, priority);
        while (suggested != nullptr) {
            /* Check if the suggested thread is the top thread on its core. */
            const s32 suggested_core = suggested->GetActiveCore();
            if (KThread *top_on_suggested_core = (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) : nullptr; top_on_suggested_core != suggested) {
                /* If the next thread is a new thread that has been waiting longer than our suggestion, we prefer it to our suggestion. */
                if (top_thread != next_thread && next_thread != nullptr && next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick()) {
                    suggested = nullptr;
                    break;
                }

                /* If we're allowed to do a migration, do one. */
                /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion to the front of the queue. */
                if (top_on_suggested_core == nullptr || top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
                    suggested->SetActiveCore(core_id);
                    priority_queue.ChangeCore(suggested_core, suggested, true);
                    IncrementScheduledCount(suggested);
                    break;
                }
            }

            /* Get the next suggestion. */
            suggested = priority_queue.GetSamePriorityNext(core_id, suggested);
        }
    }

    /* Now that we might have migrated a thread with the same priority, check if we can do better. */
    {
        KThread *best_thread = priority_queue.GetScheduledFront(core_id);
        if (best_thread == GetCurrentThreadPointer()) {
            best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
        }

        /* If the best thread we can choose has a priority the same or worse than ours, try to migrate a higher priority thread. */
        if (best_thread != nullptr && best_thread->GetPriority() >= priority) {
            KThread *suggested = priority_queue.GetSuggestedFront(core_id);
            while (suggested != nullptr) {
                /* If the suggestion's priority is the same as ours, don't bother. */
                if (suggested->GetPriority() >= best_thread->GetPriority()) {
                    break;
                }

                /* Check if the suggested thread is the top thread on its core. */
                const s32 suggested_core = suggested->GetActiveCore();
                if (KThread *top_on_suggested_core = (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) : nullptr; top_on_suggested_core != suggested) {
                    /* If we're allowed to do a migration, do one. */
                    /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion to the front of the queue. */
                    if (top_on_suggested_core == nullptr || top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
                        suggested->SetActiveCore(core_id);
                        priority_queue.ChangeCore(suggested_core, suggested, true);
                        IncrementScheduledCount(suggested);
                        break;
                    }
                }

                /* Get the next suggestion. */
                /* BUGFIX: `suggested` comes from the suggestion list (GetSuggestedFront), so   */
                /* iteration must advance through the suggestion list. The previous code called */
                /* GetScheduledNext(core_id, suggested), walking the scheduled queue instead    */
                /* (which does not contain the suggested thread), cutting the search short.     */
                suggested = priority_queue.GetSuggestedNext(core_id, suggested);
            }
        }
    }

    /* After a rotation, we need a scheduler update. */
    SetSchedulerUpdateNeeded();
}
}

View file

@ -17,6 +17,24 @@
namespace ams::kern {
namespace {
/* Tear down a kernel stack. The stack is a single page ending at stack_top; */
/* unmapping is still TODO, so only the page free is performed today.        */
void CleanupKernelStack(uintptr_t stack_top) {
    const uintptr_t stack_bottom = stack_top - PageSize;

    KPhysicalAddress stack_paddr = Null<KPhysicalAddress>;
    /* TODO: MESOSPHERE_ABORT_UNLESS(Kernel::GetSupervisorPageTable().GetPhysicalAddress(&stack_paddr, stack_bottom)); */
    (void)stack_bottom;

    /* TODO: MESOSPHERE_R_ABORT_UNLESS(Kernel::GetSupervisorPageTable().Unmap(...) */
    (void)stack_paddr;

    /* Free the stack page. */
    /* NOTE(review): until the TODOs above are implemented, stack_paddr is still Null */
    /* here, so FromPhysicalAddress receives a null physical address -- verify intent. */
    KPageBuffer::Free(KPageBuffer::FromPhysicalAddress(stack_paddr));
}
}
Result KThread::Initialize(KThreadFunction func, uintptr_t arg, void *kern_stack_top, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type) {
/* Assert parameters are valid. */
MESOSPHERE_ASSERT_THIS();
@ -174,6 +192,32 @@ namespace ams::kern {
return ResultSuccess();
}
/* Allocate and map a kernel stack page for `thread`, then initialize it.   */
/* On any failure the stack page is released (and, once mapping exists,     */
/* unmapped) before the error is propagated.                                */
Result KThread::InitializeThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type) {
    /* Get stack region for the thread. */
    const auto &stack_region = KMemoryLayout::GetKernelStackRegion();

    /* Allocate a page to use as the thread. */
    KPageBuffer * const stack_page = KPageBuffer::Allocate();
    R_UNLESS(stack_page != nullptr, svc::ResultOutOfResource());

    /* Map the stack page, freeing it if the mapping fails. */
    KProcessAddress stack_top = Null<KProcessAddress>;
    {
        auto alloc_guard = SCOPE_GUARD { KPageBuffer::Free(stack_page); };
        /* TODO: R_TRY(Kernel::GetSupervisorPageTable().Map); ... */
        (void)(stack_region);
        alloc_guard.Cancel();
    }

    /* Initialize the thread, cleaning up the stack mapping on failure. */
    auto unmap_guard = SCOPE_GUARD { CleanupKernelStack(GetInteger(stack_top)); };
    R_TRY(thread->Initialize(func, arg, GetVoidPointer(stack_top), user_stack_top, prio, core, owner, type));
    unmap_guard.Cancel();

    return ResultSuccess();
}
void KThread::PostDestroy(uintptr_t arg) {
KProcess *owner = reinterpret_cast<KProcess *>(arg & ~1ul);
const bool resource_limit_release_hint = (arg & 1);
@ -202,6 +246,100 @@ namespace ams::kern {
/* TODO */
}
/* Demote this thread to the idle priority (both effective and base), */
/* notifying the scheduler of the change.                             */
Result KThread::SetPriorityToIdle() {
    MESOSPHERE_ASSERT_THIS();

    /* Remember the previous effective priority, then demote both priorities. */
    const s32 prev_priority = this->priority;
    this->priority      = IdleThreadPriority;
    this->base_priority = IdleThreadPriority;

    /* Let the scheduler react to the priority change. */
    KScheduler::OnThreadPriorityChanged(this, prev_priority);

    return ResultSuccess();
}
/* Record a suspend request of the given type and try to suspend immediately. */
void KThread::RequestSuspend(SuspendType type) {
    MESOSPHERE_ASSERT_THIS();

    KScopedSchedulerLock sl;

    /* Record the request of this type in our suspend flags. */
    this->suspend_request_flags |= (1 << (ThreadState_SuspendShift + type));

    /* Attempt to actually enter the suspended state. */
    this->TrySuspend();
}
/* Suspend this thread if nothing prevents it; a no-op while kernel waiters remain. */
/* Caller must hold the scheduler lock, and a suspend must have been requested.     */
void KThread::TrySuspend() {
    MESOSPHERE_ASSERT_THIS();
    MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
    MESOSPHERE_ASSERT(this->IsSuspended());

    /* We may only suspend when there are no kernel waiters. */
    if (this->GetNumKernelWaiters() == 0) {
        MESOSPHERE_ABORT_UNLESS(this->GetNumKernelWaiters() == 0);

        /* Perform the suspend. */
        this->Suspend();
    }
}
/* Apply the pending suspend flags to our thread state and notify the scheduler. */
/* Caller must hold the scheduler lock, and a suspend must have been requested.  */
void KThread::Suspend() {
    MESOSPHERE_ASSERT_THIS();
    MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
    MESOSPHERE_ASSERT(this->IsSuspended());

    /* Merge the suspend flags into the state field, keeping the masked base state. */
    const auto prev_state = this->thread_state;
    this->thread_state = static_cast<ThreadState>(this->GetSuspendFlags() | (prev_state & ThreadState_Mask));

    /* Inform the scheduler of the state transition. */
    KScheduler::OnThreadStateChanged(this, prev_state);
}
/* Transition this (Initialized) thread to Runnable, retrying under the */
/* scheduler lock while the *current* thread needs to suspend first.    */
/* Fails if either thread is terminating or this thread's state is wrong. */
Result KThread::Run() {
    MESOSPHERE_ASSERT_THIS();

    /* If the kernel hasn't finished initializing, then we should suspend. */
    if (Kernel::GetState() != Kernel::State::Initialized) {
        this->RequestSuspend(SuspendType_Init);
    }

    /* Retry loop: the scheduler lock is released and re-acquired each iteration. */
    while (true) {
        KScopedSchedulerLock lk;

        /* If either this thread or the current thread are requesting termination, note it. */
        R_UNLESS(!this->IsTerminationRequested(), svc::ResultTerminationRequested());
        R_UNLESS(!GetCurrentThread().IsTerminationRequested(), svc::ResultTerminationRequested());

        /* Ensure our thread state is correct. */
        R_UNLESS(this->GetState() == ThreadState_Initialized, svc::ResultInvalidState());

        /* If the current thread has been asked to suspend, suspend it and retry. */
        if (GetCurrentThread().IsSuspended()) {
            GetCurrentThread().Suspend();
            continue;
        }

        /* If we're not a kernel thread and we've been asked to suspend, suspend ourselves. */
        if (this->IsUserThread() && this->IsSuspended()) {
            this->Suspend();
        }

        /* Set our state and finish. */
        this->SetState(KThread::ThreadState_Runnable);
        return ResultSuccess();
    }
}
/* Terminate the current thread. Not yet implemented; must never return. */
void KThread::Exit() {
    MESOSPHERE_ASSERT_THIS();

    /* TODO */

    /* Exit must not return to its caller; until the real implementation */
    /* exists, panicking here makes that contract explicit. */
    MESOSPHERE_PANIC("KThread::Exit() would return");
}
void KThread::SetState(ThreadState state) {
MESOSPHERE_ASSERT_THIS();

View file

@ -17,6 +17,21 @@
namespace ams::kern {
namespace {
/* Run f once per core, strictly in ascending core-id order: every core enters */
/* the loop together, core i executes f on iteration i, and the barrier after  */
/* each iteration keeps all cores in lock-step.                                */
template<typename F>
ALWAYS_INLINE void DoOnEachCoreInOrder(s32 core_id, F f) {
    /* Rendezvous before the ordered sequence begins. */
    cpu::SynchronizeAllCores();

    for (size_t which = 0; which < cpu::NumCores; which++) {
        if (static_cast<s32>(which) == core_id) {
            f();
        }
        cpu::SynchronizeAllCores();
    }
}
}
NORETURN void HorizonKernelMain(s32 core_id) {
/* Setup the Core Local Region, and note that we're initializing. */
Kernel::InitializeCoreLocalRegion(core_id);
@ -26,13 +41,9 @@ namespace ams::kern {
cpu::SynchronizeAllCores();
/* Initialize the main and idle thread for each core. */
/* Synchronize after each init to ensure the cores go in order. */
for (size_t i = 0; i < cpu::NumCores; i++) {
if (static_cast<s32>(i) == core_id) {
Kernel::InitializeMainAndIdleThreads(core_id);
}
cpu::SynchronizeAllCores();
}
DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
Kernel::InitializeMainAndIdleThreads(core_id);
});
if (core_id == 0) {
/* Initialize KSystemControl. */
@ -58,8 +69,68 @@ namespace ams::kern {
}
}
/* TODO: Implement more of Main() */
/* Initialize the supervisor page table for each core. */
DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
/* TODO: KPageTable::Initialize(); */
/* TODO: Kernel::GetSupervisorPageTable().Initialize(); */
});
/* Set ttbr0 for each core. */
DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
/* TODO: SetTtbr0(); */
});
/* NOTE: Kernel calls on each core a nullsub here on retail kernel. */
/* Register the main/idle threads and initialize the interrupt task manager. */
DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
KThread::Register(std::addressof(Kernel::GetMainThread(core_id)));
KThread::Register(std::addressof(Kernel::GetIdleThread(core_id)));
/* TODO: Kernel::GetInterruptTaskManager().Initialize(); */
});
/* Activate the scheduler and enable interrupts. */
DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
Kernel::GetScheduler().Activate();
KInterruptManager::EnableInterrupts();
});
/* Initialize cpu interrupt threads. */
/* TODO cpu::InitializeInterruptThreads(core_id); */
/* Initialize the DPC manager. */
KDpcManager::Initialize();
cpu::SynchronizeAllCores();
/* Perform more core-0 specific initialization. */
if (core_id == 0) {
/* TODO: Initialize KWorkerThreadManager */
/* TODO: KSystemControl::InitializeSleepManagerAndAppletSecureMemory(); */
/* TODO: KDeviceAddressSpace::Initialize(); */
/* TODO: CreateAndRunInitialProcesses(); */
/* We're done initializing! */
Kernel::SetState(Kernel::State::Initialized);
/* TODO: KThread::ResumeThreadsSuspendedForInit(); */
}
cpu::SynchronizeAllCores();
/* Set the current thread priority to idle. */
GetCurrentThread().SetPriorityToIdle();
/* Exit the main thread. */
{
auto &main_thread = Kernel::GetMainThread(core_id);
main_thread.Open();
main_thread.Exit();
}
/* Main() is done, and we should never get to this point. */
MESOSPHERE_PANIC("Main Thread continued after exit.");
while (true) { /* ... */ }
}