kern: greatly improve codegen for atomics, scheduler

This commit is contained in:
Michael Scire 2021-01-08 02:13:36 -08:00
parent f051f707ed
commit 4aa18b06e8
19 changed files with 67 additions and 46 deletions

View file

@@ -109,7 +109,7 @@ namespace ams::kern::arch::arm64::cpu {
/* Wait for a request to come in. */
{
KScopedLightLock lk(m_cv_lock);
while ((m_target_cores & (1ul << core_id)) == 0) {
while ((m_target_cores.load() & (1ul << core_id)) == 0) {
m_cv.Wait(std::addressof(m_cv_lock));
}
}
@@ -120,7 +120,7 @@ namespace ams::kern::arch::arm64::cpu {
/* Broadcast, if there's nothing pending. */
{
KScopedLightLock lk(m_cv_lock);
if (m_target_cores == 0) {
if (m_target_cores.load() == 0) {
m_cv.Broadcast();
}
}
@@ -163,7 +163,7 @@ namespace ams::kern::arch::arm64::cpu {
if ((op == Operation::InstructionMemoryBarrier) || (Kernel::GetState() == Kernel::State::Initializing)) {
/* Check that there's no on-going operation. */
MESOSPHERE_ABORT_UNLESS(m_operation == Operation::Idle);
MESOSPHERE_ABORT_UNLESS(m_target_cores == 0);
MESOSPHERE_ABORT_UNLESS(m_target_cores.load() == 0);
/* Set operation. */
m_operation = op;
@@ -171,12 +171,13 @@ namespace ams::kern::arch::arm64::cpu {
/* For certain operations, we want to send an interrupt. */
m_target_cores = other_cores_mask;
const u64 target_mask = m_target_cores;
const u64 target_mask = m_target_cores.load();
DataSynchronizationBarrier();
Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_CacheOperation, target_mask);
this->ProcessOperation();
while (m_target_cores != 0) {
while (m_target_cores.load() != 0) {
cpu::Yield();
}
@@ -188,7 +189,7 @@ namespace ams::kern::arch::arm64::cpu {
/* Check that there's no on-going operation. */
MESOSPHERE_ABORT_UNLESS(m_operation == Operation::Idle);
MESOSPHERE_ABORT_UNLESS(m_target_cores == 0);
MESOSPHERE_ABORT_UNLESS(m_target_cores.load() == 0);
/* Set operation. */
m_operation = op;
@@ -198,7 +199,7 @@ namespace ams::kern::arch::arm64::cpu {
/* Use the condvar. */
m_cv.Broadcast();
while (m_target_cores != 0) {
while (m_target_cores.load() != 0) {
m_cv.Wait(std::addressof(m_cv_lock));
}

View file

@@ -208,6 +208,8 @@ namespace ams::kern::arch::arm64 {
}
Result KInterruptManager::BindHandler(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level) {
MESOSPHERE_UNUSED(core_id);
R_UNLESS(KInterruptController::IsGlobal(irq) || KInterruptController::IsLocal(irq), svc::ResultOutOfRange());
KScopedInterruptDisable di;
@@ -222,6 +224,8 @@ namespace ams::kern::arch::arm64 {
}
Result KInterruptManager::UnbindHandler(s32 irq, s32 core_id) {
MESOSPHERE_UNUSED(core_id);
R_UNLESS(KInterruptController::IsGlobal(irq) || KInterruptController::IsLocal(irq), svc::ResultOutOfRange());
KScopedInterruptDisable di;
@@ -244,6 +248,8 @@ namespace ams::kern::arch::arm64 {
}
Result KInterruptManager::ClearInterrupt(s32 irq, s32 core_id) {
MESOSPHERE_UNUSED(core_id);
R_UNLESS(KInterruptController::IsGlobal(irq) || KInterruptController::IsLocal(irq), svc::ResultOutOfRange());
KScopedInterruptDisable di;

View file

@@ -1163,6 +1163,7 @@ namespace ams::kern::board::nintendo::nx {
}
void KDevicePageTable::UnmapImpl(KDeviceVirtualAddress address, u64 size, bool force) {
MESOSPHERE_UNUSED(force);
MESOSPHERE_ASSERT((address & ~DeviceVirtualAddressMask) == 0);
MESOSPHERE_ASSERT(((address + size - 1) & ~DeviceVirtualAddressMask) == 0);

View file

@@ -84,6 +84,7 @@ namespace ams::kern::board::nintendo::nx {
do {
bool res = smc::ReadWriteRegister(std::addressof(value), PmcPhysicalAddress + APBDEV_PMC_PWRGATE_STATUS, 0, 0);
MESOSPHERE_ASSERT(res);
MESOSPHERE_UNUSED(res);
} while ((value & PWRGATE_STATUS_CE123_MASK) != 0);
}

View file

@@ -121,6 +121,7 @@ namespace ams::kern {
MESOSPHERE_ASSERT(reserved == 0);
MESOSPHERE_ASSERT(linear_id != 0);
MESOSPHERE_ASSERT(index < m_table_size);
MESOSPHERE_UNUSED(linear_id, reserved);
/* Free the entry. */
/* NOTE: This code does not check the linear id. */
@@ -143,6 +144,7 @@ namespace ams::kern {
MESOSPHERE_ASSERT(reserved == 0);
MESOSPHERE_ASSERT(linear_id != 0);
MESOSPHERE_ASSERT(index < m_table_size);
MESOSPHERE_UNUSED(reserved);
/* Set the entry. */
Entry *entry = std::addressof(m_table[index]);

View file

@@ -16,9 +16,6 @@
#include <mesosphere.hpp>
#include <mesosphere/kern_select_page_table.hpp>
#undef ALWAYS_INLINE_LAMBDA
#define ALWAYS_INLINE_LAMBDA
namespace ams::kern {
Result KPageTableBase::InitializeForKernel(bool is_64_bit, void *table, KVirtualAddress start, KVirtualAddress end) {
@@ -3288,6 +3285,7 @@ namespace ams::kern {
TraversalEntry next_entry;
bool traverse_valid = src_impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), aligned_src_start);
MESOSPHERE_ASSERT(traverse_valid);
MESOSPHERE_UNUSED(traverse_valid);
/* Prepare tracking variables. */
KPhysicalAddress cur_block_addr = next_entry.phys_addr;

View file

@@ -17,6 +17,9 @@
namespace ams::kern {
#pragma GCC push_options
#pragma GCC optimize ("-O3")
bool KScheduler::s_scheduler_update_needed;
KScheduler::LockType KScheduler::s_scheduler_lock;
KSchedulerPriorityQueue KScheduler::s_priority_queue;
@@ -607,4 +610,6 @@ namespace ams::kern {
}
}
#pragma GCC pop_options
}

View file

@@ -63,6 +63,7 @@ namespace ams::kern {
m_tls_address = Null<KProcessAddress>;
const uintptr_t kern_stack_top_address = reinterpret_cast<uintptr_t>(kern_stack_top);
MESOSPHERE_UNUSED(kern_stack_top_address);
/* Next, assert things based on the type. */
switch (type) {
@@ -1161,7 +1162,7 @@ namespace ams::kern {
/* Determine if this is the first termination request. */
const bool first_request = [&] ALWAYS_INLINE_LAMBDA () -> bool {
/* Perform an atomic compare-and-swap from false to true. */
bool expected = false;
u8 expected = false;
return m_termination_requested.compare_exchange_strong(expected, true);
}();