kern: audit (and fix) our hardware maintenance instructions to match official kernel

This commit is contained in:
Michael Scire 2021-10-27 12:31:53 -07:00
parent fb59d0ad43
commit e81a1ce5a8
16 changed files with 104 additions and 203 deletions

View file

@ -93,3 +93,4 @@
/* Deferred includes. */
#include <mesosphere/kern_k_auto_object_impls.hpp>
#include <mesosphere/kern_k_scheduler_impls.hpp>

View file

@ -279,7 +279,7 @@ namespace ams::kern::arch::arm64::init {
/* Invalidate the entire tlb. */
cpu::DataSynchronizationBarrierInnerShareable();
cpu::InvalidateEntireTlb();
cpu::InvalidateEntireTlbInnerShareable();
/* Copy data, if we should. */
const u64 negative_block_size_for_mask = static_cast<u64>(-static_cast<s64>(block_size));
@ -350,7 +350,6 @@ namespace ams::kern::arch::arm64::init {
/* If we don't already have an L2 table, we need to make a new one. */
if (!l1_entry->IsTable()) {
KPhysicalAddress new_table = AllocateNewPageTable(allocator);
ClearNewPageTable(new_table);
*l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
cpu::DataSynchronizationBarrierInnerShareable();
}
@ -361,12 +360,12 @@ namespace ams::kern::arch::arm64::init {
if (util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize) && util::IsAligned(GetInteger(phys_addr), L2ContiguousBlockSize) && size >= L2ContiguousBlockSize) {
for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
l2_entry[i] = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, PageTableEntry::SoftwareReservedBit_None, true);
cpu::DataSynchronizationBarrierInnerShareable();
virt_addr += L2BlockSize;
phys_addr += L2BlockSize;
size -= L2BlockSize;
}
cpu::DataSynchronizationBarrierInnerShareable();
continue;
}
@ -384,7 +383,6 @@ namespace ams::kern::arch::arm64::init {
/* If we don't already have an L3 table, we need to make a new one. */
if (!l2_entry->IsTable()) {
KPhysicalAddress new_table = AllocateNewPageTable(allocator);
ClearNewPageTable(new_table);
*l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
cpu::DataSynchronizationBarrierInnerShareable();
}
@ -395,12 +393,12 @@ namespace ams::kern::arch::arm64::init {
if (util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize) && util::IsAligned(GetInteger(phys_addr), L3ContiguousBlockSize) && size >= L3ContiguousBlockSize) {
for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
l3_entry[i] = L3PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, PageTableEntry::SoftwareReservedBit_None, true);
cpu::DataSynchronizationBarrierInnerShareable();
virt_addr += L3BlockSize;
phys_addr += L3BlockSize;
size -= L3BlockSize;
}
cpu::DataSynchronizationBarrierInnerShareable();
continue;
}

View file

@ -60,6 +60,11 @@ namespace ams::kern::arch::arm64::cpu {
__asm__ __volatile__("isb" ::: "memory");
}
ALWAYS_INLINE void EnsureInstructionConsistencyInnerShareable() {
DataSynchronizationBarrierInnerShareable();
InstructionMemoryBarrier();
}
ALWAYS_INLINE void EnsureInstructionConsistency() {
DataSynchronizationBarrier();
InstructionMemoryBarrier();
@ -177,7 +182,6 @@ namespace ams::kern::arch::arm64::cpu {
NOINLINE void SynchronizeAllCores();
/* Cache management helpers. */
void ClearPageToZeroImpl(void *);
void StoreEntireCacheForInit();
void FlushEntireCacheForInit();
@ -190,10 +194,16 @@ namespace ams::kern::arch::arm64::cpu {
void InvalidateEntireInstructionCache();
ALWAYS_INLINE void ClearPageToZero(void *page) {
ALWAYS_INLINE void ClearPageToZero(void * const page) {
MESOSPHERE_ASSERT(util::IsAligned(reinterpret_cast<uintptr_t>(page), PageSize));
MESOSPHERE_ASSERT(page != nullptr);
ClearPageToZeroImpl(page);
uintptr_t cur = reinterpret_cast<uintptr_t>(__builtin_assume_aligned(page, PageSize));
const uintptr_t last = cur + PageSize - DataCacheLineSize;
for (/* ... */; cur <= last; cur += DataCacheLineSize) {
__asm__ __volatile__("dc zva, %[cur]" :: [cur]"r"(cur) : "memory");
}
}
ALWAYS_INLINE void InvalidateTlbByAsid(u32 asid) {
@ -213,6 +223,11 @@ namespace ams::kern::arch::arm64::cpu {
EnsureInstructionConsistency();
}
ALWAYS_INLINE void InvalidateEntireTlbInnerShareable() {
__asm__ __volatile__("tlbi vmalle1is" ::: "memory");
EnsureInstructionConsistencyInnerShareable();
}
ALWAYS_INLINE void InvalidateEntireTlbDataOnly() {
__asm__ __volatile__("tlbi vmalle1is" ::: "memory");
DataSynchronizationBarrier();

View file

@ -219,27 +219,27 @@ namespace ams::kern::arch::arm64 {
Result ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, PageLinkedList *page_list, bool reuse_ll);
static void PteDataSynchronizationBarrier() {
static ALWAYS_INLINE void PteDataSynchronizationBarrier() {
cpu::DataSynchronizationBarrierInnerShareable();
}
static void ClearPageTable(KVirtualAddress table) {
static ALWAYS_INLINE void ClearPageTable(KVirtualAddress table) {
cpu::ClearPageToZero(GetVoidPointer(table));
}
void OnTableUpdated() const {
ALWAYS_INLINE void OnTableUpdated() const {
cpu::InvalidateTlbByAsid(m_asid);
}
void OnKernelTableUpdated() const {
ALWAYS_INLINE void OnKernelTableUpdated() const {
cpu::InvalidateEntireTlbDataOnly();
}
void OnKernelTableSinglePageUpdated(KProcessAddress virt_addr) const {
ALWAYS_INLINE void OnKernelTableSinglePageUpdated(KProcessAddress virt_addr) const {
cpu::InvalidateTlbByVaDataOnly(virt_addr);
}
void NoteUpdated() const {
ALWAYS_INLINE void NoteUpdated() const {
cpu::DataSynchronizationBarrier();
if (this->IsKernel()) {
@ -249,7 +249,7 @@ namespace ams::kern::arch::arm64 {
}
}
void NoteSingleKernelPageUpdated(KProcessAddress virt_addr) const {
ALWAYS_INLINE void NoteSingleKernelPageUpdated(KProcessAddress virt_addr) const {
MESOSPHERE_ASSERT(this->IsKernel());
cpu::DataSynchronizationBarrier();

View file

@ -45,6 +45,7 @@ namespace ams::kern::arch::arm64 {
/* Select L1 cache. */
cpu::SetCsselrEl1(0);
cpu::InstructionMemoryBarrier();
/* Check that the L1 cache is not direct-mapped. */
return cpu::CacheSizeIdRegisterAccessor().GetAssociativity() != 0;

View file

@ -46,7 +46,7 @@ namespace ams::kern {
return m_slab_heap->Allocate(m_page_allocator);
}
void Free(T *t) const {
ALWAYS_INLINE void Free(T *t) const {
m_slab_heap->Free(t);
}
};

View file

@ -211,18 +211,6 @@ namespace ams::kern {
static consteval bool ValidateAssemblyOffsets();
};
consteval bool KScheduler::ValidateAssemblyOffsets() {
static_assert(AMS_OFFSETOF(KScheduler, m_state.needs_scheduling) == KSCHEDULER_NEEDS_SCHEDULING);
static_assert(AMS_OFFSETOF(KScheduler, m_state.interrupt_task_runnable) == KSCHEDULER_INTERRUPT_TASK_RUNNABLE);
static_assert(AMS_OFFSETOF(KScheduler, m_state.highest_priority_thread) == KSCHEDULER_HIGHEST_PRIORITY_THREAD);
static_assert(AMS_OFFSETOF(KScheduler, m_state.idle_thread_stack) == KSCHEDULER_IDLE_THREAD_STACK);
static_assert(AMS_OFFSETOF(KScheduler, m_state.prev_thread) == KSCHEDULER_PREVIOUS_THREAD);
static_assert(AMS_OFFSETOF(KScheduler, m_state.interrupt_task_manager) == KSCHEDULER_INTERRUPT_TASK_MANAGER);
return true;
}
static_assert(KScheduler::ValidateAssemblyOffsets());
class KScopedSchedulerLock : KScopedLock<KScheduler::LockType> {
public:
explicit ALWAYS_INLINE KScopedSchedulerLock() : KScopedLock(KScheduler::s_scheduler_lock) { /* ... */ }

View file

@ -0,0 +1,43 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_scheduler.hpp>
#include <mesosphere/kern_select_interrupt_manager.hpp>
namespace ams::kern {
/* NOTE: This header is included after all main headers. */
/* Compile-time validation that the field offsets of KScheduler::m_state match the */
/* KSCHEDULER_* constants hard-coded into the kernel's assembly sources. Evaluated */
/* entirely at compile time (consteval); a mismatch fails the build via static_assert. */
consteval bool KScheduler::ValidateAssemblyOffsets() {
static_assert(AMS_OFFSETOF(KScheduler, m_state.needs_scheduling) == KSCHEDULER_NEEDS_SCHEDULING);
static_assert(AMS_OFFSETOF(KScheduler, m_state.interrupt_task_runnable) == KSCHEDULER_INTERRUPT_TASK_RUNNABLE);
static_assert(AMS_OFFSETOF(KScheduler, m_state.highest_priority_thread) == KSCHEDULER_HIGHEST_PRIORITY_THREAD);
static_assert(AMS_OFFSETOF(KScheduler, m_state.idle_thread_stack) == KSCHEDULER_IDLE_THREAD_STACK);
static_assert(AMS_OFFSETOF(KScheduler, m_state.prev_thread) == KSCHEDULER_PREVIOUS_THREAD);
static_assert(AMS_OFFSETOF(KScheduler, m_state.interrupt_task_manager) == KSCHEDULER_INTERRUPT_TASK_MANAGER);
/* Always returns true; the checks above are what matter. */
return true;
}
/* Force evaluation of the offset checks at namespace scope. */
static_assert(KScheduler::ValidateAssemblyOffsets());
/* Signal every *other* core in cores_needing_scheduling (a bitmask of core ids) to */
/* reschedule, by sending it a scheduler inter-processor interrupt. The calling core */
/* (m_core_id) is masked out; if no remote core needs scheduling, nothing is sent. */
ALWAYS_INLINE void KScheduler::RescheduleOtherCores(u64 cores_needing_scheduling) {
if (const u64 core_mask = cores_needing_scheduling & ~(1ul << m_core_id); core_mask != 0) {
/* Barrier before the IPI so prior memory writes are visible to the target cores */
/* by the time they observe the interrupt. */
cpu::DataSynchronizationBarrier();
Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_Scheduler, core_mask);
}
}
}