kern: audit (and fix) our hardware maintenance instructions to match official kernel

Michael Scire 2021-10-27 12:31:53 -07:00
parent fb59d0ad43
commit e81a1ce5a8
16 changed files with 104 additions and 203 deletions


@@ -279,7 +279,7 @@ namespace ams::kern::arch::arm64::init {
/* Invalidate the entire tlb. */
cpu::DataSynchronizationBarrierInnerShareable();
- cpu::InvalidateEntireTlb();
+ cpu::InvalidateEntireTlbInnerShareable();
/* Copy data, if we should. */
const u64 negative_block_size_for_mask = static_cast<u64>(-static_cast<s64>(block_size));
@@ -350,7 +350,6 @@ namespace ams::kern::arch::arm64::init {
/* If we don't already have an L2 table, we need to make a new one. */
if (!l1_entry->IsTable()) {
KPhysicalAddress new_table = AllocateNewPageTable(allocator);
- ClearNewPageTable(new_table);
*l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
cpu::DataSynchronizationBarrierInnerShareable();
}
@@ -361,12 +360,12 @@ namespace ams::kern::arch::arm64::init {
if (util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize) && util::IsAligned(GetInteger(phys_addr), L2ContiguousBlockSize) && size >= L2ContiguousBlockSize) {
for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
l2_entry[i] = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, PageTableEntry::SoftwareReservedBit_None, true);
- cpu::DataSynchronizationBarrierInnerShareable();
virt_addr += L2BlockSize;
phys_addr += L2BlockSize;
size -= L2BlockSize;
}
+ cpu::DataSynchronizationBarrierInnerShareable();
continue;
}
@@ -384,7 +383,6 @@ namespace ams::kern::arch::arm64::init {
/* If we don't already have an L3 table, we need to make a new one. */
if (!l2_entry->IsTable()) {
KPhysicalAddress new_table = AllocateNewPageTable(allocator);
- ClearNewPageTable(new_table);
*l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
cpu::DataSynchronizationBarrierInnerShareable();
}
@@ -395,12 +393,12 @@ namespace ams::kern::arch::arm64::init {
if (util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize) && util::IsAligned(GetInteger(phys_addr), L3ContiguousBlockSize) && size >= L3ContiguousBlockSize) {
for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
l3_entry[i] = L3PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, PageTableEntry::SoftwareReservedBit_None, true);
- cpu::DataSynchronizationBarrierInnerShareable();
virt_addr += L3BlockSize;
phys_addr += L3BlockSize;
size -= L3BlockSize;
}
+ cpu::DataSynchronizationBarrierInnerShareable();
continue;
}
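
For reference, the 16-iteration loop bounds above fall out of the AArch64 contiguous hint, which (with the 4 KiB granule this table uses) marks groups of 16 adjacent entries as one TLB entry. A minimal sketch of the arithmetic, with values assumed from the standard granule rather than taken from this diff:

    #include <cstddef>

    constexpr std::size_t L3BlockSize           = 4 * 1024;          /* One L3 page descriptor.           */
    constexpr std::size_t L3ContiguousBlockSize = 16 * L3BlockSize;  /* 64 KiB: 16 contiguous L3 entries. */
    constexpr std::size_t L2BlockSize           = 2 * 1024 * 1024;   /* One L2 block descriptor.          */
    constexpr std::size_t L2ContiguousBlockSize = 16 * L2BlockSize;  /* 32 MiB: 16 contiguous L2 entries. */

    /* Each loop above therefore writes L2ContiguousBlockSize / L2BlockSize == 16 entries
       (likewise 16 for L3) before the single trailing barrier. */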


@@ -60,6 +60,11 @@ namespace ams::kern::arch::arm64::cpu {
__asm__ __volatile__("isb" ::: "memory");
}
+ ALWAYS_INLINE void EnsureInstructionConsistencyInnerShareable() {
+ DataSynchronizationBarrierInnerShareable();
+ InstructionMemoryBarrier();
+ }
ALWAYS_INLINE void EnsureInstructionConsistency() {
DataSynchronizationBarrier();
InstructionMemoryBarrier();
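
For context, the single-instruction barrier wrappers composed by the new helper are presumably the conventional ones; a sketch of the assumed definitions (standard ARMv8 sequences, not part of this diff):

    ALWAYS_INLINE void DataSynchronizationBarrierInnerShareable() { __asm__ __volatile__("dsb ish" ::: "memory"); }
    ALWAYS_INLINE void DataSynchronizationBarrier()               { __asm__ __volatile__("dsb sy"  ::: "memory"); }
    ALWAYS_INLINE void InstructionMemoryBarrier()                 { __asm__ __volatile__("isb"     ::: "memory"); }

    /* Under that assumption, EnsureInstructionConsistencyInnerShareable() is "dsb ish; isb":
       complete prior maintenance across the inner-shareable domain, then resynchronize the
       local instruction stream, versus the full-system "dsb sy; isb" of the existing helper. */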
@@ -177,7 +182,6 @@ namespace ams::kern::arch::arm64::cpu {
NOINLINE void SynchronizeAllCores();
/* Cache management helpers. */
- void ClearPageToZeroImpl(void *);
void StoreEntireCacheForInit();
void FlushEntireCacheForInit();
@@ -190,10 +194,16 @@ namespace ams::kern::arch::arm64::cpu {
void InvalidateEntireInstructionCache();
- ALWAYS_INLINE void ClearPageToZero(void *page) {
+ ALWAYS_INLINE void ClearPageToZero(void * const page) {
MESOSPHERE_ASSERT(util::IsAligned(reinterpret_cast<uintptr_t>(page), PageSize));
MESOSPHERE_ASSERT(page != nullptr);
- ClearPageToZeroImpl(page);
+ uintptr_t cur = reinterpret_cast<uintptr_t>(__builtin_assume_aligned(page, PageSize));
+ const uintptr_t last = cur + PageSize - DataCacheLineSize;
+ for (/* ... */; cur <= last; cur += DataCacheLineSize) {
+ __asm__ __volatile__("dc zva, %[cur]" :: [cur]"r"(cur) : "memory");
+ }
}
ALWAYS_INLINE void InvalidateTlbByAsid(u32 asid) {
@@ -213,6 +223,11 @@ namespace ams::kern::arch::arm64::cpu {
EnsureInstructionConsistency();
}
+ ALWAYS_INLINE void InvalidateEntireTlbInnerShareable() {
+ __asm__ __volatile__("tlbi vmalle1is" ::: "memory");
+ EnsureInstructionConsistencyInnerShareable();
+ }
ALWAYS_INLINE void InvalidateEntireTlbDataOnly() {
__asm__ __volatile__("tlbi vmalle1is" ::: "memory");
DataSynchronizationBarrier();
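
The new ClearPageToZero body above steps by DataCacheLineSize while issuing dc zva, which zeroes one DCZID-defined block per iteration, so the stride assumes the ZVA block size is no smaller than DataCacheLineSize. A hedged sketch (hypothetical helper, not in this commit) of reading that granule back for a sanity check:

    /* Hypothetical helper, for illustration only: query the DC ZVA block size from DCZID_EL0. */
    ALWAYS_INLINE size_t GetDcZvaBlockSize() {
        u64 dczid;
        __asm__ __volatile__("mrs %[dczid], dczid_el0" : [dczid]"=r"(dczid));

        /* DCZID_EL0.BS (bits [3:0]) is log2 of the block size in 4-byte words;
           DCZID_EL0.DZP (bit 4) set would mean dc zva is prohibited entirely. */
        return static_cast<size_t>(4) << (dczid & 0xF);
    }

    /* e.g.: MESOSPHERE_ASSERT(GetDcZvaBlockSize() == DataCacheLineSize); */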


@@ -219,27 +219,27 @@ namespace ams::kern::arch::arm64 {
Result ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, PageLinkedList *page_list, bool reuse_ll);
- static void PteDataSynchronizationBarrier() {
+ static ALWAYS_INLINE void PteDataSynchronizationBarrier() {
cpu::DataSynchronizationBarrierInnerShareable();
}
- static void ClearPageTable(KVirtualAddress table) {
+ static ALWAYS_INLINE void ClearPageTable(KVirtualAddress table) {
cpu::ClearPageToZero(GetVoidPointer(table));
}
- void OnTableUpdated() const {
+ ALWAYS_INLINE void OnTableUpdated() const {
cpu::InvalidateTlbByAsid(m_asid);
}
- void OnKernelTableUpdated() const {
+ ALWAYS_INLINE void OnKernelTableUpdated() const {
cpu::InvalidateEntireTlbDataOnly();
}
- void OnKernelTableSinglePageUpdated(KProcessAddress virt_addr) const {
+ ALWAYS_INLINE void OnKernelTableSinglePageUpdated(KProcessAddress virt_addr) const {
cpu::InvalidateTlbByVaDataOnly(virt_addr);
}
- void NoteUpdated() const {
+ ALWAYS_INLINE void NoteUpdated() const {
cpu::DataSynchronizationBarrier();
if (this->IsKernel()) {
@@ -249,7 +249,7 @@ namespace ams::kern::arch::arm64 {
}
}
- void NoteSingleKernelPageUpdated(KProcessAddress virt_addr) const {
+ ALWAYS_INLINE void NoteSingleKernelPageUpdated(KProcessAddress virt_addr) const {
MESOSPHERE_ASSERT(this->IsKernel());
cpu::DataSynchronizationBarrier();
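
The NoteUpdated() body is split across the two hunks above and its middle is elided by the diff; a hedged reconstruction of the dispatch it implies (the else-branch is an assumption based on the helpers declared above, not text from this commit):

    ALWAYS_INLINE void NoteUpdated() const {
        /* Publish the page table writes before any TLB maintenance. */
        cpu::DataSynchronizationBarrier();

        if (this->IsKernel()) {
            this->OnKernelTableUpdated();      /* Entire-TLB, data-only invalidate. */
        } else {
            this->OnTableUpdated();            /* ASID-targeted invalidate.         */
        }
    }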


@@ -45,6 +45,7 @@ namespace ams::kern::arch::arm64 {
/* Select L1 cache. */
cpu::SetCsselrEl1(0);
+ cpu::InstructionMemoryBarrier();
/* Check that the L1 cache is not direct-mapped. */
return cpu::CacheSizeIdRegisterAccessor().GetAssociativity() != 0;
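
The added InstructionMemoryBarrier() makes the cache-level selection written to CSSELR_EL1 architecturally visible before CCSIDR_EL1 is read back. A minimal sketch of the equivalent raw-register sequence (illustrative only; the field layout assumes the non-CCIDX CCSIDR format):

    ALWAYS_INLINE bool IsL1CacheSetAssociative() {
        u64 ccsidr;
        __asm__ __volatile__(
            "msr csselr_el1, xzr      \n"  /* Select the level 1 data/unified cache.         */
            "isb                      \n"  /* Ensure the selection is seen by the next read. */
            "mrs %[ccsidr], ccsidr_el1\n"
            : [ccsidr]"=r"(ccsidr) :: "memory"
        );

        /* Associativity is bits [12:3], encoded as (ways - 1); nonzero means not direct-mapped. */
        return ((ccsidr >> 3) & 0x3FF) != 0;
    }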