kern: Perform page table validity pass during KPageTableImpl::InitializeForKernel

Michael Scire 2024-10-10 19:14:07 -07:00 committed by SciresM
parent c911420d6a
commit e63cae5c77
4 changed files with 127 additions and 69 deletions
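
The pass introduced below relies on a software-defined validity marker kept in descriptor bits the AArch64 MMU ignores (for stage-1 block and page descriptors, bits [58:55] are reserved for software use). A minimal sketch of the idea, with a hypothetical bit assignment rather than mesosphere's actual layout:

#include <cstdint>

/* Hypothetical bit choice for illustration only; mesosphere's real assignment may differ. */
constexpr uint64_t SoftwareReservedBit_Valid = UINT64_C(1) << 55;

constexpr bool HasSoftwareValid(uint64_t descriptor) {
    return (descriptor & SoftwareReservedBit_Valid) != 0;
}

constexpr uint64_t SetSoftwareValid(uint64_t descriptor) {
    return descriptor | SoftwareReservedBit_Valid;
}

The new pass asserts this marker is clear on every entry KInitialPageTable produced, then stamps it onto mapped entries so they satisfy KPageTable's invariants.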


@@ -220,7 +220,7 @@ namespace ams::kern::arch::arm64 {
/* Remove the entries from the previous table. */
if (context.level != KPageTableImpl::EntryLevel_L1) {
-context.level_entries[context.level + 1]->RemoveTableEntries(num_to_clear);
+context.level_entries[context.level + 1]->CloseTableReferences(num_to_clear);
}
/* If we cleared a table, we need to note that we updated and free the table. */
@@ -238,7 +238,7 @@ namespace ams::kern::arch::arm64 {
context.level_entries[context.level] = pte + num_to_clear - 1;
/* We may have removed the last entries in a table, in which case we can free and unmap the tables. */
-if (context.level >= KPageTableImpl::EntryLevel_L1 || context.level_entries[context.level + 1]->GetTableNumEntries() != 0) {
+if (context.level >= KPageTableImpl::EntryLevel_L1 || context.level_entries[context.level + 1]->GetTableReferenceCount() != 0) {
break;
}
@@ -395,7 +395,7 @@ namespace ams::kern::arch::arm64 {
/* Remove the entries from the previous table. */
if (context.level != KPageTableImpl::EntryLevel_L1) {
-context.level_entries[context.level + 1]->RemoveTableEntries(num_to_clear);
+context.level_entries[context.level + 1]->CloseTableReferences(num_to_clear);
}
/* If we cleared a table, we need to note that we updated and free the table. */
@@ -415,7 +415,7 @@ namespace ams::kern::arch::arm64 {
context.level_entries[context.level] = pte + num_to_clear - 1;
/* We may have removed the last entries in a table, in which case we can free and unmap the tables. */
-if (context.level >= KPageTableImpl::EntryLevel_L1 || context.level_entries[context.level + 1]->GetTableNumEntries() != 0) {
+if (context.level >= KPageTableImpl::EntryLevel_L1 || context.level_entries[context.level + 1]->GetTableReferenceCount() != 0) {
break;
}
@@ -485,7 +485,7 @@ namespace ams::kern::arch::arm64 {
/* Remove entries for and free any tables. */
while (context.level < KPageTableImpl::EntryLevel_L1) {
/* If the higher-level table has entries, we don't need to do a free. */
-if (context.level_entries[context.level + 1]->GetTableNumEntries() != 0) {
+if (context.level_entries[context.level + 1]->GetTableReferenceCount() != 0) {
break;
}
@@ -500,7 +500,7 @@ namespace ams::kern::arch::arm64 {
/* Remove the entry for the table one level higher. */
if (context.level + 1 < KPageTableImpl::EntryLevel_L1) {
-context.level_entries[context.level + 2]->RemoveTableEntries(1);
+context.level_entries[context.level + 2]->CloseTableReferences(1);
}
/* Advance our level. */
@@ -527,7 +527,7 @@ namespace ams::kern::arch::arm64 {
/* Add the entry to the table containing this one. */
if (context.level != KPageTableImpl::EntryLevel_L1) {
-context.level_entries[context.level + 1]->AddTableEntries(1);
+context.level_entries[context.level + 1]->OpenTableReferences(1);
}
/* Decrease our level. */
@@ -559,7 +559,7 @@ namespace ams::kern::arch::arm64 {
/* Add the entries to the table containing this one. */
if (context.level != KPageTableImpl::EntryLevel_L1) {
-context.level_entries[context.level + 1]->AddTableEntries(num_ptes);
+context.level_entries[context.level + 1]->OpenTableReferences(num_ptes);
}
/* Update our context. */
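
The renames above track a semantic shift, not just new names: a table-level PTE now records how many open references its child table holds, and teardown may free a child table only once that count drops to zero. A simplified model of the renamed helpers (hypothetical struct for illustration; the real count lives in the descriptor itself, per SetTableReferenceCount in the next file):

#include <cstddef>
#include <cstdint>

/* Hypothetical stand-in for a table-level descriptor, illustration only. */
struct TableEntry {
    uint16_t ref_count = 0;

    void OpenTableReferences(std::size_t count)  { ref_count += static_cast<uint16_t>(count); }
    void CloseTableReferences(std::size_t count) { ref_count -= static_cast<uint16_t>(count); }
    std::size_t GetTableReferenceCount() const   { return ref_count; }
};

Under this model the checks above read naturally: a child table with a nonzero reference count must not be freed, and the extra uncloseable reference taken for kernel tables (the ref_count + 1 in the next file) guarantees the count never reaches zero.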


@@ -27,6 +27,60 @@ namespace ams::kern::arch::arm64 {
m_table = static_cast<L1PageTableEntry *>(tb);
m_is_kernel = true;
m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize;
/* Page table entries created by KInitialPageTable need to be iterated and modified to ensure KPageTable invariants. */
PageTableEntry *level_entries[EntryLevel_Count] = { nullptr, nullptr, m_table };
u32 level = EntryLevel_L1;
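/* Walk every entry the initial page tables created; the outer loop finishes once the L1 cursor has covered all m_num_entries top-level entries. */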
while (level != EntryLevel_L1 || (level_entries[EntryLevel_L1] - static_cast<PageTableEntry *>(m_table)) < m_num_entries) {
/* Get the pte; it must never have the validity-extension flag set. */
auto *pte = level_entries[level];
MESOSPHERE_ASSERT((pte->GetSoftwareReservedBits() & PageTableEntry::SoftwareReservedBit_Valid) == 0);
/* While we're a table, recurse, fixing up the reference counts. */
while (level > EntryLevel_L3 && pte->IsMappedTable()) {
/* Count how many references are in the table. */
auto *table = GetPointer<PageTableEntry>(GetPageTableVirtualAddress(pte->GetTable()));
size_t ref_count = 0;
for (size_t i = 0; i < BlocksPerTable; ++i) {
if (table[i].IsMapped()) {
++ref_count;
}
}
/* Set the reference count for our new page, adding one additional uncloseable reference; kernel pages must never be unreferenced. */
pte->SetTableReferenceCount(ref_count + 1).SetValid();
/* Iterate downwards. */
level -= 1;
level_entries[level] = table;
pte = level_entries[level];
/* Check that the entry isn't unexpected. */
MESOSPHERE_ASSERT((pte->GetSoftwareReservedBits() & PageTableEntry::SoftwareReservedBit_Valid) == 0);
}
/* We're dealing with some block. If it's mapped, set it valid. */
if (pte->IsMapped()) {
pte->SetValid();
}
/* Advance. */
while (true) {
/* Advance to the next entry at the current level. */
++level_entries[level];
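/* Tables are page-sized and page-aligned, so a page-aligned cursor means we just stepped past the last entry of the current table. */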
if (!util::IsAligned(reinterpret_cast<uintptr_t>(level_entries[level]), PageSize)) {
break;
}
/* If we're at the end of a level, advance upwards. */
level_entries[level++] = nullptr;
if (level > EntryLevel_L1) {
return;
}
}
}
}
L1PageTableEntry *KPageTableImpl::Finalize() {
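
The traversal above is recursion-free: it keeps one cursor per level, descends into mapped tables, and pops a level whenever a cursor crosses a page boundary. The same pattern, distilled into a standalone sketch over a toy two-level table (hypothetical types and sizes, illustration only; the kernel detects the end of a table via page alignment of the cursor rather than an explicit end pointer):

#include <cstddef>
#include <cstdio>

/* Toy layout: a table is a fixed-size array of entries; an entry is empty, a block mapping, or a reference to a child table. */
constexpr std::size_t EntriesPerTable = 4; /* 512 in a real 4KiB table */

struct Entry {
    enum Kind { Empty, Block, Table } kind = Empty;
    const Entry *child = nullptr; /* first entry of the child table when kind == Table */
};

/* Visit every reachable entry without recursion: one cursor per level, descending on table entries and popping a level when a cursor runs off the end of its table. */
void Walk(const Entry *root, std::size_t root_entries) {
    constexpr int LevelCount = 2;
    const Entry *cursor[LevelCount] = { nullptr, root };
    const Entry *end[LevelCount]    = { nullptr, root + root_entries };
    int level = LevelCount - 1;

    while (true) {
        const Entry *e = cursor[level];

        /* Descend while the current entry refers to a child table. */
        while (level > 0 && e->kind == Entry::Table) {
            --level;
            cursor[level] = e->child;
            end[level]    = e->child + EntriesPerTable;
            e = cursor[level];
        }

        if (e->kind == Entry::Block) {
            std::printf("block at level %d\n", level);
        }

        /* Advance; pop any level whose cursor reached the end of its table. */
        while (++cursor[level] == end[level]) {
            cursor[level] = nullptr;
            if (++level == LevelCount) {
                return;
            }
        }
    }
}

int main() {
    static const Entry leaf[EntriesPerTable] = {
        { Entry::Block }, { Entry::Empty }, { Entry::Block }, { Entry::Empty },
    };
    static const Entry root[EntriesPerTable] = {
        { Entry::Table, leaf }, { Entry::Block }, { Entry::Empty }, { Entry::Empty },
    };
    Walk(root, EntriesPerTable);
    return 0;
}

Running it prints the two leaf blocks and then the L1 block, mirroring how the kernel pass touches every mapped entry exactly once.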