From 4580a352c09dcd5dde6c2916e617ad4c0ecbf899 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 30 Apr 2025 22:31:25 -0700 Subject: [PATCH] kern: use callback to note pte updates in KPageTableImpl --- .../mesosphere/arch/arm64/kern_k_page_table.hpp | 5 +++++ .../mesosphere/arch/arm64/kern_k_page_table_impl.hpp | 6 ++++-- .../source/arch/arm64/kern_k_page_table.cpp | 11 +++++------ .../source/arch/arm64/kern_k_page_table_impl.cpp | 11 +++++++---- 4 files changed, 21 insertions(+), 12 deletions(-) diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp index f889d199a..7bc1a96f1 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp @@ -200,6 +200,11 @@ namespace ams::kern::arch::arm64 { NOINLINE Result InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end); NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit, size_t process_index); Result Finalize(); + + static void NoteUpdatedCallback(const void *pt) { + /* Note the update. 
*/ + static_cast<const KPageTable *>(pt)->NoteUpdated(); + } private: Result Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll); diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp index e8965c4d8..b1a97fca0 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp @@ -49,6 +49,8 @@ namespace ams::kern::arch::arm64 { EntryLevel level; bool is_contiguous; }; + + using EntryUpdatedCallback = void (*)(const void *); private: static constexpr size_t PageBits = util::CountTrailingZeros(PageSize); static constexpr size_t NumLevels = 3; @@ -144,8 +146,8 @@ namespace ams::kern::arch::arm64 { bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress virt_addr) const; - static bool MergePages(KVirtualAddress *out, TraversalContext *context); - void SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte) const; + static bool MergePages(KVirtualAddress *out, TraversalContext *context, EntryUpdatedCallback on_entry_updated, const void *pt); + void SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte, EntryUpdatedCallback on_entry_updated, const void *pt) const; KProcessAddress GetAddressForContext(const TraversalContext *context) const { KProcessAddress addr = m_is_kernel ? 
static_cast<uintptr_t>(-GetBlockSize(EntryLevel_L1)) * m_num_entries : 0; diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp index d13f81979..abc3b1076 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp @@ -752,13 +752,10 @@ namespace ams::kern::arch::arm64 { while (true) { /* Try to merge. */ KVirtualAddress freed_table = Null<KVirtualAddress>; - if (!impl.MergePages(std::addressof(freed_table), context)) { + if (!impl.MergePages(std::addressof(freed_table), context, &KPageTable::NoteUpdatedCallback, this)) { break; } - /* Note that we updated. */ - this->NoteUpdated(); - /* Free the page. */ if (freed_table != Null<KVirtualAddress>) { ClearPageTable(freed_table); @@ -816,8 +813,7 @@ namespace ams::kern::arch::arm64 { } /* Separate. */ - impl.SeparatePages(entry, context, virt_addr, GetPointer<PageTableEntry>(table)); - this->NoteUpdated(); + impl.SeparatePages(entry, context, virt_addr, GetPointer<PageTableEntry>(table), &KPageTable::NoteUpdatedCallback, this); } R_SUCCEED(); @@ -1025,6 +1021,9 @@ namespace ams::kern::arch::arm64 { /* Finally, apply the changes as directed, flushing the mappings before they're applied (if we should). */ ApplyEntryTemplate(entry_template, flush_mapping ? ApplyOption_FlushDataCache : ApplyOption_None); + + /* Wait for pending stores to complete. */ + cpu::DataSynchronizationBarrierInnerShareableStore(); } /* We've succeeded, now perform what coalescing we can. 
*/ diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp index 150aa1d1c..5ce64e3f6 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp @@ -219,7 +219,7 @@ namespace ams::kern::arch::arm64 { return is_block; } - bool KPageTableImpl::MergePages(KVirtualAddress *out, TraversalContext *context) { + bool KPageTableImpl::MergePages(KVirtualAddress *out, TraversalContext *context, EntryUpdatedCallback on_entry_updated, const void *pt) { /* We want to upgrade the pages by one step. */ if (context->is_contiguous) { /* We can't merge an L1 table. */ @@ -251,6 +251,7 @@ namespace ams::kern::arch::arm64 { const auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_pte->IsHeadMergeDisabled(), head_pte->IsHeadAndBodyMergeDisabled(), tail_pte->IsTailMergeDisabled()); *context->level_entries[context->level + 1] = PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false, false); + on_entry_updated(pt); /* Update our context. */ context->is_contiguous = false; @@ -285,6 +286,7 @@ namespace ams::kern::arch::arm64 { for (size_t i = 0; i < BlocksPerContiguousBlock; ++i) { pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, phys_addr + (i << (PageBits + LevelBits * context->level)), PageTableEntry(entry_template), sw_reserved_bits, true, context->level == EntryLevel_L3); } + on_entry_updated(pt); /* Update our context. 
*/ context->level_entries[context->level] = pte; @@ -294,7 +296,7 @@ namespace ams::kern::arch::arm64 { return true; } - void KPageTableImpl::SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte) const { + void KPageTableImpl::SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte, EntryUpdatedCallback on_entry_updated, const void *pt) const { /* We want to downgrade the pages by one step. */ if (context->is_contiguous) { /* We want to downgrade a contiguous mapping to a non-contiguous mapping. */ @@ -305,6 +307,7 @@ namespace ams::kern::arch::arm64 { for (size_t i = 0; i < BlocksPerContiguousBlock; ++i) { pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, block + (i << (PageBits + LevelBits * context->level)), PageTableEntry(first->GetEntryTemplateForSeparateContiguous(i)), PageTableEntry::SeparateContiguousTag{}); } + on_entry_updated(pt); context->is_contiguous = false; @@ -325,12 +328,12 @@ namespace ams::kern::arch::arm64 { /* Update the block entry to be a table entry. */ *context->level_entries[context->level + 1] = PageTableEntry(PageTableEntry::TableTag{}, KPageTable::GetPageTablePhysicalAddress(KVirtualAddress(pte)), m_is_kernel, true, BlocksPerTable); - + on_entry_updated(pt); context->level_entries[context->level] = pte + this->GetLevelIndex(address, context->level); } - entry->sw_reserved_bits = 0; + entry->sw_reserved_bits = context->level_entries[context->level]->GetSoftwareReservedBits(); entry->attr = 0; entry->phys_addr = this->GetBlock(context->level_entries[context->level], context->level) + this->GetOffset(address, context->level); entry->block_size = static_cast<size_t>(1) << (PageBits + LevelBits * context->level + 4 * context->is_contiguous);