kern: use callback to note pte updates in KPageTableImpl

This commit is contained in:
Michael Scire 2025-04-30 22:31:25 -07:00 committed by SciresM
parent 28296e2aac
commit 4580a352c0
4 changed files with 21 additions and 12 deletions

View file

@@ -200,6 +200,11 @@ namespace ams::kern::arch::arm64 {
NOINLINE Result InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end); NOINLINE Result InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end);
NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit, size_t process_index); NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit, size_t process_index);
Result Finalize(); Result Finalize();
/* Type-erased adapter matching KPageTableImpl::EntryUpdatedCallback (void (*)(const void *)); */
/* call sites pass `this` as pt, so pt is assumed to point at a live KPageTable. */
static void NoteUpdatedCallback(const void *pt) {
/* Note the update. */
static_cast<const KPageTable *>(pt)->NoteUpdated();
}
private: private:
Result Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll); Result Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll);

View file

@@ -49,6 +49,8 @@ namespace ams::kern::arch::arm64 {
EntryLevel level; EntryLevel level;
bool is_contiguous; bool is_contiguous;
}; };
using EntryUpdatedCallback = void (*)(const void *);
private: private:
static constexpr size_t PageBits = util::CountTrailingZeros(PageSize); static constexpr size_t PageBits = util::CountTrailingZeros(PageSize);
static constexpr size_t NumLevels = 3; static constexpr size_t NumLevels = 3;
@@ -144,8 +146,8 @@
bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress virt_addr) const; bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress virt_addr) const;
static bool MergePages(KVirtualAddress *out, TraversalContext *context); static bool MergePages(KVirtualAddress *out, TraversalContext *context, EntryUpdatedCallback on_entry_updated, const void *pt);
void SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte) const; void SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte, EntryUpdatedCallback on_entry_updated, const void *pt) const;
KProcessAddress GetAddressForContext(const TraversalContext *context) const { KProcessAddress GetAddressForContext(const TraversalContext *context) const {
KProcessAddress addr = m_is_kernel ? static_cast<uintptr_t>(-GetBlockSize(EntryLevel_L1)) * m_num_entries : 0; KProcessAddress addr = m_is_kernel ? static_cast<uintptr_t>(-GetBlockSize(EntryLevel_L1)) * m_num_entries : 0;

View file

@@ -752,13 +752,10 @@ namespace ams::kern::arch::arm64 {
while (true) { while (true) {
/* Try to merge. */ /* Try to merge. */
KVirtualAddress freed_table = Null<KVirtualAddress>; KVirtualAddress freed_table = Null<KVirtualAddress>;
if (!impl.MergePages(std::addressof(freed_table), context)) { if (!impl.MergePages(std::addressof(freed_table), context, &KPageTable::NoteUpdatedCallback, this)) {
break; break;
} }
/* Note that we updated. */
this->NoteUpdated();
/* Free the page. */ /* Free the page. */
if (freed_table != Null<KVirtualAddress>) { if (freed_table != Null<KVirtualAddress>) {
ClearPageTable(freed_table); ClearPageTable(freed_table);
@@ -816,8 +813,7 @@
} }
/* Separate. */ /* Separate. */
impl.SeparatePages(entry, context, virt_addr, GetPointer<PageTableEntry>(table)); impl.SeparatePages(entry, context, virt_addr, GetPointer<PageTableEntry>(table), &KPageTable::NoteUpdatedCallback, this);
this->NoteUpdated();
} }
R_SUCCEED(); R_SUCCEED();
@@ -1025,6 +1021,9 @@
/* Finally, apply the changes as directed, flushing the mappings before they're applied (if we should). */ /* Finally, apply the changes as directed, flushing the mappings before they're applied (if we should). */
ApplyEntryTemplate(entry_template, flush_mapping ? ApplyOption_FlushDataCache : ApplyOption_None); ApplyEntryTemplate(entry_template, flush_mapping ? ApplyOption_FlushDataCache : ApplyOption_None);
/* Wait for pending stores to complete. */
cpu::DataSynchronizationBarrierInnerShareableStore();
} }
/* We've succeeded, now perform what coalescing we can. */ /* We've succeeded, now perform what coalescing we can. */

View file

@@ -219,7 +219,7 @@ namespace ams::kern::arch::arm64 {
return is_block; return is_block;
} }
bool KPageTableImpl::MergePages(KVirtualAddress *out, TraversalContext *context) { bool KPageTableImpl::MergePages(KVirtualAddress *out, TraversalContext *context, EntryUpdatedCallback on_entry_updated, const void *pt) {
/* We want to upgrade the pages by one step. */ /* We want to upgrade the pages by one step. */
if (context->is_contiguous) { if (context->is_contiguous) {
/* We can't merge an L1 table. */ /* We can't merge an L1 table. */
@@ -251,6 +251,7 @@
const auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_pte->IsHeadMergeDisabled(), head_pte->IsHeadAndBodyMergeDisabled(), tail_pte->IsTailMergeDisabled()); const auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_pte->IsHeadMergeDisabled(), head_pte->IsHeadAndBodyMergeDisabled(), tail_pte->IsTailMergeDisabled());
*context->level_entries[context->level + 1] = PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false, false); *context->level_entries[context->level + 1] = PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false, false);
on_entry_updated(pt);
/* Update our context. */ /* Update our context. */
context->is_contiguous = false; context->is_contiguous = false;
@@ -285,6 +286,7 @@
for (size_t i = 0; i < BlocksPerContiguousBlock; ++i) { for (size_t i = 0; i < BlocksPerContiguousBlock; ++i) {
pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, phys_addr + (i << (PageBits + LevelBits * context->level)), PageTableEntry(entry_template), sw_reserved_bits, true, context->level == EntryLevel_L3); pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, phys_addr + (i << (PageBits + LevelBits * context->level)), PageTableEntry(entry_template), sw_reserved_bits, true, context->level == EntryLevel_L3);
} }
on_entry_updated(pt);
/* Update our context. */ /* Update our context. */
context->level_entries[context->level] = pte; context->level_entries[context->level] = pte;
@@ -294,7 +296,7 @@
return true; return true;
} }
void KPageTableImpl::SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte) const { void KPageTableImpl::SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte, EntryUpdatedCallback on_entry_updated, const void *pt) const {
/* We want to downgrade the pages by one step. */ /* We want to downgrade the pages by one step. */
if (context->is_contiguous) { if (context->is_contiguous) {
/* We want to downgrade a contiguous mapping to a non-contiguous mapping. */ /* We want to downgrade a contiguous mapping to a non-contiguous mapping. */
@@ -305,6 +307,7 @@
for (size_t i = 0; i < BlocksPerContiguousBlock; ++i) { for (size_t i = 0; i < BlocksPerContiguousBlock; ++i) {
pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, block + (i << (PageBits + LevelBits * context->level)), PageTableEntry(first->GetEntryTemplateForSeparateContiguous(i)), PageTableEntry::SeparateContiguousTag{}); pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, block + (i << (PageBits + LevelBits * context->level)), PageTableEntry(first->GetEntryTemplateForSeparateContiguous(i)), PageTableEntry::SeparateContiguousTag{});
} }
on_entry_updated(pt);
context->is_contiguous = false; context->is_contiguous = false;
@@ -325,12 +328,12 @@
/* Update the block entry to be a table entry. */ /* Update the block entry to be a table entry. */
*context->level_entries[context->level + 1] = PageTableEntry(PageTableEntry::TableTag{}, KPageTable::GetPageTablePhysicalAddress(KVirtualAddress(pte)), m_is_kernel, true, BlocksPerTable); *context->level_entries[context->level + 1] = PageTableEntry(PageTableEntry::TableTag{}, KPageTable::GetPageTablePhysicalAddress(KVirtualAddress(pte)), m_is_kernel, true, BlocksPerTable);
on_entry_updated(pt);
context->level_entries[context->level] = pte + this->GetLevelIndex(address, context->level); context->level_entries[context->level] = pte + this->GetLevelIndex(address, context->level);
} }
entry->sw_reserved_bits = 0; entry->sw_reserved_bits = context->level_entries[context->level]->GetSoftwareReservedBits();
entry->attr = 0; entry->attr = 0;
entry->phys_addr = this->GetBlock(context->level_entries[context->level], context->level) + this->GetOffset(address, context->level); entry->phys_addr = this->GetBlock(context->level_entries[context->level], context->level) + this->GetOffset(address, context->level);
entry->block_size = static_cast<size_t>(1) << (PageBits + LevelBits * context->level + 4 * context->is_contiguous); entry->block_size = static_cast<size_t>(1) << (PageBits + LevelBits * context->level + 4 * context->is_contiguous);