Mirror of https://github.com/Atmosphere-NX/Atmosphere.git (synced 2025-06-04 16:53:48 -04:00)
kern: implement page group unmapping
Parent: 25b0baae59
Commit: 154422562a
11 changed files with 654 additions and 12 deletions
@@ -192,6 +192,12 @@ namespace ams::kern::arch::arm64::cpu {
         DataSynchronizationBarrier();
     }

+    ALWAYS_INLINE void InvalidateTlbByVaDataOnly(KProcessAddress virt_addr) {
+        const u64 value = ((GetInteger(virt_addr) >> 12) & 0xFFFFFFFFFFFul);
+        __asm__ __volatile__("tlbi vaae1is, %[value]" :: [value]"r"(value) : "memory");
+        DataSynchronizationBarrier();
+    }
+
     ALWAYS_INLINE uintptr_t GetCoreLocalRegionAddress() {
         register uintptr_t x18 asm("x18");
         __asm__ __volatile__("" : [x18]"=r"(x18));
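The new InvalidateTlbByVaDataOnly drops the 12-bit page offset and keeps 44 address bits (the 0xFFFFFFFFFFF mask), which is the operand format tlbi vaae1is expects: VA[55:12], applied to all ASIDs across the Inner Shareable domain. A standalone sketch of just that operand computation, with the kernel's address types replaced by plain integers (hypothetical helper name, not kernel code):

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-in for the operand math in InvalidateTlbByVaDataOnly:
    // VA[55:12] packed into the low 44 bits of the tlbi vaae1is operand.
    constexpr uint64_t MakeTlbiVaaOperand(uint64_t virt_addr) {
        return (virt_addr >> 12) & 0xFFFFFFFFFFFull;
    }

    int main() {
        // Any address inside the same 4 KB page yields the same operand.
        assert(MakeTlbiVaaOperand(0xFFFFFF8012345000ull) == MakeTlbiVaaOperand(0xFFFFFF8012345FFFull));
        // Address bits above bit 55 are not encoded in the operand.
        static_assert(MakeTlbiVaaOperand(0xFF00000000001000ull) == 0x1, "top byte is dropped");
        return 0;
    }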
@@ -25,6 +25,9 @@ namespace ams::kern::arch::arm64 {
     class KPageTable : public KPageTableBase {
         NON_COPYABLE(KPageTable);
         NON_MOVEABLE(KPageTable);
+        public:
+            using TraversalEntry = KPageTableImpl::TraversalEntry;
+            using TraversalContext = KPageTableImpl::TraversalContext;
         private:
             KPageTableManager *manager;
             u64 ttbr;
@@ -93,8 +96,7 @@ namespace ams::kern::arch::arm64 {
             virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup *page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
             virtual void FinalizeUpdate(PageLinkedList *page_list) override;

-            KPageTableManager &GetPageTableManager() { return *this->manager; }
-            const KPageTableManager &GetPageTableManager() const { return *this->manager; }
+            KPageTableManager &GetPageTableManager() const { return *this->manager; }
         private:
             constexpr PageTableEntry GetEntryTemplate(const KPageProperties properties) const {
                 /* Set basic attributes. */
@@ -197,6 +199,9 @@ namespace ams::kern::arch::arm64 {

             bool MergePages(KProcessAddress virt_addr, PageLinkedList *page_list);

+            ALWAYS_INLINE Result SeparatePagesImpl(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);
+            Result SeparatePages(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);
+
             static void PteDataSynchronizationBarrier() {
                 cpu::DataSynchronizationBarrierInnerShareable();
             }
@@ -213,6 +218,10 @@ namespace ams::kern::arch::arm64 {
                 cpu::InvalidateEntireTlbDataOnly();
             }

+            void OnKernelTableSinglePageUpdated(KProcessAddress virt_addr) const {
+                cpu::InvalidateTlbByVaDataOnly(virt_addr);
+            }
+
             void NoteUpdated() const {
                 cpu::DataSynchronizationBarrier();

@@ -223,7 +232,14 @@ namespace ams::kern::arch::arm64 {
                 }
             }

-            KVirtualAddress AllocatePageTable(PageLinkedList *page_list, bool reuse_ll) {
+            void NoteSingleKernelPageUpdated(KProcessAddress virt_addr) const {
+                MESOSPHERE_ASSERT(this->IsKernel());
+
+                cpu::DataSynchronizationBarrier();
+                this->OnKernelTableSinglePageUpdated(virt_addr);
+            }
+
+            KVirtualAddress AllocatePageTable(PageLinkedList *page_list, bool reuse_ll) const {
                 KVirtualAddress table = this->GetPageTableManager().Allocate();

                 if (table == Null<KVirtualAddress>) {
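The two hunks above split kernel TLB maintenance into a broad path and a narrow one: NoteUpdated issues a barrier and then invalidates through the table-wide callbacks, while the new NoteSingleKernelPageUpdated asserts it is acting on the kernel table and flushes only the affected virtual address through OnKernelTableSinglePageUpdated. A standalone sketch of that choice, with the cpu:: helpers reduced to stubs (hypothetical names and print statements, not the kernel API):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Stubs standing in for the cpu:: maintenance helpers used in the diff.
    void InvalidateEntireTlbDataOnly()          { std::puts("tlbi: entire (data only)"); }
    void InvalidateTlbByVaDataOnly(uint64_t va) { std::printf("tlbi: va %#llx\n", static_cast<unsigned long long>(va)); }
    void DataSynchronizationBarrier()           { /* a dsb precedes TLB maintenance in the real code */ }

    // Sketch: after editing kernel PTEs, flush one page if exactly one page
    // was touched, otherwise fall back to invalidating everything.
    void NoteKernelTableUpdated(const uint64_t *updated_pages, size_t count) {
        DataSynchronizationBarrier();   // make the PTE writes visible first
        if (count == 1) {
            InvalidateTlbByVaDataOnly(updated_pages[0]);
        } else {
            InvalidateEntireTlbDataOnly();
        }
    }

    int main() {
        const uint64_t one[]  = { 0xFFFFFF8000001000ull };
        const uint64_t many[] = { 0xFFFFFF8000001000ull, 0xFFFFFF8000002000ull };
        NoteKernelTableUpdated(one, 1);    // single-page path
        NoteKernelTableUpdated(many, 2);   // broad path
        return 0;
    }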
@@ -21,6 +21,7 @@
 namespace ams::kern::arch::arm64 {

     constexpr size_t L1BlockSize = 1_GB;
+    constexpr size_t L1ContiguousBlockSize = 0x10 * L1BlockSize;
     constexpr size_t L2BlockSize = 2_MB;
     constexpr size_t L2ContiguousBlockSize = 0x10 * L2BlockSize;
     constexpr size_t L3BlockSize = PageSize;
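These constants enumerate the granularities an AArch64 stage-1 table can map with here: 4 KB L3 pages, 2 MB L2 blocks, 1 GB L1 blocks, and 16x contiguous runs of each. Map and unmap paths generally want the largest granularity that the current address alignment and remaining length allow, and the SeparatePages routines added above exist to break an oversized block back down when an operation needs a boundary inside it. A standalone sketch of that size selection using the same values (hypothetical helper, not the kernel's implementation):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    constexpr size_t PageSize              = 0x1000;                    // 4 KB
    constexpr size_t L3BlockSize           = PageSize;
    constexpr size_t L3ContiguousBlockSize = 0x10 * L3BlockSize;        // 64 KB
    constexpr size_t L2BlockSize           = 0x200000;                  // 2 MB
    constexpr size_t L2ContiguousBlockSize = 0x10 * L2BlockSize;        // 32 MB
    constexpr size_t L1BlockSize           = 0x40000000;                // 1 GB
    constexpr size_t L1ContiguousBlockSize = 0x10 * L1BlockSize;        // 16 GB

    // Largest block size usable at virt_addr for a region of `remaining` bytes.
    constexpr size_t GetMaxBlockSize(uintptr_t virt_addr, size_t remaining) {
        constexpr size_t sizes[] = {
            L1ContiguousBlockSize, L1BlockSize,
            L2ContiguousBlockSize, L2BlockSize,
            L3ContiguousBlockSize, L3BlockSize,
        };
        for (size_t bs : sizes) {
            if ((virt_addr % bs) == 0 && remaining >= bs) {
                return bs;
            }
        }
        return 0;   // callers pass page-aligned addresses, so this is not reached
    }

    int main() {
        assert(GetMaxBlockSize(0x40000000, 0x40000000) == L1BlockSize);   // 1 GB aligned, 1 GB left
        assert(GetMaxBlockSize(0x00200000, 0x10000000) == L2BlockSize);   // 2 MB aligned, 256 MB left
        assert(GetMaxBlockSize(0x00201000, 0x00003000) == L3BlockSize);   // only page-aligned
        return 0;
    }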
@@ -22,12 +22,20 @@

 namespace ams::kern::arch::arm64 {

     /* TODO: This seems worse than KInitialPageTable. Can we fulfill Nintendo's API using KInitialPageTable? */
     /* KInitialPageTable is significantly nicer, but doesn't have KPageTableImpl's traversal semantics. */
     /* Perhaps we could implement those on top of it? */
     class KPageTableImpl {
         NON_COPYABLE(KPageTableImpl);
         NON_MOVEABLE(KPageTableImpl);
+        public:
+            struct TraversalEntry {
+                KPhysicalAddress phys_addr;
+                size_t block_size;
+            };
+
+            struct TraversalContext {
+                const L1PageTableEntry *l1_entry;
+                const L2PageTableEntry *l2_entry;
+                const L3PageTableEntry *l3_entry;
+            };
         private:
             static constexpr size_t PageBits = __builtin_ctzll(PageSize);
             static constexpr size_t NumLevels = 3;
@@ -55,6 +63,14 @@ namespace ams::kern::arch::arm64 {
             static constexpr ALWAYS_INLINE uintptr_t GetContiguousL1Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 1) + 4>(GetInteger(addr)); }
             static constexpr ALWAYS_INLINE uintptr_t GetContiguousL2Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 2) + 4>(GetInteger(addr)); }
             static constexpr ALWAYS_INLINE uintptr_t GetContiguousL3Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 3) + 4>(GetInteger(addr)); }
+
+            static ALWAYS_INLINE KVirtualAddress GetPageTableVirtualAddress(KPhysicalAddress addr) {
+                return KMemoryLayout::GetLinearVirtualAddress(addr);
+            }
+
+            ALWAYS_INLINE bool ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const;
+            ALWAYS_INLINE bool ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const;
+            ALWAYS_INLINE bool ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const;
         private:
             L1PageTableEntry *table;
             bool is_kernel;
@@ -89,6 +105,9 @@ namespace ams::kern::arch::arm64 {
             NOINLINE void InitializeForKernel(void *tb, KVirtualAddress start, KVirtualAddress end);
             L1PageTableEntry *Finalize();

+            bool BeginTraversal(TraversalEntry *out_entry, TraversalContext *out_context, KProcessAddress address) const;
+            bool ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const;
+
             bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress virt_addr) const;
     };

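BeginTraversal and ContinueTraversal give callers a forward iterator over the mapped range: each step reports the physical address and block size of the current mapping, and the TraversalContext keeps the per-level entry pointers so the walk does not have to restart from the L1 table every time. A much-simplified standalone analogue of that iteration pattern, with the hardware table replaced by a sorted map of extents (hypothetical toy types, not the kernel's semantics):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <iterator>
    #include <map>

    // Simplified stand-ins for the kernel's traversal types.
    struct TraversalEntry { uintptr_t phys_addr; size_t block_size; };

    // Toy "page table": virtual base -> (physical base, block size).
    using ToyTable = std::map<uintptr_t, TraversalEntry>;
    struct TraversalContext { ToyTable::const_iterator it; };

    bool BeginTraversal(TraversalEntry *out, TraversalContext *ctx, const ToyTable &table, uintptr_t virt_addr) {
        auto it = table.upper_bound(virt_addr);
        if (it == table.begin()) { return false; }
        --it;
        if (virt_addr >= it->first + it->second.block_size) { return false; }
        const uintptr_t offset = virt_addr - it->first;
        *out = { it->second.phys_addr + offset, it->second.block_size - offset };
        ctx->it = it;
        return true;
    }

    bool ContinueTraversal(TraversalEntry *out, TraversalContext *ctx, const ToyTable &table) {
        auto next = std::next(ctx->it);
        if (next == table.end()) { return false; }
        ctx->it = next;
        *out = next->second;
        return true;
    }

    int main() {
        // Two adjacent virtual extents backed by discontiguous physical memory.
        const ToyTable table = {
            { 0x10000000, { 0x80000000, 0x200000 } },   // 2 MB block
            { 0x10200000, { 0x90000000, 0x1000   } },   // 4 KB page
        };

        TraversalEntry entry;
        TraversalContext ctx;
        for (bool ok = BeginTraversal(&entry, &ctx, table, 0x10000000); ok;
             ok = ContinueTraversal(&entry, &ctx, table)) {
            std::printf("phys=%#lx size=%#zx\n", static_cast<unsigned long>(entry.phys_addr), entry.block_size);
        }
        return 0;
    }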
@@ -169,6 +169,8 @@ namespace ams::kern {
             KPageTableImpl &GetImpl() { return this->impl; }
             const KPageTableImpl &GetImpl() const { return this->impl; }

+            KBlockInfoManager *GetBlockInfoManager() const { return this->block_info_manager; }
+
             bool IsLockedByCurrentThread() const { return this->general_lock.IsLockedByCurrentThread(); }

             bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) {
@@ -212,6 +214,8 @@ namespace ams::kern {
             Result AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, const KPageProperties properties);
             Result MapPageGroupImpl(PageLinkedList *page_list, KProcessAddress address, const KPageGroup &pg, const KPageProperties properties, bool reuse_ll);

+            bool IsValidPageGroup(const KPageGroup &pg, KProcessAddress addr, size_t num_pages) const;
+
             NOINLINE Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
         public:
             bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress virt_addr) const {
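IsValidPageGroup is the safety check behind page group unmapping: the table is walked over [addr, addr + num_pages * PageSize) and the physical extents found there must match, block for block, what the KPageGroup says was mapped, with the backing pages being heap memory. A standalone sketch of just the block-matching part, over two flat extent lists (hypothetical representation; the real check drives the KPageTableImpl traversal API):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    constexpr size_t PageSize = 0x1000;

    struct Block { uintptr_t address; size_t num_pages; };   // one physical extent

    // Sketch: do the page group's blocks describe exactly the physical pages the
    // traversal reported for the mapped range, in order, covering num_pages pages?
    bool IsValidPageGroup(const std::vector<Block> &group, const std::vector<Block> &mapped, size_t num_pages) {
        size_t gi = 0, gi_off = 0, total = 0;
        for (const Block &m : mapped) {
            uintptr_t phys   = m.address;
            size_t remaining = m.num_pages;
            while (remaining > 0) {
                if (gi >= group.size()) { return false; }
                const Block &g = group[gi];
                if (phys != g.address + gi_off * PageSize) { return false; }
                const size_t take = std::min(remaining, g.num_pages - gi_off);
                phys += take * PageSize; gi_off += take; remaining -= take; total += take;
                if (gi_off == g.num_pages) { ++gi; gi_off = 0; }
            }
        }
        return gi == group.size() && gi_off == 0 && total == num_pages;
    }

    int main() {
        // The mapping splits the group's first block across two table entries.
        const std::vector<Block> group  = { { 0x80000000, 4 }, { 0x90000000, 2 } };
        const std::vector<Block> mapped = { { 0x80000000, 3 }, { 0x80003000, 1 }, { 0x90000000, 2 } };
        assert(IsValidPageGroup(group, mapped, 6));
        assert(!IsValidPageGroup(group, mapped, 5));
        return 0;
    }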