Mirror of https://github.com/Atmosphere-NX/Atmosphere.git (synced 2025-06-02 15:49:48 -04:00)
kern: continue page table refactor, implement separate/unmap
This commit is contained in:
parent 02e837d82e
commit 9610f42dc0
7 changed files with 265 additions and 322 deletions
@@ -233,8 +233,11 @@ namespace ams::kern::arch::arm64 {
bool MergePages(KProcessAddress virt_addr, PageLinkedList *page_list);
ALWAYS_INLINE Result SeparatePagesImpl(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);
Result SeparatePages(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);
void MergePages(TraversalContext *context, PageLinkedList *page_list);
void MergePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list);
Result SeparatePagesImpl(TraversalEntry *entry, TraversalContext *context, KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);
Result SeparatePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool reuse_ll);

Result ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, bool flush_mapping, PageLinkedList *page_list, bool reuse_ll);
@@ -20,18 +20,22 @@
namespace ams::kern::arch::arm64 {

constexpr size_t BlocksPerContiguousBlock = 0x10;
constexpr size_t BlocksPerTable = PageSize / sizeof(u64);

constexpr size_t L1BlockSize = 1_GB;
constexpr size_t L1ContiguousBlockSize = 0x10 * L1BlockSize;
constexpr size_t L1ContiguousBlockSize = BlocksPerContiguousBlock * L1BlockSize;
constexpr size_t L2BlockSize = 2_MB;
constexpr size_t L2ContiguousBlockSize = 0x10 * L2BlockSize;
constexpr size_t L2ContiguousBlockSize = BlocksPerContiguousBlock * L2BlockSize;
constexpr size_t L3BlockSize = PageSize;
constexpr size_t L3ContiguousBlockSize = 0x10 * L3BlockSize;
constexpr size_t L3ContiguousBlockSize = BlocksPerContiguousBlock * L3BlockSize;

class PageTableEntry {
public:
struct InvalidTag{};
struct TableTag{};
struct BlockTag{};
struct SeparateContiguousTag{};

enum Permission : u64 {
Permission_KernelRWX = ((0ul << 53) | (1ul << 54) | (0ul << 6)),
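As a side note, the block-size constants in this hunk follow directly from the ARMv8-A 4 KiB translation granule: each table level covers 9 more address bits, each table holds 512 eight-byte entries, and the contiguous hint groups 16 adjacent entries, which is where BlocksPerContiguousBlock = 0x10 comes from. A minimal standalone check (hypothetical "Sketch" names, not part of the commit):

/* Illustrative sketch only; these mirror the constants above for a 4 KiB granule. */
#include <cstddef>
#include <cstdint>

constexpr std::size_t SketchPageSize                 = 0x1000; /* 4 KiB granule */
constexpr std::size_t SketchBlocksPerContiguousBlock = 0x10;   /* 16-entry contiguous hint */

static_assert(SketchPageSize / sizeof(std::uint64_t) == 512);  /* entries per table */

constexpr std::size_t SketchL3BlockSize = SketchPageSize;            /* 4 KiB */
constexpr std::size_t SketchL2BlockSize = 512 * SketchL3BlockSize;   /* 2 MiB */
constexpr std::size_t SketchL1BlockSize = 512 * SketchL2BlockSize;   /* 1 GiB */

static_assert(SketchBlocksPerContiguousBlock * SketchL3BlockSize == 0x10000);      /* 64 KiB */
static_assert(SketchBlocksPerContiguousBlock * SketchL2BlockSize == 0x2000000);    /* 32 MiB */
static_assert(SketchBlocksPerContiguousBlock * SketchL1BlockSize == 0x400000000);  /* 16 GiB */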
@@ -122,6 +126,25 @@ namespace ams::kern::arch::arm64 {
{
/* ... */
}

/* Construct a table. */
constexpr explicit ALWAYS_INLINE PageTableEntry(TableTag, KPhysicalAddress phys_addr, bool is_kernel, bool pxn, size_t num_blocks)
: PageTableEntry(((is_kernel ? 0x3ul : 0) << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | (num_blocks << 2) | 0x3)
{
/* ... */
}

/* Construct a block. */
constexpr explicit ALWAYS_INLINE PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, u8 sw_reserved_bits, bool contig, bool page)
: PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | (page ? ExtensionFlag_TestTableMask : ExtensionFlag_Valid))
{
/* ... */
}

constexpr explicit ALWAYS_INLINE PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, SeparateContiguousTag)
: PageTableEntry(attr, GetInteger(phys_addr))
{
/* ... */
}

protected:
constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
return (m_attributes >> offset) & ((1ul << count) - 1);
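GetBits, as shown above, is the usual shift-and-mask bitfield read over the raw descriptor. A self-contained sketch of the same idea (not from the repository):

/* Minimal sketch of the shift-and-mask read used by GetBits. */
#include <cstddef>
#include <cstdint>

constexpr std::uint64_t GetBitsSketch(std::uint64_t attributes, std::size_t offset, std::size_t count) {
    return (attributes >> offset) & ((1ul << count) - 1);
}

/* Bits [9:8] of a stage-1 block/page descriptor hold the shareability field;
   SH = 0b11 (inner shareable) reads back as 3. */
static_assert(GetBitsSketch(0x0000000000000300ul, 8, 2) == 3);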
@@ -165,7 +188,7 @@ namespace ams::kern::arch::arm64 {
constexpr ALWAYS_INLINE Shareable GetShareable() const { return static_cast<Shareable>(this->SelectBits(8, 2)); }
constexpr ALWAYS_INLINE PageAttribute GetPageAttribute() const { return static_cast<PageAttribute>(this->SelectBits(2, 3)); }
constexpr ALWAYS_INLINE int GetAccessFlagInteger() const { return static_cast<int>(this->GetBits(10, 1)); }
constexpr ALWAYS_INLINE int GetShareableInteger() const { return static_cast<int>(this->GetBits(8, 2)); }
constexpr ALWAYS_INLINE int GetPageAttributeInteger() const { return static_cast<int>(this->GetBits(2, 3)); }
constexpr ALWAYS_INLINE bool IsReadOnly() const { return this->GetBits(7, 1) != 0; }
constexpr ALWAYS_INLINE bool IsUserAccessible() const { return this->GetBits(6, 1) != 0; }
@@ -194,6 +217,12 @@ namespace ams::kern::arch::arm64 {
constexpr ALWAYS_INLINE decltype(auto) SetPageAttribute(PageAttribute a) { this->SetBitsDirect(2, 3, a); return *this; }
constexpr ALWAYS_INLINE decltype(auto) SetMapped(bool m) { static_assert(static_cast<u64>(MappingFlag_Mapped == (1 << 0))); this->SetBit(0, m); return *this; }

constexpr ALWAYS_INLINE size_t GetTableNumEntries() const { return this->GetBits(2, 10); }
constexpr ALWAYS_INLINE decltype(auto) SetTableNumEntries(size_t num) { this->SetBits(2, 10, num); }

constexpr ALWAYS_INLINE decltype(auto) AddTableEntries(size_t num) { return this->SetTableNumEntries(this->GetTableNumEntries() + num); }
constexpr ALWAYS_INLINE decltype(auto) RemoveTableEntries(size_t num) { return this->SetTableNumEntries(this->GetTableNumEntries() - num); }

constexpr ALWAYS_INLINE u64 GetEntryTemplateForMerge() const {
constexpr u64 BaseMask = (0xFFF0000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
return m_attributes & BaseMask;
@@ -204,6 +233,38 @@ namespace ams::kern::arch::arm64 {
return (m_attributes & BaseMaskForMerge) == attr;
}

static constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparateContiguousMask(size_t idx) {
constexpr u64 BaseMask = (0xFFFF000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
if (idx == 0) {
return BaseMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody;
} else if (idx < BlocksPerContiguousBlock - 1) {
return BaseMask;
} else {
return BaseMask | ExtensionFlag_DisableMergeTail;
}
}

constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparateContiguous(size_t idx) const {
return m_attributes & GetEntryTemplateForSeparateContiguousMask(idx);
}

static constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparateMask(size_t idx) {
constexpr u64 BaseMask = (0xFFFF000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
if (idx == 0) {
return BaseMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody;
} else if (idx < BlocksPerContiguousBlock) {
return BaseMask | ExtensionFlag_DisableMergeHeadAndBody;
} else if (idx < BlocksPerTable - 1) {
return BaseMask;
} else {
return BaseMask | ExtensionFlag_DisableMergeTail;
}
}

constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparate(size_t idx) const {
return m_attributes & GetEntryTemplateForSeparateMask(idx);
}

constexpr ALWAYS_INLINE u64 GetRawAttributesUnsafe() const {
return m_attributes;
}
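The GetEntryTemplateForSeparate* masks above keep a descriptor's upper and lower attribute fields, clear the output-address bits [47:12], and retain the software disable-merge flags only at head/tail indices. A rough sketch of the underlying idea (hypothetical names and simplified masks, not the commit's code): attributes are copied from the old large block while each new smaller entry gets its own physical address.

/* Sketch only: splitting a large block into smaller entries that inherit its attributes. */
#include <cstdint>

constexpr std::uint64_t SketchAttributeMask     = 0xFFFF000000000FFFul; /* upper + lower attribute fields */
constexpr std::uint64_t SketchOutputAddressMask = 0x0000FFFFFFFFF000ul; /* output-address bits [47:12]     */

constexpr std::uint64_t MakeSeparatedEntrySketch(std::uint64_t old_block_descriptor, std::uint64_t new_phys_addr) {
    return (old_block_descriptor & SketchAttributeMask) | (new_phys_addr & SketchOutputAddressMask);
}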
@@ -45,7 +45,7 @@ namespace ams::kern::arch::arm64 {
};

struct TraversalContext {
const PageTableEntry *level_entries[EntryLevel_Count];
PageTableEntry *level_entries[EntryLevel_Count];
EntryLevel level;
bool is_contiguous;
};
@@ -125,6 +125,10 @@ namespace ams::kern::arch::arm64 {
ALWAYS_INLINE L3PageTableEntry *GetL3Entry(const L2PageTableEntry *entry, KProcessAddress address) const {
return GetL3EntryFromTable(KMemoryLayout::GetLinearVirtualAddress(entry->GetTable()), address);
}

static constexpr size_t GetBlockSize(EntryLevel level, bool contiguous = false) {
return 1 << (PageBits + LevelBits * level + 4 * contiguous);
}
public:
constexpr explicit KPageTableImpl(util::ConstantInitializeTag) : m_table(), m_is_kernel(), m_num_entries() { /* ... */ }
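Assuming PageBits = 12, LevelBits = 9, a level enum ordered L3 = 0, L2 = 1, L1 = 2, and a 64-bit size_t (all assumptions, not confirmed by this hunk), GetBlockSize reproduces the block and contiguous-block sizes defined earlier:

/* Illustrative re-derivation under the stated assumptions. */
#include <cstddef>

enum SketchEntryLevel : std::size_t { SketchL3 = 0, SketchL2 = 1, SketchL1 = 2 };

constexpr std::size_t GetBlockSizeSketch(SketchEntryLevel level, bool contiguous = false) {
    return std::size_t(1) << (12 + 9 * level + 4 * contiguous);
}

static_assert(GetBlockSizeSketch(SketchL3)       == 0x1000);       /*  4 KiB */
static_assert(GetBlockSizeSketch(SketchL2)       == 0x200000);     /*  2 MiB */
static_assert(GetBlockSizeSketch(SketchL1)       == 0x40000000);   /*  1 GiB */
static_assert(GetBlockSizeSketch(SketchL1, true) == 0x400000000);  /* 16 GiB */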
@@ -141,6 +145,9 @@ namespace ams::kern::arch::arm64 {
bool ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const;

bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress virt_addr) const;

static bool MergePages(KVirtualAddress *out, TraversalContext *context);
void SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte) const;
};

}