kern: refactor to use m_ for member variables

Michael Scire 2020-12-17 17:18:47 -08:00 committed by SciresM
parent 0bf2ade76f
commit 968f50bc07
135 changed files with 3727 additions and 3734 deletions

View file

@@ -44,16 +44,16 @@ namespace ams::kern::arch::arm64::init {
 struct NoClear{};
 private:
-KPhysicalAddress l1_table;
+KPhysicalAddress m_l1_table;
 public:
-constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1, NoClear) : l1_table(l1) { /* ... */ }
+constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1, NoClear) : m_l1_table(l1) { /* ... */ }
 constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1) : KInitialPageTable(l1, NoClear{}) {
-ClearNewPageTable(this->l1_table);
+ClearNewPageTable(m_l1_table);
 }
 constexpr ALWAYS_INLINE uintptr_t GetL1TableAddress() const {
-return GetInteger(this->l1_table);
+return GetInteger(m_l1_table);
 }
 private:
 static constexpr ALWAYS_INLINE L1PageTableEntry *GetL1Entry(KPhysicalAddress _l1_table, KVirtualAddress address) {
@@ -83,7 +83,7 @@ namespace ams::kern::arch::arm64::init {
 const KVirtualAddress end_virt_addr = virt_addr + size;
 size_t count = 0;
 while (virt_addr < end_virt_addr) {
-L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
 /* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
 if (l1_entry->IsBlock() || l1_entry->IsEmpty()) {
@@ -137,7 +137,7 @@ namespace ams::kern::arch::arm64::init {
 const KVirtualAddress end_virt_addr = virt_addr + size;
 size_t count = 0;
 while (virt_addr < end_virt_addr) {
-L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
 /* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
 if (l1_entry->IsBlock() || l1_entry->IsEmpty()) {
@@ -194,7 +194,7 @@ namespace ams::kern::arch::arm64::init {
 }
 PageTableEntry *GetMappingEntry(KVirtualAddress virt_addr, size_t block_size) {
-L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
 if (l1_entry->IsBlock()) {
 MESOSPHERE_INIT_ABORT_UNLESS(block_size == L1BlockSize);
@@ -301,7 +301,7 @@ namespace ams::kern::arch::arm64::init {
 /* Iteratively map pages until the requested region is mapped. */
 while (size > 0) {
-L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
 /* Can we make an L1 block? */
 if (util::IsAligned(GetInteger(virt_addr), L1BlockSize) && util::IsAligned(GetInteger(phys_addr), L1BlockSize) && size >= L1BlockSize) {
@@ -382,7 +382,7 @@ namespace ams::kern::arch::arm64::init {
 KPhysicalAddress GetPhysicalAddress(KVirtualAddress virt_addr) const {
 /* Get the L1 entry. */
-const L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+const L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
 if (l1_entry->IsBlock()) {
 return l1_entry->GetBlock() + (GetInteger(virt_addr) & (L1BlockSize - 1));
@@ -444,7 +444,7 @@ namespace ams::kern::arch::arm64::init {
 };
 while (virt_addr < end_virt_addr) {
-L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
 /* If an L1 block is mapped, update. */
 if (l1_entry->IsBlock()) {
@@ -485,7 +485,7 @@ namespace ams::kern::arch::arm64::init {
 const KVirtualAddress end_virt_addr = virt_addr + size;
 while (virt_addr < end_virt_addr) {
-L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
 /* If an L1 block is mapped, the address isn't free. */
 if (l1_entry->IsBlock()) {
@@ -534,7 +534,7 @@ namespace ams::kern::arch::arm64::init {
 /* Iteratively reprotect pages until the requested region is reprotected. */
 while (size > 0) {
-L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
 /* Check if an L1 block is present. */
 if (l1_entry->IsBlock()) {
@@ -680,43 +680,43 @@ namespace ams::kern::arch::arm64::init {
 uintptr_t free_bitmap;
 };
 private:
-State state;
+State m_state;
 public:
-constexpr ALWAYS_INLINE KInitialPageAllocator() : state{} { /* ... */ }
+constexpr ALWAYS_INLINE KInitialPageAllocator() : m_state{} { /* ... */ }
 ALWAYS_INLINE void Initialize(uintptr_t address) {
-this->state.next_address = address + BITSIZEOF(this->state.free_bitmap) * PageSize;
-this->state.free_bitmap = ~uintptr_t();
+m_state.next_address = address + BITSIZEOF(m_state.free_bitmap) * PageSize;
+m_state.free_bitmap = ~uintptr_t();
 }
 ALWAYS_INLINE void InitializeFromState(uintptr_t state_val) {
 if (kern::GetTargetFirmware() >= ams::TargetFirmware_10_0_0) {
-this->state = *reinterpret_cast<State *>(state_val);
+m_state = *reinterpret_cast<State *>(state_val);
 } else {
-this->state.next_address = state_val;
-this->state.free_bitmap = 0;
+m_state.next_address = state_val;
+m_state.free_bitmap = 0;
 }
 }
 ALWAYS_INLINE void GetFinalState(State *out) {
-*out = this->state;
-this->state = {};
+*out = m_state;
+m_state = {};
 }
 public:
 virtual KPhysicalAddress Allocate() override {
-MESOSPHERE_INIT_ABORT_UNLESS(this->state.next_address != Null<uintptr_t>);
-uintptr_t allocated = this->state.next_address;
-if (this->state.free_bitmap != 0) {
+MESOSPHERE_INIT_ABORT_UNLESS(m_state.next_address != Null<uintptr_t>);
+uintptr_t allocated = m_state.next_address;
+if (m_state.free_bitmap != 0) {
 u64 index;
 uintptr_t mask;
 do {
-index = KSystemControl::Init::GenerateRandomRange(0, BITSIZEOF(this->state.free_bitmap) - 1);
+index = KSystemControl::Init::GenerateRandomRange(0, BITSIZEOF(m_state.free_bitmap) - 1);
 mask = (static_cast<uintptr_t>(1) << index);
-} while ((this->state.free_bitmap & mask) == 0);
-this->state.free_bitmap &= ~mask;
-allocated = this->state.next_address - ((BITSIZEOF(this->state.free_bitmap) - index) * PageSize);
+} while ((m_state.free_bitmap & mask) == 0);
+m_state.free_bitmap &= ~mask;
+allocated = m_state.next_address - ((BITSIZEOF(m_state.free_bitmap) - index) * PageSize);
 } else {
-this->state.next_address += PageSize;
+m_state.next_address += PageSize;
 }
 ClearPhysicalMemory(allocated, PageSize);
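
The hunk above also shows the interesting part of KInitialPageAllocator: pages come from a 64-page window tracked by m_state.free_bitmap, and Allocate() draws a random set bit so early-boot physical allocations are randomized, falling back to bump allocation once the window is exhausted. A minimal standalone model of that scheme, as a sketch only: std::mt19937_64 stands in for KSystemControl::Init::GenerateRandomRange, and the 64-bit window width is hard-coded.

#include <cstdint>
#include <random>

class InitialPageAllocatorModel {
    private:
        static constexpr uintptr_t PageSize = 0x1000;
        uintptr_t m_next_address = 0;
        uint64_t m_free_bitmap = 0;
        std::mt19937_64 m_rng{0};
    public:
        /* m_next_address points just past a 64-page window; each set bit marks a free page. */
        void Initialize(uintptr_t address) {
            m_next_address = address + 64 * PageSize;
            m_free_bitmap = ~uint64_t(0);
        }
        uintptr_t Allocate() {
            uintptr_t allocated = m_next_address;
            if (m_free_bitmap != 0) {
                /* Draw random bit indices until we hit one that is still free. */
                uint64_t index, mask;
                do {
                    index = m_rng() % 64;
                    mask = uint64_t(1) << index;
                } while ((m_free_bitmap & mask) == 0);
                m_free_bitmap &= ~mask;
                /* Map the bit index back to a page address below the window end. */
                allocated = m_next_address - ((64 - index) * PageSize);
            } else {
                /* Window exhausted: plain bump allocation. */
                m_next_address += PageSize;
            }
            return allocated;
        }
};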

View file

@@ -135,36 +135,36 @@ namespace ams::kern::arch::arm64::cpu {
 NON_COPYABLE(GenericRegisterAccessorBase);
 NON_MOVEABLE(GenericRegisterAccessorBase);
 private:
-u64 value;
+u64 m_value;
 public:
-constexpr ALWAYS_INLINE GenericRegisterAccessorBase(u64 v) : value(v) { /* ... */ }
+constexpr ALWAYS_INLINE GenericRegisterAccessorBase(u64 v) : m_value(v) { /* ... */ }
 protected:
 constexpr ALWAYS_INLINE u64 GetValue() const {
-return this->value;
+return m_value;
 }
 constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
-return (this->value >> offset) & ((1ul << count) - 1);
+return (m_value >> offset) & ((1ul << count) - 1);
 }
 constexpr ALWAYS_INLINE void SetBits(size_t offset, size_t count, u64 value) {
 const u64 mask = ((1ul << count) - 1) << offset;
-this->value &= ~mask;
-this->value |= (value & (mask >> offset)) << offset;
+m_value &= ~mask;
+m_value |= (value & (mask >> offset)) << offset;
 }
 constexpr ALWAYS_INLINE void SetBitsDirect(size_t offset, size_t count, u64 value) {
 const u64 mask = ((1ul << count) - 1) << offset;
-this->value &= ~mask;
-this->value |= (value & mask);
+m_value &= ~mask;
+m_value |= (value & mask);
 }
 constexpr ALWAYS_INLINE void SetBit(size_t offset, bool enabled) {
 const u64 mask = 1ul << offset;
 if (enabled) {
-this->value |= mask;
+m_value |= mask;
 } else {
-this->value &= ~mask;
+m_value &= ~mask;
 }
 }
 };
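
Everything in this accessor is mask arithmetic. A free-standing sketch of the same SetBits math with a compile-time spot check; the function here is illustrative, not part of the header:

#include <cstddef>
#include <cstdint>

/* Build a mask of `count` bits at `offset`, clear the field, then insert the
   new value after truncating it to the field's width. */
constexpr uint64_t SetBits(uint64_t reg, size_t offset, size_t count, uint64_t value) {
    const uint64_t mask = ((1ul << count) - 1) << offset;
    reg &= ~mask;
    reg |= (value & (mask >> offset)) << offset;
    return reg;
}

/* Writing 0x5 into the 4-bit field at bit 8 of an all-ones 32-bit pattern. */
static_assert(SetBits(0xFFFFFFFF, 8, 4, 0x5) == 0xFFFFF5FF);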

View file

@@ -21,9 +21,9 @@ namespace ams::kern::arch::arm64 {
 class KHardwareTimer : public KInterruptTask, public KHardwareTimerBase {
 private:
-s64 maximum_time;
+s64 m_maximum_time;
 public:
-constexpr KHardwareTimer() : KInterruptTask(), KHardwareTimerBase(), maximum_time(std::numeric_limits<s64>::max()) { /* ... */ }
+constexpr KHardwareTimer() : KInterruptTask(), KHardwareTimerBase(), m_maximum_time(std::numeric_limits<s64>::max()) { /* ... */ }
 public:
 /* Public API. */
 NOINLINE void Initialize();
@@ -38,7 +38,7 @@ namespace ams::kern::arch::arm64 {
 KScopedSpinLock lk(this->GetLock());
 if (this->RegisterAbsoluteTaskImpl(task, task_time)) {
-if (task_time <= this->maximum_time) {
+if (task_time <= m_maximum_time) {
 SetCompareValue(task_time);
 EnableInterrupt();
 }

View file

@@ -47,18 +47,18 @@ namespace ams::kern::arch::arm64 {
 constexpr KGlobalInterruptEntry() : handler(nullptr), manually_cleared(false), needs_clear(false) { /* ... */ }
 };
 private:
-KCoreLocalInterruptEntry core_local_interrupts[cpu::NumCores][KInterruptController::NumLocalInterrupts]{};
-KInterruptController interrupt_controller{};
-KInterruptController::LocalState local_states[cpu::NumCores]{};
-bool local_state_saved[cpu::NumCores]{};
-mutable KSpinLock global_interrupt_lock{};
-KGlobalInterruptEntry global_interrupts[KInterruptController::NumGlobalInterrupts]{};
-KInterruptController::GlobalState global_state{};
-bool global_state_saved{};
+KCoreLocalInterruptEntry m_core_local_interrupts[cpu::NumCores][KInterruptController::NumLocalInterrupts]{};
+KInterruptController m_interrupt_controller{};
+KInterruptController::LocalState m_local_states[cpu::NumCores]{};
+bool m_local_state_saved[cpu::NumCores]{};
+mutable KSpinLock m_global_interrupt_lock{};
+KGlobalInterruptEntry m_global_interrupts[KInterruptController::NumGlobalInterrupts]{};
+KInterruptController::GlobalState m_global_state{};
+bool m_global_state_saved{};
 private:
-ALWAYS_INLINE KSpinLock &GetGlobalInterruptLock() const { return this->global_interrupt_lock; }
-ALWAYS_INLINE KGlobalInterruptEntry &GetGlobalInterruptEntry(s32 irq) { return this->global_interrupts[KInterruptController::GetGlobalInterruptIndex(irq)]; }
-ALWAYS_INLINE KCoreLocalInterruptEntry &GetLocalInterruptEntry(s32 irq) { return this->core_local_interrupts[GetCurrentCoreId()][KInterruptController::GetLocalInterruptIndex(irq)]; }
+ALWAYS_INLINE KSpinLock &GetGlobalInterruptLock() const { return m_global_interrupt_lock; }
+ALWAYS_INLINE KGlobalInterruptEntry &GetGlobalInterruptEntry(s32 irq) { return m_global_interrupts[KInterruptController::GetGlobalInterruptIndex(irq)]; }
+ALWAYS_INLINE KCoreLocalInterruptEntry &GetLocalInterruptEntry(s32 irq) { return m_core_local_interrupts[GetCurrentCoreId()][KInterruptController::GetLocalInterruptIndex(irq)]; }
 bool OnHandleInterrupt();
 public:
@@ -71,15 +71,15 @@ namespace ams::kern::arch::arm64 {
 NOINLINE void Restore(s32 core_id);
 bool IsInterruptDefined(s32 irq) const {
-return this->interrupt_controller.IsInterruptDefined(irq);
+return m_interrupt_controller.IsInterruptDefined(irq);
 }
 bool IsGlobal(s32 irq) const {
-return this->interrupt_controller.IsGlobal(irq);
+return m_interrupt_controller.IsGlobal(irq);
 }
 bool IsLocal(s32 irq) const {
-return this->interrupt_controller.IsLocal(irq);
+return m_interrupt_controller.IsLocal(irq);
 }
 NOINLINE Result BindHandler(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level);
@@ -89,11 +89,11 @@ namespace ams::kern::arch::arm64 {
 NOINLINE Result ClearInterrupt(s32 irq, s32 core_id);
 ALWAYS_INLINE void SendInterProcessorInterrupt(s32 irq, u64 core_mask) {
-this->interrupt_controller.SendInterProcessorInterrupt(irq, core_mask);
+m_interrupt_controller.SendInterProcessorInterrupt(irq, core_mask);
 }
 ALWAYS_INLINE void SendInterProcessorInterrupt(s32 irq) {
-this->interrupt_controller.SendInterProcessorInterrupt(irq);
+m_interrupt_controller.SendInterProcessorInterrupt(irq);
 }
 static void HandleInterrupt(bool user_mode);

View file

@@ -92,15 +92,15 @@ namespace ams::kern::arch::arm64 {
 return KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(KPageTable::GetBlockType(alignment) + 1));
 }
 private:
-KPageTableManager *manager;
-u64 ttbr;
-u8 asid;
+KPageTableManager *m_manager;
+u64 m_ttbr;
+u8 m_asid;
 protected:
 virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
 virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
 virtual void FinalizeUpdate(PageLinkedList *page_list) override;
-KPageTableManager &GetPageTableManager() const { return *this->manager; }
+KPageTableManager &GetPageTableManager() const { return *m_manager; }
 private:
 constexpr PageTableEntry GetEntryTemplate(const KPageProperties properties) const {
 /* Set basic attributes. */
@@ -166,13 +166,13 @@ namespace ams::kern::arch::arm64 {
 return entry;
 }
 public:
-constexpr KPageTable() : KPageTableBase(), manager(), ttbr(), asid() { /* ... */ }
+constexpr KPageTable() : KPageTableBase(), m_manager(), m_ttbr(), m_asid() { /* ... */ }
 static NOINLINE void Initialize(s32 core_id);
 ALWAYS_INLINE void Activate(u32 proc_id) {
 cpu::DataSynchronizationBarrier();
-cpu::SwitchProcess(this->ttbr, proc_id);
+cpu::SwitchProcess(m_ttbr, proc_id);
 }
 NOINLINE Result InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end);
@@ -225,7 +225,7 @@ namespace ams::kern::arch::arm64 {
 }
 void OnTableUpdated() const {
-cpu::InvalidateTlbByAsid(this->asid);
+cpu::InvalidateTlbByAsid(m_asid);
 }
 void OnKernelTableUpdated() const {

View file

@@ -105,50 +105,50 @@ namespace ams::kern::arch::arm64 {
 ContigType_Contiguous = (0x1ul << 52),
 };
 protected:
-u64 attributes;
+u64 m_attributes;
 public:
 /* Take in a raw attribute. */
-constexpr explicit ALWAYS_INLINE PageTableEntry() : attributes() { /* ... */ }
-constexpr explicit ALWAYS_INLINE PageTableEntry(u64 attr) : attributes(attr) { /* ... */ }
+constexpr explicit ALWAYS_INLINE PageTableEntry() : m_attributes() { /* ... */ }
+constexpr explicit ALWAYS_INLINE PageTableEntry(u64 attr) : m_attributes(attr) { /* ... */ }
-constexpr explicit ALWAYS_INLINE PageTableEntry(InvalidTag) : attributes(0) { /* ... */ }
+constexpr explicit ALWAYS_INLINE PageTableEntry(InvalidTag) : m_attributes(0) { /* ... */ }
 /* Extend a previous attribute. */
-constexpr explicit ALWAYS_INLINE PageTableEntry(const PageTableEntry &rhs, u64 new_attr) : attributes(rhs.attributes | new_attr) { /* ... */ }
+constexpr explicit ALWAYS_INLINE PageTableEntry(const PageTableEntry &rhs, u64 new_attr) : m_attributes(rhs.m_attributes | new_attr) { /* ... */ }
 /* Construct a new attribute. */
 constexpr explicit ALWAYS_INLINE PageTableEntry(Permission perm, PageAttribute p_a, Shareable share, MappingFlag m)
-: attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share) | static_cast<u64>(ExtensionFlag_Valid) | static_cast<u64>(m))
+: m_attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share) | static_cast<u64>(ExtensionFlag_Valid) | static_cast<u64>(m))
 {
 /* ... */
 }
 protected:
 constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
-return (this->attributes >> offset) & ((1ul << count) - 1);
+return (m_attributes >> offset) & ((1ul << count) - 1);
 }
 constexpr ALWAYS_INLINE u64 SelectBits(size_t offset, size_t count) const {
-return this->attributes & (((1ul << count) - 1) << offset);
+return m_attributes & (((1ul << count) - 1) << offset);
 }
 constexpr ALWAYS_INLINE void SetBits(size_t offset, size_t count, u64 value) {
 const u64 mask = ((1ul << count) - 1) << offset;
-this->attributes &= ~mask;
-this->attributes |= (value & (mask >> offset)) << offset;
+m_attributes &= ~mask;
+m_attributes |= (value & (mask >> offset)) << offset;
 }
 constexpr ALWAYS_INLINE void SetBitsDirect(size_t offset, size_t count, u64 value) {
 const u64 mask = ((1ul << count) - 1) << offset;
-this->attributes &= ~mask;
-this->attributes |= (value & mask);
+m_attributes &= ~mask;
+m_attributes |= (value & mask);
 }
 constexpr ALWAYS_INLINE void SetBit(size_t offset, bool enabled) {
 const u64 mask = 1ul << offset;
 if (enabled) {
-this->attributes |= mask;
+m_attributes |= mask;
 } else {
-this->attributes &= ~mask;
+m_attributes &= ~mask;
 }
 }
 public:
@@ -167,9 +167,9 @@ namespace ams::kern::arch::arm64 {
 constexpr ALWAYS_INLINE bool IsReadOnly() const { return this->GetBits(7, 1) != 0; }
 constexpr ALWAYS_INLINE bool IsUserAccessible() const { return this->GetBits(6, 1) != 0; }
 constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBits(5, 1) != 0; }
-constexpr ALWAYS_INLINE bool IsBlock() const { return (this->attributes & ExtensionFlag_TestTableMask) == ExtensionFlag_Valid; }
-constexpr ALWAYS_INLINE bool IsTable() const { return (this->attributes & ExtensionFlag_TestTableMask) == 2; }
-constexpr ALWAYS_INLINE bool IsEmpty() const { return (this->attributes & ExtensionFlag_TestTableMask) == 0; }
+constexpr ALWAYS_INLINE bool IsBlock() const { return (m_attributes & ExtensionFlag_TestTableMask) == ExtensionFlag_Valid; }
+constexpr ALWAYS_INLINE bool IsTable() const { return (m_attributes & ExtensionFlag_TestTableMask) == 2; }
+constexpr ALWAYS_INLINE bool IsEmpty() const { return (m_attributes & ExtensionFlag_TestTableMask) == 0; }
 constexpr ALWAYS_INLINE bool IsMapped() const { return this->GetBits(0, 1) != 0; }
 constexpr ALWAYS_INLINE decltype(auto) SetUserExecuteNever(bool en) { this->SetBit(54, en); return *this; }
@@ -185,21 +185,21 @@ namespace ams::kern::arch::arm64 {
 constexpr ALWAYS_INLINE u64 GetEntryTemplateForMerge() const {
 constexpr u64 BaseMask = (0xFFF0000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
-return this->attributes & BaseMask;
+return m_attributes & BaseMask;
 }
 constexpr ALWAYS_INLINE bool IsForMerge(u64 attr) const {
 constexpr u64 BaseMaskForMerge = ~static_cast<u64>(ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail);
-return (this->attributes & BaseMaskForMerge) == attr;
+return (m_attributes & BaseMaskForMerge) == attr;
 }
 constexpr ALWAYS_INLINE u64 GetRawAttributesUnsafeForSwap() const {
-return this->attributes;
+return m_attributes;
 }
 protected:
 constexpr ALWAYS_INLINE u64 GetRawAttributes() const {
-return this->attributes;
+return m_attributes;
 }
 };
@@ -262,7 +262,7 @@ namespace ams::kern::arch::arm64 {
 }
 constexpr ALWAYS_INLINE u64 GetEntryTemplateForL2Block(size_t idx) const {
-return this->attributes & GetEntryTemplateForL2BlockMask(idx);
+return m_attributes & GetEntryTemplateForL2BlockMask(idx);
 }
 constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, u8 sw_reserved_bits, bool contig) const {
@@ -322,7 +322,7 @@ namespace ams::kern::arch::arm64 {
 }
 constexpr ALWAYS_INLINE u64 GetEntryTemplateForL2Block(size_t idx) const {
-return this->attributes & GetEntryTemplateForL2BlockMask(idx);
+return m_attributes & GetEntryTemplateForL2BlockMask(idx);
 }
 static constexpr ALWAYS_INLINE u64 GetEntryTemplateForL3BlockMask(size_t idx) {
@@ -339,7 +339,7 @@ namespace ams::kern::arch::arm64 {
 }
 constexpr ALWAYS_INLINE u64 GetEntryTemplateForL3Block(size_t idx) const {
-return this->attributes & GetEntryTemplateForL3BlockMask(idx);
+return m_attributes & GetEntryTemplateForL3BlockMask(idx);
 }
 constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, u8 sw_reserved_bits, bool contig) const {
@@ -376,7 +376,7 @@ namespace ams::kern::arch::arm64 {
 }
 constexpr ALWAYS_INLINE u64 GetEntryTemplateForL3Block(size_t idx) const {
-return this->attributes & GetEntryTemplateForL3BlockMask(idx);
+return m_attributes & GetEntryTemplateForL3BlockMask(idx);
 }
 constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, u8 sw_reserved_bits, bool contig) const {

View file

@@ -77,16 +77,16 @@ namespace ams::kern::arch::arm64 {
 ALWAYS_INLINE bool ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const;
 ALWAYS_INLINE bool ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const;
 private:
-L1PageTableEntry *table;
-bool is_kernel;
-u32 num_entries;
+L1PageTableEntry *m_table;
+bool m_is_kernel;
+u32 m_num_entries;
 public:
 ALWAYS_INLINE KVirtualAddress GetTableEntry(KVirtualAddress table, size_t index) const {
 return table + index * sizeof(PageTableEntry);
 }
 ALWAYS_INLINE L1PageTableEntry *GetL1Entry(KProcessAddress address) const {
-return GetPointer<L1PageTableEntry>(GetTableEntry(KVirtualAddress(this->table), GetL1Index(address) & (this->num_entries - 1)));
+return GetPointer<L1PageTableEntry>(GetTableEntry(KVirtualAddress(m_table), GetL1Index(address) & (m_num_entries - 1)));
 }
 ALWAYS_INLINE L2PageTableEntry *GetL2EntryFromTable(KVirtualAddress table, KProcessAddress address) const {
@@ -105,7 +105,7 @@ namespace ams::kern::arch::arm64 {
 return GetL3EntryFromTable(KMemoryLayout::GetLinearVirtualAddress(entry->GetTable()), address);
 }
 public:
-constexpr KPageTableImpl() : table(), is_kernel(), num_entries() { /* ... */ }
+constexpr KPageTableImpl() : m_table(), m_is_kernel(), m_num_entries() { /* ... */ }
 NOINLINE void InitializeForKernel(void *tb, KVirtualAddress start, KVirtualAddress end);
 NOINLINE void InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end);
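
The `& (m_num_entries - 1)` in GetL1Entry is the usual power-of-two wrap. A small sketch of the index math, assuming the standard ARMv8 4KiB-granule layout (1GiB per L1 entry) rather than taking constants from this header:

#include <cstdint>

/* With a 4KiB granule each L1 entry covers 1GiB, so the index is the address
   shifted down by 30 bits, wrapped into the table's power-of-two entry count. */
constexpr uint64_t L1EntryShift = 30;

constexpr uint64_t GetL1Index(uint64_t address, uint32_t num_entries) {
    return (address >> L1EntryShift) & (num_entries - 1);
}

/* 0x80000000 is 2GiB into the address space: index 2 of a 512-entry table. */
static_assert(GetL1Index(0x80000000, 512) == 2);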

View file

@@ -21,274 +21,274 @@ namespace ams::kern::arch::arm64 {
 class KProcessPageTable {
 private:
-KPageTable page_table;
+KPageTable m_page_table;
 public:
-constexpr KProcessPageTable() : page_table() { /* ... */ }
+constexpr KProcessPageTable() : m_page_table() { /* ... */ }
 void Activate(u64 id) {
 /* Activate the page table with the specified contextidr. */
-this->page_table.Activate(id);
+m_page_table.Activate(id);
 }
 Result Initialize(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager) {
-return this->page_table.InitializeForProcess(id, as_type, enable_aslr, enable_das_merge, from_back, pool, code_address, code_size, mem_block_slab_manager, block_info_manager, pt_manager);
+return m_page_table.InitializeForProcess(id, as_type, enable_aslr, enable_das_merge, from_back, pool, code_address, code_size, mem_block_slab_manager, block_info_manager, pt_manager);
 }
-void Finalize() { this->page_table.Finalize(); }
+void Finalize() { m_page_table.Finalize(); }
 Result SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm) {
-return this->page_table.SetMemoryPermission(addr, size, perm);
+return m_page_table.SetMemoryPermission(addr, size, perm);
 }
 Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm) {
-return this->page_table.SetProcessMemoryPermission(addr, size, perm);
+return m_page_table.SetProcessMemoryPermission(addr, size, perm);
 }
 Result SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr) {
-return this->page_table.SetMemoryAttribute(addr, size, mask, attr);
+return m_page_table.SetMemoryAttribute(addr, size, mask, attr);
 }
 Result SetHeapSize(KProcessAddress *out, size_t size) {
-return this->page_table.SetHeapSize(out, size);
+return m_page_table.SetHeapSize(out, size);
 }
 Result SetMaxHeapSize(size_t size) {
-return this->page_table.SetMaxHeapSize(size);
+return m_page_table.SetMaxHeapSize(size);
 }
 Result QueryInfo(KMemoryInfo *out_info, ams::svc::PageInfo *out_page_info, KProcessAddress addr) const {
-return this->page_table.QueryInfo(out_info, out_page_info, addr);
+return m_page_table.QueryInfo(out_info, out_page_info, addr);
 }
 Result QueryPhysicalAddress(ams::svc::PhysicalMemoryInfo *out, KProcessAddress address) const {
-return this->page_table.QueryPhysicalAddress(out, address);
+return m_page_table.QueryPhysicalAddress(out, address);
 }
 Result QueryStaticMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const {
-return this->page_table.QueryStaticMapping(out, address, size);
+return m_page_table.QueryStaticMapping(out, address, size);
 }
 Result QueryIoMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const {
-return this->page_table.QueryIoMapping(out, address, size);
+return m_page_table.QueryIoMapping(out, address, size);
 }
 Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
-return this->page_table.MapMemory(dst_address, src_address, size);
+return m_page_table.MapMemory(dst_address, src_address, size);
 }
 Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
-return this->page_table.UnmapMemory(dst_address, src_address, size);
+return m_page_table.UnmapMemory(dst_address, src_address, size);
 }
 Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
-return this->page_table.MapCodeMemory(dst_address, src_address, size);
+return m_page_table.MapCodeMemory(dst_address, src_address, size);
 }
 Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
-return this->page_table.UnmapCodeMemory(dst_address, src_address, size);
+return m_page_table.UnmapCodeMemory(dst_address, src_address, size);
 }
 Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
-return this->page_table.MapIo(phys_addr, size, perm);
+return m_page_table.MapIo(phys_addr, size, perm);
 }
 Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
-return this->page_table.MapStatic(phys_addr, size, perm);
+return m_page_table.MapStatic(phys_addr, size, perm);
 }
 Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
-return this->page_table.MapRegion(region_type, perm);
+return m_page_table.MapRegion(region_type, perm);
 }
 Result MapPageGroup(KProcessAddress addr, const KPageGroup &pg, KMemoryState state, KMemoryPermission perm) {
-return this->page_table.MapPageGroup(addr, pg, state, perm);
+return m_page_table.MapPageGroup(addr, pg, state, perm);
 }
 Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) {
-return this->page_table.UnmapPageGroup(address, pg, state);
+return m_page_table.UnmapPageGroup(address, pg, state);
 }
 Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
-return this->page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm);
+return m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm);
 }
 Result MapPages(KProcessAddress *out_addr, size_t num_pages, KMemoryState state, KMemoryPermission perm) {
-return this->page_table.MapPages(out_addr, num_pages, state, perm);
+return m_page_table.MapPages(out_addr, num_pages, state, perm);
 }
 Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm) {
-return this->page_table.MapPages(address, num_pages, state, perm);
+return m_page_table.MapPages(address, num_pages, state, perm);
 }
 Result UnmapPages(KProcessAddress addr, size_t num_pages, KMemoryState state) {
-return this->page_table.UnmapPages(addr, num_pages, state);
+return m_page_table.UnmapPages(addr, num_pages, state);
 }
 Result MakeAndOpenPageGroup(KPageGroup *out, KProcessAddress address, size_t num_pages, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) {
-return this->page_table.MakeAndOpenPageGroup(out, address, num_pages, state_mask, state, perm_mask, perm, attr_mask, attr);
+return m_page_table.MakeAndOpenPageGroup(out, address, num_pages, state_mask, state, perm_mask, perm, attr_mask, attr);
 }
 Result InvalidateProcessDataCache(KProcessAddress address, size_t size) {
-return this->page_table.InvalidateProcessDataCache(address, size);
+return m_page_table.InvalidateProcessDataCache(address, size);
 }
 Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size) {
-return this->page_table.ReadDebugMemory(buffer, address, size);
+return m_page_table.ReadDebugMemory(buffer, address, size);
 }
 Result WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size) {
-return this->page_table.WriteDebugMemory(address, buffer, size);
+return m_page_table.WriteDebugMemory(address, buffer, size);
 }
 Result LockForDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {
-return this->page_table.LockForDeviceAddressSpace(out, address, size, perm, is_aligned);
+return m_page_table.LockForDeviceAddressSpace(out, address, size, perm, is_aligned);
 }
 Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
-return this->page_table.UnlockForDeviceAddressSpace(address, size);
+return m_page_table.UnlockForDeviceAddressSpace(address, size);
 }
 Result MakePageGroupForUnmapDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size) {
-return this->page_table.MakePageGroupForUnmapDeviceAddressSpace(out, address, size);
+return m_page_table.MakePageGroupForUnmapDeviceAddressSpace(out, address, size);
 }
 Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size, size_t mapped_size) {
-return this->page_table.UnlockForDeviceAddressSpacePartialMap(address, size, mapped_size);
+return m_page_table.UnlockForDeviceAddressSpacePartialMap(address, size, mapped_size);
 }
 Result LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size) {
-return this->page_table.LockForIpcUserBuffer(out, address, size);
+return m_page_table.LockForIpcUserBuffer(out, address, size);
 }
 Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
-return this->page_table.UnlockForIpcUserBuffer(address, size);
+return m_page_table.UnlockForIpcUserBuffer(address, size);
 }
 Result LockForTransferMemory(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm) {
-return this->page_table.LockForTransferMemory(out, address, size, perm);
+return m_page_table.LockForTransferMemory(out, address, size, perm);
 }
 Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup &pg) {
-return this->page_table.UnlockForTransferMemory(address, size, pg);
+return m_page_table.UnlockForTransferMemory(address, size, pg);
 }
 Result LockForCodeMemory(KPageGroup *out, KProcessAddress address, size_t size) {
-return this->page_table.LockForCodeMemory(out, address, size);
+return m_page_table.LockForCodeMemory(out, address, size);
 }
 Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup &pg) {
-return this->page_table.UnlockForCodeMemory(address, size, pg);
+return m_page_table.UnlockForCodeMemory(address, size, pg);
 }
 Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
-return this->page_table.CopyMemoryFromLinearToUser(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
+return m_page_table.CopyMemoryFromLinearToUser(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
 }
 Result CopyMemoryFromLinearToKernel(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
-return this->page_table.CopyMemoryFromLinearToKernel(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
+return m_page_table.CopyMemoryFromLinearToKernel(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
 }
 Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) {
-return this->page_table.CopyMemoryFromUserToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr);
+return m_page_table.CopyMemoryFromUserToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr);
 }
 Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) {
-return this->page_table.CopyMemoryFromKernelToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr);
+return m_page_table.CopyMemoryFromKernelToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr);
 }
 Result CopyMemoryFromHeapToHeap(KProcessPageTable &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
-return this->page_table.CopyMemoryFromHeapToHeap(dst_page_table.page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
+return m_page_table.CopyMemoryFromHeapToHeap(dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
 }
 Result CopyMemoryFromHeapToHeapWithoutCheckDestination(KProcessPageTable &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
-return this->page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination(dst_page_table.page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
+return m_page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination(dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
 }
 Result SetupForIpc(KProcessAddress *out_dst_addr, size_t size, KProcessAddress src_addr, KProcessPageTable &src_page_table, KMemoryPermission test_perm, KMemoryState dst_state, bool send) {
-return this->page_table.SetupForIpc(out_dst_addr, size, src_addr, src_page_table.page_table, test_perm, dst_state, send);
+return m_page_table.SetupForIpc(out_dst_addr, size, src_addr, src_page_table.m_page_table, test_perm, dst_state, send);
 }
 Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state, KProcess *server_process) {
-return this->page_table.CleanupForIpcServer(address, size, dst_state, server_process);
+return m_page_table.CleanupForIpcServer(address, size, dst_state, server_process);
 }
 Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state) {
-return this->page_table.CleanupForIpcClient(address, size, dst_state);
+return m_page_table.CleanupForIpcClient(address, size, dst_state);
 }
 Result MapPhysicalMemory(KProcessAddress address, size_t size) {
-return this->page_table.MapPhysicalMemory(address, size);
+return m_page_table.MapPhysicalMemory(address, size);
 }
 Result UnmapPhysicalMemory(KProcessAddress address, size_t size) {
-return this->page_table.UnmapPhysicalMemory(address, size);
+return m_page_table.UnmapPhysicalMemory(address, size);
 }
 Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
-return this->page_table.MapPhysicalMemoryUnsafe(address, size);
+return m_page_table.MapPhysicalMemoryUnsafe(address, size);
 }
 Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
-return this->page_table.UnmapPhysicalMemoryUnsafe(address, size);
+return m_page_table.UnmapPhysicalMemoryUnsafe(address, size);
 }
 void DumpMemoryBlocks() const {
-return this->page_table.DumpMemoryBlocks();
+return m_page_table.DumpMemoryBlocks();
 }
 void DumpPageTable() const {
-return this->page_table.DumpPageTable();
+return m_page_table.DumpPageTable();
 }
 size_t CountPageTables() const {
-return this->page_table.CountPageTables();
+return m_page_table.CountPageTables();
 }
 bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const {
-return this->page_table.GetPhysicalAddress(out, address);
+return m_page_table.GetPhysicalAddress(out, address);
 }
-bool Contains(KProcessAddress addr, size_t size) const { return this->page_table.Contains(addr, size); }
+bool Contains(KProcessAddress addr, size_t size) const { return m_page_table.Contains(addr, size); }
-bool IsInAliasRegion(KProcessAddress addr, size_t size) const { return this->page_table.IsInAliasRegion(addr, size); }
-bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const { return this->page_table.IsInUnsafeAliasRegion(addr, size); }
+bool IsInAliasRegion(KProcessAddress addr, size_t size) const { return m_page_table.IsInAliasRegion(addr, size); }
+bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const { return m_page_table.IsInUnsafeAliasRegion(addr, size); }
-bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { return this->page_table.CanContain(addr, size, state); }
+bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { return m_page_table.CanContain(addr, size, state); }
-KProcessAddress GetAddressSpaceStart() const { return this->page_table.GetAddressSpaceStart(); }
-KProcessAddress GetHeapRegionStart() const { return this->page_table.GetHeapRegionStart(); }
-KProcessAddress GetAliasRegionStart() const { return this->page_table.GetAliasRegionStart(); }
-KProcessAddress GetStackRegionStart() const { return this->page_table.GetStackRegionStart(); }
-KProcessAddress GetKernelMapRegionStart() const { return this->page_table.GetKernelMapRegionStart(); }
-KProcessAddress GetAliasCodeRegionStart() const { return this->page_table.GetAliasCodeRegionStart(); }
+KProcessAddress GetAddressSpaceStart() const { return m_page_table.GetAddressSpaceStart(); }
+KProcessAddress GetHeapRegionStart() const { return m_page_table.GetHeapRegionStart(); }
+KProcessAddress GetAliasRegionStart() const { return m_page_table.GetAliasRegionStart(); }
+KProcessAddress GetStackRegionStart() const { return m_page_table.GetStackRegionStart(); }
+KProcessAddress GetKernelMapRegionStart() const { return m_page_table.GetKernelMapRegionStart(); }
+KProcessAddress GetAliasCodeRegionStart() const { return m_page_table.GetAliasCodeRegionStart(); }
-size_t GetAddressSpaceSize() const { return this->page_table.GetAddressSpaceSize(); }
-size_t GetHeapRegionSize() const { return this->page_table.GetHeapRegionSize(); }
-size_t GetAliasRegionSize() const { return this->page_table.GetAliasRegionSize(); }
-size_t GetStackRegionSize() const { return this->page_table.GetStackRegionSize(); }
-size_t GetKernelMapRegionSize() const { return this->page_table.GetKernelMapRegionSize(); }
-size_t GetAliasCodeRegionSize() const { return this->page_table.GetAliasCodeRegionSize(); }
+size_t GetAddressSpaceSize() const { return m_page_table.GetAddressSpaceSize(); }
+size_t GetHeapRegionSize() const { return m_page_table.GetHeapRegionSize(); }
+size_t GetAliasRegionSize() const { return m_page_table.GetAliasRegionSize(); }
+size_t GetStackRegionSize() const { return m_page_table.GetStackRegionSize(); }
+size_t GetKernelMapRegionSize() const { return m_page_table.GetKernelMapRegionSize(); }
+size_t GetAliasCodeRegionSize() const { return m_page_table.GetAliasCodeRegionSize(); }
-size_t GetNormalMemorySize() const { return this->page_table.GetNormalMemorySize(); }
+size_t GetNormalMemorySize() const { return m_page_table.GetNormalMemorySize(); }
-size_t GetCodeSize() const { return this->page_table.GetCodeSize(); }
-size_t GetCodeDataSize() const { return this->page_table.GetCodeDataSize(); }
+size_t GetCodeSize() const { return m_page_table.GetCodeSize(); }
+size_t GetCodeDataSize() const { return m_page_table.GetCodeDataSize(); }
-size_t GetAliasCodeSize() const { return this->page_table.GetAliasCodeSize(); }
-size_t GetAliasCodeDataSize() const { return this->page_table.GetAliasCodeDataSize(); }
+size_t GetAliasCodeSize() const { return m_page_table.GetAliasCodeSize(); }
+size_t GetAliasCodeDataSize() const { return m_page_table.GetAliasCodeDataSize(); }
-u32 GetAllocateOption() const { return this->page_table.GetAllocateOption(); }
+u32 GetAllocateOption() const { return m_page_table.GetAllocateOption(); }
 KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress address) const {
-return this->page_table.GetHeapPhysicalAddress(address);
+return m_page_table.GetHeapPhysicalAddress(address);
 }
 KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress address) const {
-return this->page_table.GetHeapVirtualAddress(address);
+return m_page_table.GetHeapVirtualAddress(address);
 }
 KBlockInfoManager *GetBlockInfoManager() {
-return this->page_table.GetBlockInfoManager();
+return m_page_table.GetBlockInfoManager();
 }
 };
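
One detail worth noting in the facade above: CopyMemoryFromHeapToHeap names dst_page_table.m_page_table directly even though the member is private. That is legal because C++ access control is per-class, not per-object. A minimal illustration with hypothetical types:

class Wrapper {
    private:
        int m_value = 0;
    public:
        void CopyFrom(const Wrapper &other) {
            /* Legal: a member function may access another object's private
               members, as long as that object has the same class. */
            m_value = other.m_value;
        }
        int Get() const { return m_value; }
};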

View file

@@ -21,19 +21,19 @@ namespace ams::kern::arch::arm64 {
 class KNotAlignedSpinLock {
 private:
-u32 packed_tickets;
+u32 m_packed_tickets;
 public:
-constexpr KNotAlignedSpinLock() : packed_tickets(0) { /* ... */ }
+constexpr KNotAlignedSpinLock() : m_packed_tickets(0) { /* ... */ }
 ALWAYS_INLINE void Lock() {
 u32 tmp0, tmp1, tmp2;
 __asm__ __volatile__(
-" prfm pstl1keep, %[packed_tickets]\n"
+" prfm pstl1keep, %[m_packed_tickets]\n"
 "1:\n"
-" ldaxr %w[tmp0], %[packed_tickets]\n"
+" ldaxr %w[tmp0], %[m_packed_tickets]\n"
 " add %w[tmp2], %w[tmp0], #0x10000\n"
-" stxr %w[tmp1], %w[tmp2], %[packed_tickets]\n"
+" stxr %w[tmp1], %w[tmp2], %[m_packed_tickets]\n"
 " cbnz %w[tmp1], 1b\n"
 " \n"
 " and %w[tmp1], %w[tmp0], #0xFFFF\n"
@@ -42,21 +42,21 @@ namespace ams::kern::arch::arm64 {
 " sevl\n"
 "2:\n"
 " wfe\n"
-" ldaxrh %w[tmp1], %[packed_tickets]\n"
+" ldaxrh %w[tmp1], %[m_packed_tickets]\n"
 " cmp %w[tmp1], %w[tmp0], lsr #16\n"
 " b.ne 2b\n"
 "3:\n"
-: [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [tmp2]"=&r"(tmp2), [packed_tickets]"+Q"(this->packed_tickets)
+: [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [tmp2]"=&r"(tmp2), [m_packed_tickets]"+Q"(m_packed_tickets)
 :
 : "cc", "memory"
 );
 }
 ALWAYS_INLINE void Unlock() {
-const u32 value = this->packed_tickets + 1;
+const u32 value = m_packed_tickets + 1;
 __asm__ __volatile__(
-" stlrh %w[value], %[packed_tickets]\n"
-: [packed_tickets]"+Q"(this->packed_tickets)
+" stlrh %w[value], %[m_packed_tickets]\n"
+: [m_packed_tickets]"+Q"(m_packed_tickets)
 : [value]"r"(value)
 : "memory"
 );
@@ -66,39 +66,39 @@ namespace ams::kern::arch::arm64 {
 class KAlignedSpinLock {
 private:
-alignas(cpu::DataCacheLineSize) u16 current_ticket;
-alignas(cpu::DataCacheLineSize) u16 next_ticket;
+alignas(cpu::DataCacheLineSize) u16 m_current_ticket;
+alignas(cpu::DataCacheLineSize) u16 m_next_ticket;
 public:
-constexpr KAlignedSpinLock() : current_ticket(0), next_ticket(0) { /* ... */ }
+constexpr KAlignedSpinLock() : m_current_ticket(0), m_next_ticket(0) { /* ... */ }
 ALWAYS_INLINE void Lock() {
 u32 tmp0, tmp1, got_lock;
 __asm__ __volatile__(
-" prfm pstl1keep, %[next_ticket]\n"
+" prfm pstl1keep, %[m_next_ticket]\n"
 "1:\n"
-" ldaxrh %w[tmp0], %[next_ticket]\n"
+" ldaxrh %w[tmp0], %[m_next_ticket]\n"
 " add %w[tmp1], %w[tmp0], #0x1\n"
-" stxrh %w[got_lock], %w[tmp1], %[next_ticket]\n"
+" stxrh %w[got_lock], %w[tmp1], %[m_next_ticket]\n"
 " cbnz %w[got_lock], 1b\n"
 " \n"
 " sevl\n"
 "2:\n"
 " wfe\n"
-" ldaxrh %w[tmp1], %[current_ticket]\n"
+" ldaxrh %w[tmp1], %[m_current_ticket]\n"
 " cmp %w[tmp1], %w[tmp0]\n"
 " b.ne 2b\n"
-: [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [got_lock]"=&r"(got_lock), [next_ticket]"+Q"(this->next_ticket)
-: [current_ticket]"Q"(this->current_ticket)
+: [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [got_lock]"=&r"(got_lock), [m_next_ticket]"+Q"(m_next_ticket)
+: [m_current_ticket]"Q"(m_current_ticket)
 : "cc", "memory"
 );
 }
 ALWAYS_INLINE void Unlock() {
-const u32 value = this->current_ticket + 1;
+const u32 value = m_current_ticket + 1;
 __asm__ __volatile__(
-" stlrh %w[value], %[current_ticket]\n"
-: [current_ticket]"+Q"(this->current_ticket)
+" stlrh %w[value], %[m_current_ticket]\n"
+: [m_current_ticket]"+Q"(m_current_ticket)
 : [value]"r"(value)
 : "memory"
 );
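
Both classes above are ticket locks written directly in inline assembly. A portable std::atomic model of KAlignedSpinLock's algorithm, as a sketch only: the kernel's version sleeps on wfe/sevl while waiting (and KNotAlignedSpinLock packs both counters into one u32), whereas this model simply busy-waits.

#include <atomic>
#include <cstdint>

class TicketSpinLock {
    private:
        /* Separate cache lines, mirroring the alignas(cpu::DataCacheLineSize) above. */
        alignas(64) std::atomic<uint16_t> m_current_ticket{0};
        alignas(64) std::atomic<uint16_t> m_next_ticket{0};
    public:
        void Lock() {
            /* Take a ticket (the ldaxrh/add/stxrh loop). */
            const uint16_t ticket = m_next_ticket.fetch_add(1, std::memory_order_relaxed);
            /* Wait until the serving counter reaches our ticket (the wfe loop). */
            while (m_current_ticket.load(std::memory_order_acquire) != ticket) {
                /* Busy-wait; the kernel uses wfe/sevl here instead. */
            }
        }
        void Unlock() {
            /* Publish the next serving number (the stlrh store-release). */
            m_current_ticket.store(
                static_cast<uint16_t>(m_current_ticket.load(std::memory_order_relaxed) + 1),
                std::memory_order_release);
        }
};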

View file

@@ -22,16 +22,16 @@ namespace ams::kern::arch::arm64 {
 class KSupervisorPageTable {
 private:
-KPageTable page_table;
-u64 ttbr0_identity[cpu::NumCores];
+KPageTable m_page_table;
+u64 m_ttbr0_identity[cpu::NumCores];
 public:
-constexpr KSupervisorPageTable() : page_table(), ttbr0_identity() { /* ... */ }
+constexpr KSupervisorPageTable() : m_page_table(), m_ttbr0_identity() { /* ... */ }
 NOINLINE void Initialize(s32 core_id);
 void Activate() {
 /* Activate, using process id = 0xFFFFFFFF */
-this->page_table.Activate(0xFFFFFFFF);
+m_page_table.Activate(0xFFFFFFFF);
 }
 void ActivateForInit() {
@@ -42,37 +42,37 @@ namespace ams::kern::arch::arm64 {
 }
 Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
-return this->page_table.MapPages(out_addr, num_pages, alignment, phys_addr, region_start, region_num_pages, state, perm);
+return m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, region_start, region_num_pages, state, perm);
 }
 Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
-return this->page_table.UnmapPages(address, num_pages, state);
+return m_page_table.UnmapPages(address, num_pages, state);
 }
 Result MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
-return this->page_table.MapPageGroup(out_addr, pg, region_start, region_num_pages, state, perm);
+return m_page_table.MapPageGroup(out_addr, pg, region_start, region_num_pages, state, perm);
 }
 Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) {
-return this->page_table.UnmapPageGroup(address, pg, state);
+return m_page_table.UnmapPageGroup(address, pg, state);
 }
 bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const {
-return this->page_table.GetPhysicalAddress(out, address);
+return m_page_table.GetPhysicalAddress(out, address);
 }
-constexpr u64 GetIdentityMapTtbr0(s32 core_id) const { return this->ttbr0_identity[core_id]; }
+constexpr u64 GetIdentityMapTtbr0(s32 core_id) const { return m_ttbr0_identity[core_id]; }
 void DumpMemoryBlocks() const {
-return this->page_table.DumpMemoryBlocks();
+return m_page_table.DumpMemoryBlocks();
 }
 void DumpPageTable() const {
-return this->page_table.DumpPageTable();
+return m_page_table.DumpPageTable();
 }
 size_t CountPageTables() const {
-return this->page_table.CountPageTables();
+return m_page_table.CountPageTables();
 }
 };

View file

@@ -45,19 +45,19 @@ namespace ams::kern::arch::arm64 {
 u64 x28;
 u64 x29;
 };
-} callee_saved;
-u64 lr;
-u64 sp;
-u64 cpacr;
-u64 fpcr;
-u64 fpsr;
-alignas(0x10) u128 fpu_registers[NumFpuRegisters];
-bool locked;
+} m_callee_saved;
+u64 m_lr;
+u64 m_sp;
+u64 m_cpacr;
+u64 m_fpcr;
+u64 m_fpsr;
+alignas(0x10) u128 m_fpu_registers[NumFpuRegisters];
+bool m_locked;
 private:
 static void RestoreFpuRegisters64(const KThreadContext &);
 static void RestoreFpuRegisters32(const KThreadContext &);
 public:
-constexpr explicit KThreadContext() : callee_saved(), lr(), sp(), cpacr(), fpcr(), fpsr(), fpu_registers(), locked() { /* ... */ }
+constexpr explicit KThreadContext() : m_callee_saved(), m_lr(), m_sp(), m_cpacr(), m_fpcr(), m_fpsr(), m_fpu_registers(), m_locked() { /* ... */ }
 Result Initialize(KVirtualAddress u_pc, KVirtualAddress k_sp, KVirtualAddress u_sp, uintptr_t arg, bool is_user, bool is_64_bit, bool is_main);
 Result Finalize();
@@ -66,17 +66,17 @@ namespace ams::kern::arch::arm64 {
 static void FpuContextSwitchHandler(KThread *thread);
-u32 GetFpcr() const { return this->fpcr; }
-u32 GetFpsr() const { return this->fpsr; }
+u32 GetFpcr() const { return m_fpcr; }
+u32 GetFpsr() const { return m_fpsr; }
-void SetFpcr(u32 v) { this->fpcr = v; }
-void SetFpsr(u32 v) { this->fpsr = v; }
+void SetFpcr(u32 v) { m_fpcr = v; }
+void SetFpsr(u32 v) { m_fpsr = v; }
 void CloneFpuStatus();
 void SetFpuRegisters(const u128 *v, bool is_64_bit);
-const u128 *GetFpuRegisters() const { return this->fpu_registers; }
+const u128 *GetFpuRegisters() const { return m_fpu_registers; }
 public:
 static void OnThreadTerminating(const KThread *thread);
 };