mirror of https://github.com/Atmosphere-NX/Atmosphere.git
synced 2025-06-04 08:29:44 -04:00

kern: Implement most of memory init (all cores hit main, but still more to do)

This commit is contained in:
parent b2e522c0a0
commit e7dee2a9fc

22 changed files with 1246 additions and 81 deletions

@@ -17,7 +17,7 @@
 #include <vapours.hpp>
 #include <mesosphere/kern_panic.hpp>
 #include <mesosphere/kern_k_typed_address.hpp>
-#include "../kern_cpu.hpp"
+#include <mesosphere/kern_select_cpu.hpp>
 
 namespace ams::kern::init {
 
@@ -190,10 +190,14 @@ namespace ams::kern::init {
             virtual KPhysicalAddress Allocate() { return Null<KPhysicalAddress>; }
             virtual void Free(KPhysicalAddress phys_addr) { /* Nothing to do here. */ (void)(phys_addr); }
         };
 
+        struct NoClear{};
     private:
         KPhysicalAddress l1_table;
     public:
-        constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1) : l1_table(l1) {
+        constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1, NoClear) : l1_table(l1) { /* ... */ }
+
+        constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1) : KInitialPageTable(l1, NoClear{}) {
             ClearNewPageTable(this->l1_table);
         }
 
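Aside (illustration, not part of the commit): the added NoClear overload is the tag-dispatch idiom. An empty struct selects a constructor that wraps an already-initialized L1 table without re-clearing it, and the plain constructor delegates to it and then clears. A minimal standalone sketch of the same idiom, with hypothetical names:

#include <cstdio>

/* Hypothetical types for illustration; not Atmosphere code. */
struct NoClear {};  /* Empty tag type: selects the "don't clear" constructor. */

class Table {
    private:
        int *m_data;
    public:
        /* Wrap existing storage as-is. */
        constexpr Table(int *data, NoClear) : m_data(data) { /* ... */ }

        /* Default: wrap and zero, by delegating to the tag overload. */
        Table(int *data) : Table(data, NoClear{}) {
            for (int i = 0; i < 4; ++i) { m_data[i] = 0; }
        }
};

int main() {
    int storage[4] = {1, 2, 3, 4};
    Table as_is(storage, NoClear{});   /* leaves storage untouched */
    Table cleared(storage);            /* zeroes storage */
    std::printf("%d\n", storage[0]);   /* prints 0 */
    return 0;
}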
@@ -224,9 +228,9 @@ namespace ams::kern::init {
         public:
             void NOINLINE Map(KVirtualAddress virt_addr, size_t size, KPhysicalAddress phys_addr, const PageTableEntry &attr, IPageAllocator &allocator) {
                 /* Ensure that addresses and sizes are page aligned. */
-                MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
-                MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(phys_addr), PageSize));
-                MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));
+                MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
+                MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(phys_addr), PageSize));
+                MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, PageSize));
 
                 /* Iteratively map pages until the requested region is mapped. */
                 while (size > 0) {
@@ -309,10 +313,37 @@ namespace ams::kern::init {
                 }
             }
 
+            KPhysicalAddress GetPhysicalAddress(KVirtualAddress virt_addr) const {
+                /* Get the L1 entry. */
+                const L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+
+                if (l1_entry->IsBlock()) {
+                    return l1_entry->GetBlock() + (GetInteger(virt_addr) & (L1BlockSize - 1));
+                }
+
+                MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
+
+                /* Get the L2 entry. */
+                const L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
+
+                if (l2_entry->IsBlock()) {
+                    return l2_entry->GetBlock() + (GetInteger(virt_addr) & (L2BlockSize - 1));
+                }
+
+                MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
+
+                /* Get the L3 entry. */
+                const L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
+
+                MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock());
+
+                return l3_entry->GetBlock() + (GetInteger(virt_addr) & (L3BlockSize - 1));
+            }
+
             bool IsFree(KVirtualAddress virt_addr, size_t size) {
                 /* Ensure that addresses and sizes are page aligned. */
-                MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
-                MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));
+                MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
+                MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, PageSize));
 
                 const KVirtualAddress end_virt_addr = virt_addr + size;
                 while (virt_addr < end_virt_addr) {
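Aside (illustration, not part of the commit): at whichever level the walk above finds a block descriptor, the physical address is the block's output address plus the low bits of the virtual address. A worked standalone example, assuming the standard 4 KiB-granule L2 block size of 2 MiB:

#include <cstdint>
#include <cstdio>

/* For a 4 KiB translation granule, an L2 block maps 2 MiB. */
constexpr uint64_t L2BlockSize = 2ul << 20;

int main() {
    /* Suppose the walk stops at an L2 block whose output address is 0x80200000. */
    const uint64_t block_phys = 0x80200000ul;
    const uint64_t virt_addr  = 0x40321ABCul;

    /* The low bits of the virtual address are the offset inside the block. */
    const uint64_t offset = virt_addr & (L2BlockSize - 1);   /* 0x121ABC */
    const uint64_t phys   = block_phys + offset;             /* 0x80321ABC */
    std::printf("phys = 0x%llx\n", static_cast<unsigned long long>(phys));
    return 0;
}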
@@ -360,8 +391,8 @@ namespace ams::kern::init {
                 cpu::DataSynchronizationBarrierInnerShareable();
 
                 /* Ensure that addresses and sizes are page aligned. */
-                MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
-                MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));
+                MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
+                MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, PageSize));
 
                 /* Iteratively reprotect pages until the requested region is reprotected. */
                 while (size > 0) {
@@ -371,9 +402,9 @@ namespace ams::kern::init {
                     if (l1_entry->IsBlock()) {
                         /* Ensure that we are allowed to have an L1 block here. */
                         const KPhysicalAddress block = l1_entry->GetBlock();
-                        MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
-                        MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L1BlockSize));
-                        MESOSPHERE_ABORT_UNLESS(l1_entry->IsCompatibleWithAttribute(attr_before, false));
+                        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
+                        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L1BlockSize));
+                        MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsCompatibleWithAttribute(attr_before, false));
 
                         /* Invalidate the existing L1 block. */
                         *static_cast<PageTableEntry *>(l1_entry) = InvalidPageTableEntry;
@@ -389,7 +420,7 @@ namespace ams::kern::init {
                     }
 
                     /* Not a block, so we must be a table. */
-                    MESOSPHERE_ABORT_UNLESS(l1_entry->IsTable());
+                    MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
 
                     L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
                     if (l2_entry->IsBlock()) {
@@ -397,14 +428,14 @@ namespace ams::kern::init {
 
                         if (l2_entry->IsContiguous()) {
                             /* Ensure that we are allowed to have a contiguous L2 block here. */
-                            MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize));
-                            MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2ContiguousBlockSize));
-                            MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L2ContiguousBlockSize));
+                            MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize));
+                            MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2ContiguousBlockSize));
+                            MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L2ContiguousBlockSize));
 
                             /* Invalidate the existing contiguous L2 block. */
                             for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
                                 /* Ensure that the entry is valid. */
-                                MESOSPHERE_ABORT_UNLESS(l2_entry[i].IsCompatibleWithAttribute(attr_before, true));
+                                MESOSPHERE_INIT_ABORT_UNLESS(l2_entry[i].IsCompatibleWithAttribute(attr_before, true));
                                 static_cast<PageTableEntry *>(l2_entry)[i] = InvalidPageTableEntry;
                             }
                             cpu::DataSynchronizationBarrierInnerShareable();
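Aside (illustration, not part of the commit): for a 4 KiB granule the contiguous hint groups 16 adjacent entries, so a contiguous L2 span covers 16 * 2 MiB = 32 MiB and all 16 descriptors must be cleared as a unit, exactly as the loop above does. A minimal standalone sketch of that arithmetic:

#include <cstddef>
#include <cstdint>
#include <cstdio>

/* Assumed 4 KiB-granule sizes; names mirror the kernel's constants. */
constexpr uint64_t L2BlockSize           = 2ul << 20;        /* 2 MiB  */
constexpr uint64_t L2ContiguousBlockSize = 16 * L2BlockSize; /* 32 MiB */

int main() {
    uint64_t entries[16];
    const size_t count = static_cast<size_t>(L2ContiguousBlockSize / L2BlockSize);
    for (size_t i = 0; i < count; i++) {
        entries[i] = 0;  /* an all-zero descriptor is invalid (valid bit clear) */
    }
    std::printf("cleared %zu entries\n", count);  /* prints 16 */
    return 0;
}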
@@ -419,10 +450,10 @@ namespace ams::kern::init {
                             size -= L2ContiguousBlockSize;
                         } else {
                             /* Ensure that we are allowed to have an L2 block here. */
-                            MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2BlockSize));
-                            MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2BlockSize));
-                            MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L2BlockSize));
-                            MESOSPHERE_ABORT_UNLESS(l2_entry->IsCompatibleWithAttribute(attr_before, false));
+                            MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2BlockSize));
+                            MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2BlockSize));
+                            MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L2BlockSize));
+                            MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsCompatibleWithAttribute(attr_before, false));
 
                             /* Invalidate the existing L2 block. */
                             *static_cast<PageTableEntry *>(l2_entry) = InvalidPageTableEntry;
@@ -440,23 +471,23 @@ namespace ams::kern::init {
                     }
 
                     /* Not a block, so we must be a table. */
-                    MESOSPHERE_ABORT_UNLESS(l2_entry->IsTable());
+                    MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
 
                     /* We must have a mapped l3 entry to reprotect. */
                     L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
-                    MESOSPHERE_ABORT_UNLESS(l3_entry->IsBlock());
+                    MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock());
                     const KPhysicalAddress block = l3_entry->GetBlock();
 
                     if (l3_entry->IsContiguous()) {
                         /* Ensure that we are allowed to have a contiguous L3 block here. */
-                        MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize));
-                        MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3ContiguousBlockSize));
-                        MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L3ContiguousBlockSize));
+                        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize));
+                        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3ContiguousBlockSize));
+                        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L3ContiguousBlockSize));
 
                         /* Invalidate the existing contiguous L3 block. */
                         for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
                             /* Ensure that the entry is valid. */
-                            MESOSPHERE_ABORT_UNLESS(l3_entry[i].IsCompatibleWithAttribute(attr_before, true));
+                            MESOSPHERE_INIT_ABORT_UNLESS(l3_entry[i].IsCompatibleWithAttribute(attr_before, true));
                             static_cast<PageTableEntry *>(l3_entry)[i] = InvalidPageTableEntry;
                         }
                         cpu::DataSynchronizationBarrierInnerShareable();
@@ -471,10 +502,10 @@ namespace ams::kern::init {
                         size -= L3ContiguousBlockSize;
                     } else {
                         /* Ensure that we are allowed to have an L3 block here. */
-                        MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3BlockSize));
-                        MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3BlockSize));
-                        MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L3BlockSize));
-                        MESOSPHERE_ABORT_UNLESS(l3_entry->IsCompatibleWithAttribute(attr_before, false));
+                        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3BlockSize));
+                        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3BlockSize));
+                        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L3BlockSize));
+                        MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsCompatibleWithAttribute(attr_before, false));
 
                         /* Invalidate the existing L3 block. */
                         *static_cast<PageTableEntry *>(l3_entry) = InvalidPageTableEntry;
@@ -505,14 +536,18 @@ namespace ams::kern::init {
                 this->next_address = address;
             }
 
-            ALWAYS_INLINE uintptr_t GetFinalState() {
+            ALWAYS_INLINE uintptr_t GetFinalNextAddress() {
                 const uintptr_t final_address = this->next_address;
                 this->next_address = Null<uintptr_t>;
                 return final_address;
             }
 
+            ALWAYS_INLINE uintptr_t GetFinalState() {
+                return this->GetFinalNextAddress();
+            }
         public:
             virtual KPhysicalAddress Allocate() override {
-                MESOSPHERE_ABORT_UNLESS(this->next_address != Null<uintptr_t>);
+                MESOSPHERE_INIT_ABORT_UNLESS(this->next_address != Null<uintptr_t>);
                 const uintptr_t allocated = this->next_address;
                 this->next_address += PageSize;
                 std::memset(reinterpret_cast<void *>(allocated), 0, PageSize);
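Aside (illustration, not part of the commit): the Allocate override above is a linear bump allocator over contiguous memory, zeroing each page as it hands it out, and GetFinalNextAddress reports where the cursor ended while poisoning it. A userspace analogue with hypothetical names:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

constexpr size_t PageSize = 0x1000;

/* Sketch of the same idea: hand out zeroed, page-sized chunks by advancing one cursor. */
class BumpPageAllocator {
    private:
        uintptr_t m_next;
        uintptr_t m_end;
    public:
        BumpPageAllocator(void *region, size_t size)
            : m_next(reinterpret_cast<uintptr_t>(region)), m_end(m_next + size) { /* ... */ }

        void *Allocate() {
            if (m_next + PageSize > m_end) { return nullptr; }
            void *page = reinterpret_cast<void *>(m_next);
            m_next += PageSize;
            std::memset(page, 0, PageSize);  /* pages are handed out pre-zeroed */
            return page;
        }

        /* Like GetFinalNextAddress(): report the cursor, then poison it. */
        uintptr_t Finish() {
            const uintptr_t final_address = m_next;
            m_next = 0;
            return final_address;
        }
};

int main() {
    alignas(PageSize) static unsigned char region[4 * PageSize];
    BumpPageAllocator allocator(region, sizeof(region));
    void *a = allocator.Allocate();
    void *b = allocator.Allocate();
    std::printf("a=%p b=%p end=0x%llx\n", a, b,
                static_cast<unsigned long long>(allocator.Finish()));
    return 0;
}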
@@ -59,6 +59,9 @@ namespace ams::kern::arm64::cpu {
         EnsureInstructionConsistency();
     }
 
+    /* Synchronization helpers. */
+    NOINLINE void SynchronizeAllCores();
+
     /* Cache management helpers. */
     void FlushEntireDataCacheShared();
     void FlushEntireDataCacheLocal();
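Aside (illustration, not part of the commit): this hunk only adds the SynchronizeAllCores declaration; its implementation is elsewhere in the commit. One plausible shape for such a rendezvous is a sense-reversing barrier, sketched here in portable C++ with threads standing in for cores (illustrative only, not the kernel's implementation):

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

class CoreBarrier {
    private:
        std::atomic<int> m_remaining;
        std::atomic<int> m_sense;
        const int m_count;
    public:
        explicit CoreBarrier(int count) : m_remaining(count), m_sense(0), m_count(count) { /* ... */ }

        void Synchronize() {
            const int sense = m_sense.load();
            if (m_remaining.fetch_sub(1) == 1) {
                /* Last arrival: reset the count, then release the waiters. */
                m_remaining.store(m_count);
                m_sense.store(sense + 1);
            } else {
                while (m_sense.load() == sense) { /* spin until released */ }
            }
        }
};

int main() {
    CoreBarrier barrier(4);
    std::vector<std::thread> threads;
    for (int i = 0; i < 4; ++i) {
        threads.emplace_back([&barrier, i] {
            barrier.Synchronize();
            std::printf("core %d past barrier\n", i);
        });
    }
    for (auto &t : threads) { t.join(); }
    return 0;
}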
@@ -37,8 +37,8 @@ namespace ams::kern::arm64::cpu {
     MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Ttbr0El1, ttbr0_el1)
     MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Ttbr1El1, ttbr1_el1)
 
-    MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(MairEl1, mair_el1)
     MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(TcrEl1, tcr_el1)
+    MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(MairEl1, mair_el1)
 
     MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(SctlrEl1, sctlr_el1)
 
@@ -48,19 +48,88 @@ namespace ams::kern::arm64::cpu {
     MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CsselrEl1, csselr_el1)
 
     /* Base class for register accessors. */
-    class GenericRegisterAccessor {
+    class GenericRegisterAccessorBase {
+        NON_COPYABLE(GenericRegisterAccessorBase);
+        NON_MOVEABLE(GenericRegisterAccessorBase);
         private:
             u64 value;
         public:
-            ALWAYS_INLINE GenericRegisterAccessor(u64 v) : value(v) { /* ... */ }
+            constexpr ALWAYS_INLINE GenericRegisterAccessorBase(u64 v) : value(v) { /* ... */ }
         protected:
             constexpr ALWAYS_INLINE u64 GetValue() const {
                 return this->value;
             }
 
             constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
                 return (this->value >> offset) & ((1ul << count) - 1);
             }
     };
 
-    /* Special code for main id register. */
-    class MainIdRegisterAccessor : public GenericRegisterAccessor {
+    template<typename Derived>
+    class GenericRegisterAccessor : public GenericRegisterAccessorBase {
+        public:
+            constexpr ALWAYS_INLINE GenericRegisterAccessor(u64 v) : GenericRegisterAccessorBase(v) { /* ... */ }
+        protected:
+            ALWAYS_INLINE void Store() const {
+                static_cast<const Derived *>(this)->Store();
+            }
+    };
+
+    #define MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(name) class name##RegisterAccessor : public GenericRegisterAccessor<name##RegisterAccessor>
+
+    #define MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(accessor, reg_name)                                                  \
+        ALWAYS_INLINE accessor##RegisterAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(reg_name)) { /* ... */ } \
+        constexpr ALWAYS_INLINE accessor##RegisterAccessor(u64 v) : GenericRegisterAccessor(v) { /* ... */ }                    \
+                                                                                                                                \
+        ALWAYS_INLINE void Store() { const u64 v = this->GetValue(); MESOSPHERE_CPU_SET_SYSREG(reg_name, v); }
+
+    /* Accessors. */
+    MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MemoryAccessIndirection) {
+        public:
+            MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MemoryAccessIndirection, mair_el1)
+    };
+
+    MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(TranslationControl) {
+        public:
+            MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(TranslationControl, tcr_el1)
+
+            constexpr ALWAYS_INLINE size_t GetT1Size() const {
+                const size_t shift_value = this->GetBits(16, 6);
+                return size_t(1) << (size_t(64) - shift_value);
+            }
+    };
+
+    MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MultiprocessorAffinity) {
+        public:
+            MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MultiprocessorAffinity, mpidr_el1)
+
+            constexpr ALWAYS_INLINE u64 GetAff0() const {
+                return this->GetBits(0, 8);
+            }
+
+            constexpr ALWAYS_INLINE u64 GetAff1() const {
+                return this->GetBits(8, 8);
+            }
+
+            constexpr ALWAYS_INLINE u64 GetAff2() const {
+                return this->GetBits(16, 8);
+            }
+
+            constexpr ALWAYS_INLINE u64 GetAff3() const {
+                return this->GetBits(32, 8);
+            }
+
+            constexpr ALWAYS_INLINE u64 GetCpuOnArgument() const {
+                constexpr u64 Mask = 0x000000FF00FFFF00ul;
+                return this->GetValue() & Mask;
+            }
+    };
+
+    MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(ThreadId) {
+        public:
+            MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(ThreadId, tpidr_el1)
+    };
+
+    MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MainId) {
         public:
             enum class Implementer {
                 ArmLimited = 0x41,
@@ -70,7 +139,7 @@ namespace ams::kern::arm64::cpu {
                 CortexA57 = 0xD07,
             };
         public:
-            ALWAYS_INLINE MainIdRegisterAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(midr_el1)) { /* ... */ }
+            MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MainId, midr_el1)
         public:
             constexpr ALWAYS_INLINE Implementer GetImplementer() const {
                 return static_cast<Implementer>(this->GetBits(24, 8));
@@ -94,9 +163,9 @@ namespace ams::kern::arm64::cpu {
     };
 
     /* Accessors for cache registers. */
-    class CacheLineIdAccessor : public GenericRegisterAccessor {
+    MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CacheLineId) {
         public:
-            ALWAYS_INLINE CacheLineIdAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(clidr_el1)) { /* ... */ }
+            MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CacheLineId, clidr_el1)
         public:
             constexpr ALWAYS_INLINE int GetLevelsOfCoherency() const {
                 return static_cast<int>(this->GetBits(24, 3));
@@ -109,9 +178,9 @@ namespace ams::kern::arm64::cpu {
             /* TODO: Other bitfield accessors? */
     };
 
-    class CacheSizeIdAccessor : public GenericRegisterAccessor {
+    MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CacheSizeId) {
         public:
-            ALWAYS_INLINE CacheSizeIdAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(ccsidr_el1)) { /* ... */ }
+            MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CacheSizeId, ccsidr_el1)
         public:
             constexpr ALWAYS_INLINE int GetNumberOfSets() const {
                 return static_cast<int>(this->GetBits(13, 15));
@@ -128,6 +197,8 @@ namespace ams::kern::arm64::cpu {
             /* TODO: Other bitfield accessors? */
     };
 
+    #undef MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS
+    #undef MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS
     #undef MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS
     #undef MESOSPHERE_CPU_GET_SYSREG
     #undef MESOSPHERE_CPU_SET_SYSREG
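Aside (illustration, not part of the commit): the refactor replaces hand-written per-register accessor classes with a CRTP base plus macros that stamp out the load constructor and Store() for each system register. A simplified standalone sketch of the pattern, with the macro layers flattened into one class and a plain variable standing in for tcr_el1 (userspace cannot read EL1 registers), including the T1SZ arithmetic from GetT1Size (T1SZ = 25 gives 2^(64-25) bytes = 512 GiB):

#include <cstddef>
#include <cstdint>
#include <cstdio>

/* Fake backing store standing in for the tcr_el1 system register. */
static uint64_t g_fake_tcr = 25ul << 16;  /* T1SZ = 25 in bits [21:16] */

class AccessorBase {
    private:
        uint64_t m_value;
    public:
        constexpr explicit AccessorBase(uint64_t v) : m_value(v) { /* ... */ }
    protected:
        constexpr uint64_t GetValue() const { return m_value; }
        constexpr uint64_t GetBits(size_t offset, size_t count) const {
            return (m_value >> offset) & ((1ul << count) - 1);
        }
};

class TranslationControlAccessor : public AccessorBase {
    public:
        /* Load-from-register constructor, as the FUNCTIONS macro generates. */
        TranslationControlAccessor() : AccessorBase(g_fake_tcr) { /* ... */ }
        constexpr explicit TranslationControlAccessor(uint64_t v) : AccessorBase(v) { /* ... */ }

        void Store() const { g_fake_tcr = this->GetValue(); }

        /* TCR_EL1.T1SZ lives in bits [21:16]; the TTBR1 region is 2^(64 - T1SZ) bytes. */
        constexpr size_t GetT1Size() const {
            const size_t shift_value = this->GetBits(16, 6);
            return size_t(1) << (size_t(64) - shift_value);
        }
};

int main() {
    TranslationControlAccessor tcr;
    std::printf("T1 size = 0x%llx bytes\n",
                static_cast<unsigned long long>(tcr.GetT1Size()));  /* 0x8000000000 = 512 GiB */
    return 0;
}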