kern: refactor init (kill identity map, merge cpu on logic)

parent 8db22967bf
commit 035cebef9d

20 changed files with 431 additions and 387 deletions
@@ -29,8 +29,6 @@ namespace ams::kern::init {
         u64 sp;
         u64 entrypoint;
         u64 argument;
-        u64 setup_function;
-        u64 exception_stack;
     };
     static_assert(alignof(KInitArguments) == util::CeilingPowerOfTwo(INIT_ARGUMENTS_SIZE));
     static_assert(sizeof(KInitArguments) == std::max(INIT_ARGUMENTS_SIZE, util::CeilingPowerOfTwo(INIT_ARGUMENTS_SIZE)));

@@ -45,7 +43,5 @@ namespace ams::kern::init {
     static_assert(AMS_OFFSETOF(KInitArguments, sp) == INIT_ARGUMENTS_SP);
     static_assert(AMS_OFFSETOF(KInitArguments, entrypoint) == INIT_ARGUMENTS_ENTRYPOINT);
     static_assert(AMS_OFFSETOF(KInitArguments, argument) == INIT_ARGUMENTS_ARGUMENT);
-    static_assert(AMS_OFFSETOF(KInitArguments, setup_function) == INIT_ARGUMENTS_SETUP_FUNCTION);
-    static_assert(AMS_OFFSETOF(KInitArguments, exception_stack) == INIT_ARGUMENTS_EXCEPTION_STACK);
 
 }
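The two removed fields account for exactly the 0x10 bytes by which INIT_ARGUMENTS_SIZE shrinks from 0x60 to 0x50 in the assembly-offsets hunk further down. A minimal standalone sketch of the layout-check pattern these static_asserts implement, with hypothetical stand-in names rather than the kernel's real headers:

    #include <cstddef>
    #include <cstdint>

    /* Offsets as the assembly sees them; values taken from the #define hunk below. */
    #define SKETCH_INIT_ARGUMENTS_SP         0x38
    #define SKETCH_INIT_ARGUMENTS_ENTRYPOINT 0x40
    #define SKETCH_INIT_ARGUMENTS_ARGUMENT   0x48
    #define SKETCH_INIT_ARGUMENTS_SIZE       0x50

    struct InitArgumentsSketch {
        std::uint64_t leading_fields[7]; /* stand-in for the fields at 0x00..0x30 (ttbr0, ttbr1, tcr, ...) */
        std::uint64_t sp;
        std::uint64_t entrypoint;
        std::uint64_t argument;
    };

    /* If the C++ layout and the assembly offsets ever disagree, compilation fails here. */
    static_assert(offsetof(InitArgumentsSketch, sp)         == SKETCH_INIT_ARGUMENTS_SP);
    static_assert(offsetof(InitArgumentsSketch, entrypoint) == SKETCH_INIT_ARGUMENTS_ENTRYPOINT);
    static_assert(offsetof(InitArgumentsSketch, argument)   == SKETCH_INIT_ARGUMENTS_ARGUMENT);
    static_assert(sizeof(InitArgumentsSketch)               == SKETCH_INIT_ARGUMENTS_SIZE);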
@@ -23,17 +23,6 @@
 
 namespace ams::kern::arch::arm64::init {
 
-    inline void ClearPhysicalMemory(KPhysicalAddress address, size_t size) {
-        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, sizeof(u64)));
-
-        /* This Physical Address -> void * conversion is valid, because this is init page table code. */
-        /* The MMU is necessarily not yet turned on, if we are creating an initial page table. */
-        volatile u64 *ptr = reinterpret_cast<volatile u64 *>(GetInteger(address));
-        for (size_t i = 0; i < size / sizeof(u64); ++i) {
-            ptr[i] = 0;
-        }
-    }
-
     /* NOTE: Nintendo uses virtual functions, rather than a concept + template. */
     template<typename T>
     concept IsInitialPageAllocator = requires (T &t, KPhysicalAddress phys_addr, size_t size) {
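This deletion is the "kill identity map" half of the commit message: casting a physical address straight to a pointer is only valid while the MMU is off or an identity mapping covers the address. Once this code also runs with the kernel mapped at a virtual offset, every physical table address must be translated before it is dereferenced, which is what the new phys_to_virt_offset parameters in the hunks below provide. An illustrative non-kernel helper showing the translation:

    #include <cstdint>

    /* Passing phys_to_virt_offset == 0 reproduces the old identity-map behavior. */
    inline volatile std::uint64_t *PhysToDereferenceable(std::uint64_t phys_addr, std::uint64_t phys_to_virt_offset) {
        return reinterpret_cast<volatile std::uint64_t *>(phys_addr + phys_to_virt_offset);
    }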
@@ -41,25 +30,23 @@ namespace ams::kern::arch::arm64::init {
         { t.Free(phys_addr, size) } -> std::same_as<void>;
     };
 
-    template<IsInitialPageAllocator _PageAllocator>
-    class KInitialPageTableTemplate {
-        public:
-            using PageAllocator = _PageAllocator;
+    class KInitialPageTable {
         private:
             KPhysicalAddress m_l1_tables[2];
             u32 m_num_entries[2];
         public:
-            KInitialPageTableTemplate(KVirtualAddress start_address, KVirtualAddress end_address, PageAllocator &allocator) {
+            template<IsInitialPageAllocator PageAllocator>
+            KInitialPageTable(KVirtualAddress start_address, KVirtualAddress end_address, PageAllocator &allocator) {
                 /* Set tables. */
-                m_l1_tables[0] = AllocateNewPageTable(allocator);
-                m_l1_tables[1] = AllocateNewPageTable(allocator);
+                m_l1_tables[0] = AllocateNewPageTable(allocator, 0);
+                m_l1_tables[1] = AllocateNewPageTable(allocator, 0);
 
                 /* Set counts. */
                 m_num_entries[0] = MaxPageTableEntries;
                 m_num_entries[1] = ((end_address / L1BlockSize) & (MaxPageTableEntries - 1)) - ((start_address / L1BlockSize) & (MaxPageTableEntries - 1)) + 1;
             }
 
-            KInitialPageTableTemplate() {
+            KInitialPageTable() {
                 /* Set tables. */
                 m_l1_tables[0] = util::AlignDown(cpu::GetTtbr0El1(), PageSize);
                 m_l1_tables[1] = util::AlignDown(cpu::GetTtbr1El1(), PageSize);
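The hunk above replaces the allocator-parameterized class template with a plain class whose allocating operations are themselves templates, so one concrete page-table type works with any IsInitialPageAllocator. A compilable C++20 sketch of the same shape, under hypothetical names:

    #include <concepts>
    #include <cstdint>

    template<typename T>
    concept IsAllocatorSketch = requires (T &t, std::uint64_t size) {
        { t.Allocate(size) } -> std::same_as<std::uint64_t>;
    };

    class PageTableSketch {
        private:
            std::uint64_t m_root;
        public:
            /* The allocator type is deduced per call, not baked into the class. */
            template<IsAllocatorSketch Allocator>
            explicit PageTableSketch(Allocator &allocator) : m_root(allocator.Allocate(0x1000)) { /* ... */ }
    };

    struct BumpAllocatorSketch {
        std::uint64_t next;
        std::uint64_t Allocate(std::uint64_t size) { const std::uint64_t result = next; next += size; return result; }
    };

    static_assert(IsAllocatorSketch<BumpAllocatorSketch>);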
@@ -82,30 +69,35 @@ namespace ams::kern::arch::arm64::init {
                 return GetInteger(m_l1_tables[1]);
             }
         private:
-            constexpr ALWAYS_INLINE L1PageTableEntry *GetL1Entry(KVirtualAddress address) const {
+            constexpr ALWAYS_INLINE L1PageTableEntry *GetL1Entry(KVirtualAddress address, u64 phys_to_virt_offset = 0) const {
                 const size_t index = (GetInteger(address) >> (BITSIZEOF(address) - 1)) & 1;
-                L1PageTableEntry *l1_table = reinterpret_cast<L1PageTableEntry *>(GetInteger(m_l1_tables[index]));
+                L1PageTableEntry *l1_table = reinterpret_cast<L1PageTableEntry *>(GetInteger(m_l1_tables[index]) + phys_to_virt_offset);
                 return l1_table + ((GetInteger(address) / L1BlockSize) & (m_num_entries[index] - 1));
             }
 
-            static constexpr ALWAYS_INLINE L2PageTableEntry *GetL2Entry(const L1PageTableEntry *entry, KVirtualAddress address) {
-                L2PageTableEntry *l2_table = reinterpret_cast<L2PageTableEntry *>(GetInteger(entry->GetTable()));
+            static constexpr ALWAYS_INLINE L2PageTableEntry *GetL2Entry(const L1PageTableEntry *entry, KVirtualAddress address, u64 phys_to_virt_offset = 0) {
+                L2PageTableEntry *l2_table = reinterpret_cast<L2PageTableEntry *>(GetInteger(entry->GetTable()) + phys_to_virt_offset);
                 return l2_table + ((GetInteger(address) / L2BlockSize) & (MaxPageTableEntries - 1));
             }
 
-            static constexpr ALWAYS_INLINE L3PageTableEntry *GetL3Entry(const L2PageTableEntry *entry, KVirtualAddress address) {
-                L3PageTableEntry *l3_table = reinterpret_cast<L3PageTableEntry *>(GetInteger(entry->GetTable()));
+            static constexpr ALWAYS_INLINE L3PageTableEntry *GetL3Entry(const L2PageTableEntry *entry, KVirtualAddress address, u64 phys_to_virt_offset = 0) {
+                L3PageTableEntry *l3_table = reinterpret_cast<L3PageTableEntry *>(GetInteger(entry->GetTable()) + phys_to_virt_offset);
                 return l3_table + ((GetInteger(address) / L3BlockSize) & (MaxPageTableEntries - 1));
             }
 
-            static ALWAYS_INLINE KPhysicalAddress AllocateNewPageTable(PageAllocator &allocator) {
+            template<IsInitialPageAllocator PageAllocator>
+            static ALWAYS_INLINE KPhysicalAddress AllocateNewPageTable(PageAllocator &allocator, u64 phys_to_virt_offset) {
                 auto address = allocator.Allocate(PageSize);
-                ClearNewPageTable(address);
+                ClearNewPageTable(address, phys_to_virt_offset);
                 return address;
             }
 
-            static ALWAYS_INLINE void ClearNewPageTable(KPhysicalAddress address) {
-                ClearPhysicalMemory(address, PageSize);
+            static ALWAYS_INLINE void ClearNewPageTable(KPhysicalAddress address, u64 phys_to_virt_offset) {
+                /* Convert to a dereferenceable address, and clear. */
+                volatile u64 *ptr = reinterpret_cast<volatile u64 *>(GetInteger(address) + phys_to_virt_offset);
+                for (size_t i = 0; i < PageSize / sizeof(u64); ++i) {
+                    ptr[i] = 0;
+                }
             }
         public:
             static consteval size_t GetMaximumOverheadSize(size_t size) {
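A worked example of the table-select expression in GetL1Entry above: on arm64, the top bit of a canonical virtual address distinguishes the TTBR0 (low) half from the TTBR1 (high) half, so shifting by BITSIZEOF(address) - 1 yields the index into m_l1_tables. Illustrative values, not kernel code:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const std::uint64_t low_half  = 0x0000000012345000ULL; /* low range, translated via TTBR0 */
        const std::uint64_t high_half = 0xFFFF800012345000ULL; /* high range, translated via TTBR1 */

        /* Same expression as the diff: (GetInteger(address) >> (BITSIZEOF(address) - 1)) & 1. */
        std::printf("low  -> m_l1_tables[%llu]\n", (unsigned long long)((low_half  >> 63) & 1)); /* prints 0 */
        std::printf("high -> m_l1_tables[%llu]\n", (unsigned long long)((high_half >> 63) & 1)); /* prints 1 */
        return 0;
    }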
@@ -327,7 +319,8 @@ namespace ams::kern::arch::arm64::init {
                 }
             }
         public:
-            void NOINLINE Map(KVirtualAddress virt_addr, size_t size, KPhysicalAddress phys_addr, const PageTableEntry &attr, PageAllocator &allocator) {
+            template<IsInitialPageAllocator PageAllocator>
+            void NOINLINE Map(KVirtualAddress virt_addr, size_t size, KPhysicalAddress phys_addr, const PageTableEntry &attr, PageAllocator &allocator, u64 phys_to_virt_offset) {
                 /* Ensure that addresses and sizes are page aligned. */
                 MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
                 MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(phys_addr), PageSize));

@@ -335,7 +328,7 @@ namespace ams::kern::arch::arm64::init {
 
                 /* Iteratively map pages until the requested region is mapped. */
                 while (size > 0) {
-                    L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
+                    L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr, phys_to_virt_offset);
 
                     /* Can we make an L1 block? */
                     if (util::IsAligned(GetInteger(virt_addr), L1BlockSize) && util::IsAligned(GetInteger(phys_addr), L1BlockSize) && size >= L1BlockSize) {

@@ -349,12 +342,12 @@ namespace ams::kern::arch::arm64::init {
 
                     /* If we don't already have an L2 table, we need to make a new one. */
                     if (!l1_entry->IsTable()) {
-                        KPhysicalAddress new_table = AllocateNewPageTable(allocator);
+                        KPhysicalAddress new_table = AllocateNewPageTable(allocator, phys_to_virt_offset);
                         cpu::DataSynchronizationBarrierInnerShareable();
                         *l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
                     }
 
-                    L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
+                    L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr, phys_to_virt_offset);
 
                     /* Can we make a contiguous L2 block? */
                     if (util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize) && util::IsAligned(GetInteger(phys_addr), L2ContiguousBlockSize) && size >= L2ContiguousBlockSize) {

@@ -380,12 +373,12 @@ namespace ams::kern::arch::arm64::init {
 
                     /* If we don't already have an L3 table, we need to make a new one. */
                     if (!l2_entry->IsTable()) {
-                        KPhysicalAddress new_table = AllocateNewPageTable(allocator);
+                        KPhysicalAddress new_table = AllocateNewPageTable(allocator, phys_to_virt_offset);
                         cpu::DataSynchronizationBarrierInnerShareable();
                         *l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
                     }
 
-                    L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
+                    L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr, phys_to_virt_offset);
 
                     /* Can we make a contiguous L3 block? */
                     if (util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize) && util::IsAligned(GetInteger(phys_addr), L3ContiguousBlockSize) && size >= L3ContiguousBlockSize) {
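The Map loop above always prefers the largest block whose alignment and remaining size permit it. A self-contained illustration of that granularity choice, assuming the standard arm64 4KB-granule sizes (1GB L1 blocks, 2MB L2 blocks, 4KB L3 pages, with 16x contiguous variants):

    #include <cstdint>
    #include <cstdio>
    #include <initializer_list>

    constexpr std::uint64_t L3BlockSize           = 0x1000;     /* 4KB page */
    constexpr std::uint64_t L3ContiguousBlockSize = 0x10000;    /* 16 x 4KB */
    constexpr std::uint64_t L2BlockSize           = 0x200000;   /* 2MB block */
    constexpr std::uint64_t L2ContiguousBlockSize = 0x2000000;  /* 16 x 2MB */
    constexpr std::uint64_t L1BlockSize           = 0x40000000; /* 1GB block */

    std::uint64_t ChooseBlockSize(std::uint64_t virt, std::uint64_t phys, std::uint64_t size) {
        for (std::uint64_t block : { L1BlockSize, L2ContiguousBlockSize, L2BlockSize,
                                     L3ContiguousBlockSize, L3BlockSize }) {
            if ((virt % block) == 0 && (phys % block) == 0 && size >= block) {
                return block; /* largest block that alignment and size allow */
            }
        }
        return 0; /* unreachable for page-aligned inputs */
    }

    int main() {
        std::printf("0x%llx\n", (unsigned long long)ChooseBlockSize(0x40000000, 0x80000000, 0x40000000)); /* 1GB L1 block */
        std::printf("0x%llx\n", (unsigned long long)ChooseBlockSize(0x200000, 0x200000, 0x300000));       /* 2MB L2 block */
        return 0;
    }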
@@ -410,6 +403,98 @@ namespace ams::kern::arch::arm64::init {
                 cpu::DataSynchronizationBarrierInnerShareable();
             }
 
+            void UnmapTtbr0Entries(u64 phys_to_virt_offset) {
+                /* Ensure data consistency before we unmap. */
+                cpu::DataSynchronizationBarrierInnerShareable();
+
+                /* Define helper, as we only want to clear non-nGnRE pages. */
+                constexpr auto ShouldUnmap = [](const PageTableEntry *entry) ALWAYS_INLINE_LAMBDA -> bool {
+                    return entry->GetPageAttribute() != PageTableEntry::PageAttribute_Device_nGnRE;
+                };
+
+                /* Iterate all L1 entries. */
+                L1PageTableEntry * const l1_table = reinterpret_cast<L1PageTableEntry *>(GetInteger(m_l1_tables[0]) + phys_to_virt_offset);
+                for (size_t l1_index = 0; l1_index < m_num_entries[0]; l1_index++) {
+                    /* Get L1 entry. */
+                    L1PageTableEntry * const l1_entry = l1_table + l1_index;
+                    if (l1_entry->IsBlock()) {
+                        /* Unmap the L1 entry, if we should. */
+                        if (ShouldUnmap(l1_entry)) {
+                            *static_cast<PageTableEntry *>(l1_entry) = InvalidPageTableEntry;
+                        }
+                    } else if (l1_entry->IsTable()) {
+                        /* Get the L2 table. */
+                        L2PageTableEntry * const l2_table = reinterpret_cast<L2PageTableEntry *>(GetInteger(l1_entry->GetTable()) + phys_to_virt_offset);
+
+                        /* Unmap all L2 entries, as relevant. */
+                        size_t remaining_l2_entries = 0;
+                        for (size_t l2_index = 0; l2_index < MaxPageTableEntries; ++l2_index) {
+                            /* Get L2 entry. */
+                            L2PageTableEntry * const l2_entry = l2_table + l2_index;
+                            if (l2_entry->IsBlock()) {
+                                const size_t num_to_clear = (l2_entry->IsContiguous() ? L2ContiguousBlockSize : L2BlockSize) / L2BlockSize;
+
+                                if (ShouldUnmap(l2_entry)) {
+                                    for (size_t i = 0; i < num_to_clear; ++i) {
+                                        static_cast<PageTableEntry *>(l2_entry)[i] = InvalidPageTableEntry;
+                                    }
+                                } else {
+                                    remaining_l2_entries += num_to_clear;
+                                }
+
+                                l2_index = l2_index + num_to_clear - 1;
+                            } else if (l2_entry->IsTable()) {
+                                /* Get the L3 table. */
+                                L3PageTableEntry * const l3_table = reinterpret_cast<L3PageTableEntry *>(GetInteger(l2_entry->GetTable()) + phys_to_virt_offset);
+
+                                /* Unmap all L3 entries, as relevant. */
+                                size_t remaining_l3_entries = 0;
+                                for (size_t l3_index = 0; l3_index < MaxPageTableEntries; ++l3_index) {
+                                    /* Get L3 entry. */
+                                    if (L3PageTableEntry * const l3_entry = l3_table + l3_index; l3_entry->IsBlock()) {
+                                        const size_t num_to_clear = (l3_entry->IsContiguous() ? L3ContiguousBlockSize : L3BlockSize) / L3BlockSize;
+
+                                        if (ShouldUnmap(l3_entry)) {
+                                            for (size_t i = 0; i < num_to_clear; ++i) {
+                                                static_cast<PageTableEntry *>(l3_entry)[i] = InvalidPageTableEntry;
+                                            }
+                                        } else {
+                                            remaining_l3_entries += num_to_clear;
+                                        }
+
+                                        l3_index = l3_index + num_to_clear - 1;
+                                    }
+                                }
+
+                                /* If we unmapped all L3 entries, clear the L2 entry. */
+                                if (remaining_l3_entries == 0) {
+                                    *static_cast<PageTableEntry *>(l2_entry) = InvalidPageTableEntry;
+
+                                    /* Invalidate the entire tlb. */
+                                    cpu::DataSynchronizationBarrierInnerShareable();
+                                    cpu::InvalidateEntireTlb();
+                                } else {
+                                    remaining_l2_entries++;
+                                }
+                            }
+                        }
+
+                        /* If we unmapped all L2 entries, clear the L1 entry. */
+                        if (remaining_l2_entries == 0) {
+                            *static_cast<PageTableEntry *>(l1_entry) = InvalidPageTableEntry;
+
+                            /* Invalidate the entire tlb. */
+                            cpu::DataSynchronizationBarrierInnerShareable();
+                            cpu::InvalidateEntireTlb();
+                        }
+                    }
+                }
+
+                /* Invalidate the entire tlb. */
+                cpu::DataSynchronizationBarrierInnerShareable();
+                cpu::InvalidateEntireTlb();
+            }
+
             KPhysicalAddress GetPhysicalAddress(KVirtualAddress virt_addr) const {
                 /* Get the L1 entry. */
                 const L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
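UnmapTtbr0Entries keeps only Device-nGnRE mappings (presumably so device/I/O access survives until the transition away from the identity map completes) and clears everything else, issuing a data synchronization barrier before each TLB invalidate so the page-table store is observable first. A hedged AArch64 sketch of that ordering contract; the helper name is a placeholder, and the real code goes through cpu::DataSynchronizationBarrierInnerShareable and cpu::InvalidateEntireTlb:

    inline void InvalidateAfterPteClear(volatile unsigned long *pte) {
        *pte = 0;                                             /* InvalidPageTableEntry */
        __asm__ __volatile__("dsb ish"        ::: "memory");  /* make the PTE store visible first */
        __asm__ __volatile__("tlbi vmalle1is" ::: "memory");  /* then drop stale EL1 translations */
        __asm__ __volatile__("dsb ish"        ::: "memory");  /* wait for the invalidate to complete */
        __asm__ __volatile__("isb"            ::: "memory");  /* resynchronize the instruction stream */
    }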
@@ -861,6 +946,4 @@ namespace ams::kern::arch::arm64::init {
     };
     static_assert(IsInitialPageAllocator<KInitialPageAllocator>);
 
-    using KInitialPageTable = KInitialPageTableTemplate<KInitialPageAllocator>;
-
 }
@@ -246,7 +246,7 @@
 #define THREAD_LOCAL_REGION_SIZE 0x200
 
 /* ams::kern::init::KInitArguments, https://github.com/Atmosphere-NX/Atmosphere/blob/master/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp */
-#define INIT_ARGUMENTS_SIZE 0x60
+#define INIT_ARGUMENTS_SIZE 0x50
 #define INIT_ARGUMENTS_TTBR0 0x00
 #define INIT_ARGUMENTS_TTBR1 0x08
 #define INIT_ARGUMENTS_TCR 0x10

@@ -257,8 +257,6 @@
 #define INIT_ARGUMENTS_SP 0x38
 #define INIT_ARGUMENTS_ENTRYPOINT 0x40
 #define INIT_ARGUMENTS_ARGUMENT 0x48
-#define INIT_ARGUMENTS_SETUP_FUNCTION 0x50
-#define INIT_ARGUMENTS_EXCEPTION_STACK 0x58
 
 /* ams::kern::KScheduler (::SchedulingState), https://github.com/Atmosphere-NX/Atmosphere/blob/master/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp */
 /* NOTE: Due to constraints on ldarb relative offsets, KSCHEDULER_NEEDS_SCHEDULING cannot trivially be changed, and will require assembly edits. */
@@ -189,6 +189,14 @@ namespace ams::kern::arch::arm64::cpu {
         return (par >> (BITSIZEOF(par) - BITSIZEOF(u8))) == 0xFF;
     }
 
+    ALWAYS_INLINE void StoreDataCacheForInitArguments(const void *addr, size_t size) {
+        const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), DataCacheLineSize);
+        for (size_t stored = 0; stored < size; stored += cpu::DataCacheLineSize) {
+            __asm__ __volatile__("dc cvac, %[cur]" :: [cur]"r"(start + stored) : "memory");
+        }
+        DataSynchronizationBarrier();
+    }
+
     /* Synchronization helpers. */
     NOINLINE void SynchronizeAllCores();
     void SynchronizeCores(u64 core_mask);
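A plausible reading of why this helper exists, consistent with the "merge cpu on logic" half of the commit message though not stated in the diff: a core being powered on starts with its MMU and caches disabled, so the KInitArguments it will read must first be cleaned from the data cache to the point of coherency, which is what dc cvac does. A standalone sketch of the same clean-by-VA loop, with the line size assumed rather than taken from the kernel:

    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t DataCacheLineSizeSketch = 64; /* assumed; the kernel supplies cpu::DataCacheLineSize */

    inline void CleanDataCacheToPoC(const void *addr, std::size_t size) {
        const std::uintptr_t start = reinterpret_cast<std::uintptr_t>(addr) & ~(DataCacheLineSizeSketch - 1);
        for (std::size_t cleaned = 0; cleaned < size; cleaned += DataCacheLineSizeSketch) {
            __asm__ __volatile__("dc cvac, %[cur]" :: [cur]"r"(start + cleaned) : "memory"); /* clean line to point of coherency */
        }
        __asm__ __volatile__("dsb sy" ::: "memory"); /* complete the maintenance before signaling the core */
    }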
@@ -161,9 +161,12 @@ namespace ams::kern::arch::arm64 {
     constexpr ALWAYS_INLINE bool IsPrivilegedExecuteNever() const { return this->GetBits(53, 1) != 0; }
     constexpr ALWAYS_INLINE bool IsContiguous() const { return this->GetBits(52, 1) != 0; }
     constexpr ALWAYS_INLINE bool IsGlobal() const { return this->GetBits(11, 1) == 0; }
-    constexpr ALWAYS_INLINE AccessFlag GetAccessFlag() const { return static_cast<AccessFlag>(this->GetBits(10, 1)); }
-    constexpr ALWAYS_INLINE Shareable GetShareable() const { return static_cast<Shareable>(this->GetBits(8, 2)); }
-    constexpr ALWAYS_INLINE PageAttribute GetPageAttribute() const { return static_cast<PageAttribute>(this->GetBits(2, 3)); }
+    constexpr ALWAYS_INLINE AccessFlag GetAccessFlag() const { return static_cast<AccessFlag>(this->SelectBits(10, 1)); }
+    constexpr ALWAYS_INLINE Shareable GetShareable() const { return static_cast<Shareable>(this->SelectBits(8, 2)); }
+    constexpr ALWAYS_INLINE PageAttribute GetPageAttribute() const { return static_cast<PageAttribute>(this->SelectBits(2, 3)); }
+    constexpr ALWAYS_INLINE int GetAccessFlagInteger() const { return static_cast<int>(this->GetBits(10, 1)); }
+    constexpr ALWAYS_INLINE int GetShareableInteger() const { return static_cast<int>(this->GetBits(8, 2)); }
+    constexpr ALWAYS_INLINE int GetPageAttributeInteger() const { return static_cast<int>(this->GetBits(2, 3)); }
     constexpr ALWAYS_INLINE bool IsReadOnly() const { return this->GetBits(7, 1) != 0; }
     constexpr ALWAYS_INLINE bool IsUserAccessible() const { return this->GetBits(6, 1) != 0; }
     constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBits(5, 1) != 0; }
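The diff does not show SelectBits, so the following is an assumption about the two accessors: GetBits shifts the field down to bit zero, while SelectBits masks it in place. Under that reading, GetPageAttribute() now returns the attribute bits at their native position (matching how ShouldUnmap compares it against a PageAttribute enumerator above), and the new *Integer variants keep the old shifted-down behavior for callers that want a small number:

    #include <cstdint>

    constexpr std::uint64_t GetBitsSketch(std::uint64_t v, int offset, int count) {
        return (v >> offset) & ((std::uint64_t(1) << count) - 1);  /* field shifted down to bit 0 */
    }

    constexpr std::uint64_t SelectBitsSketch(std::uint64_t v, int offset, int count) {
        return v & (((std::uint64_t(1) << count) - 1) << offset);  /* field masked in place */
    }

    static_assert(GetBitsSketch(0b10100, 2, 3) == 0b101);      /* value of the field */
    static_assert(SelectBitsSketch(0b10100, 2, 3) == 0b10100); /* field at its native position */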
@@ -25,12 +25,15 @@ namespace ams::kern::board::nintendo::nx {
             static constexpr size_t SecureAppletMemorySize = 4_MB;
         public:
             class Init : public KSystemControlBase::Init {
+                private:
+                    friend class KSystemControlBase::Init;
+                private:
+                    static void CpuOnImpl(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
                 public:
                     /* Initialization. */
                     static size_t GetRealMemorySize();
                     static size_t GetIntendedMemorySize();
                     static bool ShouldIncreaseThreadResourceLimit();
-                    static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
                     static size_t GetApplicationPoolSize();
                     static size_t GetAppletPoolSize();
                     static size_t GetMinimumNonSecureSystemPoolSize();
@@ -26,6 +26,6 @@ namespace ams::kern::init {
 
     static_assert(util::IsPowerOfTwo(alignof(KInitArguments)) && util::IsPowerOfTwo(sizeof(KInitArguments)));
 
-    KPhysicalAddress GetInitArgumentsAddress(s32 core_id);
+    KInitArguments *GetInitArguments(s32 core_id);
 
 }
@@ -183,10 +183,10 @@ namespace ams::kern {
                 return std::make_tuple(total_size, kernel_size);
             }
 
-            static void InitializeLinearMemoryAddresses(KPhysicalAddress aligned_linear_phys_start, KVirtualAddress linear_virtual_start) {
+            static void InitializeLinearMemoryAddresses(u64 phys_to_virt_diff) {
                 /* Set static differences. */
-                s_linear_phys_to_virt_diff = GetInteger(linear_virtual_start) - GetInteger(aligned_linear_phys_start);
-                s_linear_virt_to_phys_diff = GetInteger(aligned_linear_phys_start) - GetInteger(linear_virtual_start);
+                s_linear_phys_to_virt_diff = phys_to_virt_diff;
+                s_linear_virt_to_phys_diff = -phys_to_virt_diff;
             }
 
             static void InitializeLinearMemoryRegionTrees();
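Collapsing the two parameters into one works because, with unsigned 64-bit wraparound, virt - phys and phys - virt are exact negations of each other, so either difference recovers the other. A small compile-time check with illustrative values:

    #include <cstdint>

    constexpr std::uint64_t phys = 0x0000000080000000ULL; /* illustrative physical base */
    constexpr std::uint64_t virt = 0xFFFFFF8000000000ULL; /* hypothetical linear-map base */

    constexpr std::uint64_t phys_to_virt_diff = virt - phys;
    constexpr std::uint64_t virt_to_phys_diff = -phys_to_virt_diff; /* modular negation */

    static_assert(phys + phys_to_virt_diff == virt); /* physical -> virtual */
    static_assert(virt + virt_to_phys_diff == phys); /* virtual -> physical */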
@@ -21,6 +21,12 @@ namespace ams::kern {
 
     struct InitialProcessBinaryLayout;
 
+    namespace init {
+
+        struct KInitArguments;
+
+    }
+
 }
 
 namespace ams::kern {

@@ -40,6 +46,8 @@ namespace ams::kern {
             static constinit inline KSpinLock s_random_lock;
         public:
             class Init {
+                private:
+                    static void CpuOnImpl(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
                 public:
                     /* Initialization. */
                     static size_t GetRealMemorySize();

@@ -47,7 +55,7 @@ namespace ams::kern {
                     static KPhysicalAddress GetKernelPhysicalBaseAddress(KPhysicalAddress base_address);
                     static void GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out);
                     static bool ShouldIncreaseThreadResourceLimit();
-                    static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
+                    static void TurnOnCpu(u64 core_id, const ams::kern::init::KInitArguments *args);
                     static size_t GetApplicationPoolSize();
                     static size_t GetAppletPoolSize();
                     static size_t GetMinimumNonSecureSystemPoolSize();
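The shape suggested by these hunks, inferred from the declarations rather than shown in the diff: the generic cpu-on flow moves into a base-class TurnOnCpu that takes the prepared KInitArguments, while each board contributes only the firmware-specific CpuOnImpl hook, with the friend declaration letting the base call the derived hook. A declarations-only sketch of that split, under hypothetical names:

    #include <cstdint>

    struct KInitArgumentsSketch { std::uint64_t entrypoint; std::uint64_t argument; /* ... */ };

    class SystemControlBaseSketch {
        public:
            class Init {
                private:
                    /* Board-specific: how a core is actually powered on (e.g. a PSCI CPU_ON call). */
                    static void CpuOnImpl(std::uint64_t core_id, std::uintptr_t entrypoint, std::uintptr_t arg);
                public:
                    /* Generic: argument preparation and cache maintenance, then the board hook. */
                    static void TurnOnCpu(std::uint64_t core_id, const KInitArgumentsSketch *args);
            };
    };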