kern: Implement most of memory init (all cores hit main, but still more to do)

Michael Scire 2020-01-28 22:09:47 -08:00
parent b2e522c0a0
commit e7dee2a9fc
22 changed files with 1246 additions and 81 deletions

View file

@@ -27,16 +27,20 @@
/* Core pre-initialization includes. */
#include "mesosphere/kern_select_cpu.hpp"
#include "mesosphere/kern_select_k_system_control.hpp"
/* Initialization headers. */
#include "mesosphere/init/kern_init_elf.hpp"
#include "mesosphere/init/kern_init_layout.hpp"
#include "mesosphere/init/kern_init_page_table_select.hpp"
#include "mesosphere/init/kern_init_arguments_select.hpp"
#include "mesosphere/kern_k_memory_layout.hpp"
/* Core functionality. */
#include "mesosphere/kern_select_interrupts.hpp"
#include "mesosphere/kern_select_k_system_control.hpp"
/* Supervisor Calls. */
#include "mesosphere/kern_svc.hpp"
/* Main functionality. */
#include "mesosphere/kern_main.hpp"

View file

@@ -17,7 +17,7 @@
#include <vapours.hpp>
#include <mesosphere/kern_panic.hpp>
#include <mesosphere/kern_k_typed_address.hpp>
#include "../kern_cpu.hpp"
#include <mesosphere/kern_select_cpu.hpp>
namespace ams::kern::init {
@@ -190,10 +190,14 @@ namespace ams::kern::init {
virtual KPhysicalAddress Allocate() { return Null<KPhysicalAddress>; }
virtual void Free(KPhysicalAddress phys_addr) { /* Nothing to do here. */ (void)(phys_addr); }
};
struct NoClear{};
private:
KPhysicalAddress l1_table;
public:
constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1) : l1_table(l1) {
constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1, NoClear) : l1_table(l1) { /* ... */ }
constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1) : KInitialPageTable(l1, NoClear{}) {
ClearNewPageTable(this->l1_table);
}
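(Editor's note: the two constructors above use a tag-dispatch idiom — the NoClear overload adopts an existing L1 table without touching it, while the plain overload delegates to it and then zeroes the table. A minimal stand-alone sketch of the same idiom, with hypothetical names and an assumed 4KiB table size:)
#include <cstring>
struct NoClear{};
class Table {
    private:
        void *l1_table;
    public:
        /* Adopt an existing table as-is. */
        constexpr Table(void *l1, NoClear) : l1_table(l1) { /* ... */ }
        /* Plain construction delegates, then clears the new table. */
        Table(void *l1) : Table(l1, NoClear{}) { std::memset(this->l1_table, 0, 4096); }
};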
@@ -224,9 +228,9 @@
public:
void NOINLINE Map(KVirtualAddress virt_addr, size_t size, KPhysicalAddress phys_addr, const PageTableEntry &attr, IPageAllocator &allocator) {
/* Ensure that addresses and sizes are page aligned. */
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(phys_addr), PageSize));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(phys_addr), PageSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, PageSize));
/* Iteratively map pages until the requested region is mapped. */
while (size > 0) {
@@ -309,10 +313,37 @@
}
}
KPhysicalAddress GetPhysicalAddress(KVirtualAddress virt_addr) const {
/* Get the L1 entry. */
const L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
if (l1_entry->IsBlock()) {
return l1_entry->GetBlock() + (GetInteger(virt_addr) & (L1BlockSize - 1));
}
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
/* Get the L2 entry. */
const L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
if (l2_entry->IsBlock()) {
return l2_entry->GetBlock() + (GetInteger(virt_addr) & (L2BlockSize - 1));
}
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
/* Get the L3 entry. */
const L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock());
return l3_entry->GetBlock() + (GetInteger(virt_addr) & (L3BlockSize - 1));
}
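(Editor's note: the walk above resolves a physical address by descending L1 -> L2 -> L3 and OR-ing in the low bits of the virtual address as the block offset. The block sizes are not defined in this diff; assuming the standard AArch64 4KiB-granule values, the masking works out as follows:)
/* Assumed standard 4KiB-granule block sizes (not shown in this commit). */
constexpr size_t L1BlockSize = 1ul << 30; /* 1 GiB */
constexpr size_t L2BlockSize = 1ul << 21; /* 2 MiB */
constexpr size_t L3BlockSize = 1ul << 12; /* 4 KiB */
/* An address inside an L2 block keeps its low 21 bits as the block offset. */
static_assert((0x12345678ul & (L2BlockSize - 1)) == 0x145678ul);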
bool IsFree(KVirtualAddress virt_addr, size_t size) {
/* Ensure that addresses and sizes are page aligned. */
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, PageSize));
const KVirtualAddress end_virt_addr = virt_addr + size;
while (virt_addr < end_virt_addr) {
@@ -360,8 +391,8 @@
cpu::DataSynchronizationBarrierInnerShareable();
/* Ensure that addresses and sizes are page aligned. */
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, PageSize));
/* Iteratively reprotect pages until the requested region is reprotected. */
while (size > 0) {
@@ -371,9 +402,9 @@
if (l1_entry->IsBlock()) {
/* Ensure that we are allowed to have an L1 block here. */
const KPhysicalAddress block = l1_entry->GetBlock();
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L1BlockSize));
MESOSPHERE_ABORT_UNLESS(l1_entry->IsCompatibleWithAttribute(attr_before, false));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L1BlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsCompatibleWithAttribute(attr_before, false));
/* Invalidate the existing L1 block. */
*static_cast<PageTableEntry *>(l1_entry) = InvalidPageTableEntry;
@@ -389,7 +420,7 @@
}
/* Not a block, so we must be a table. */
MESOSPHERE_ABORT_UNLESS(l1_entry->IsTable());
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
if (l2_entry->IsBlock()) {
@@ -397,14 +428,14 @@
if (l2_entry->IsContiguous()) {
/* Ensure that we are allowed to have a contiguous L2 block here. */
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2ContiguousBlockSize));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L2ContiguousBlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2ContiguousBlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L2ContiguousBlockSize));
/* Invalidate the existing contiguous L2 block. */
for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
/* Ensure that the entry is valid. */
MESOSPHERE_ABORT_UNLESS(l2_entry[i].IsCompatibleWithAttribute(attr_before, true));
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry[i].IsCompatibleWithAttribute(attr_before, true));
static_cast<PageTableEntry *>(l2_entry)[i] = InvalidPageTableEntry;
}
cpu::DataSynchronizationBarrierInnerShareable();
@@ -419,10 +450,10 @@
size -= L2ContiguousBlockSize;
} else {
/* Ensure that we are allowed to have an L2 block here. */
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2BlockSize));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2BlockSize));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L2BlockSize));
MESOSPHERE_ABORT_UNLESS(l2_entry->IsCompatibleWithAttribute(attr_before, false));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2BlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2BlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L2BlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsCompatibleWithAttribute(attr_before, false));
/* Invalidate the existing L2 block. */
*static_cast<PageTableEntry *>(l2_entry) = InvalidPageTableEntry;
@@ -440,23 +471,23 @@
}
/* Not a block, so we must be a table. */
MESOSPHERE_ABORT_UNLESS(l2_entry->IsTable());
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
/* We must have a mapped l3 entry to reprotect. */
L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
MESOSPHERE_ABORT_UNLESS(l3_entry->IsBlock());
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock());
const KPhysicalAddress block = l3_entry->GetBlock();
if (l3_entry->IsContiguous()) {
/* Ensure that we are allowed to have a contiguous L3 block here. */
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3ContiguousBlockSize));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L3ContiguousBlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3ContiguousBlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L3ContiguousBlockSize));
/* Invalidate the existing contiguous L3 block. */
for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
/* Ensure that the entry is valid. */
MESOSPHERE_ABORT_UNLESS(l3_entry[i].IsCompatibleWithAttribute(attr_before, true));
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry[i].IsCompatibleWithAttribute(attr_before, true));
static_cast<PageTableEntry *>(l3_entry)[i] = InvalidPageTableEntry;
}
cpu::DataSynchronizationBarrierInnerShareable();
@@ -471,10 +502,10 @@
size -= L3ContiguousBlockSize;
} else {
/* Ensure that we are allowed to have an L3 block here. */
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3BlockSize));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3BlockSize));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L3BlockSize));
MESOSPHERE_ABORT_UNLESS(l3_entry->IsCompatibleWithAttribute(attr_before, false));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3BlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3BlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L3BlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsCompatibleWithAttribute(attr_before, false));
/* Invalidate the existing L3 block. */
*static_cast<PageTableEntry *>(l3_entry) = InvalidPageTableEntry;
@@ -505,14 +536,18 @@
this->next_address = address;
}
ALWAYS_INLINE uintptr_t GetFinalState() {
ALWAYS_INLINE uintptr_t GetFinalNextAddress() {
const uintptr_t final_address = this->next_address;
this->next_address = Null<uintptr_t>;
return final_address;
}
ALWAYS_INLINE uintptr_t GetFinalState() {
return this->GetFinalNextAddress();
}
public:
virtual KPhysicalAddress Allocate() override {
MESOSPHERE_ABORT_UNLESS(this->next_address != Null<uintptr_t>);
MESOSPHERE_INIT_ABORT_UNLESS(this->next_address != Null<uintptr_t>);
const uintptr_t allocated = this->next_address;
this->next_address += PageSize;
std::memset(reinterpret_cast<void *>(allocated), 0, PageSize);
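(Editor's note: though the hunk ends mid-function, the allocation scheme is clear — hand out next_address, advance it by one page, and zero the page before returning it. A free-standing sketch of the same bump allocator, with hypothetical names:)
#include <cstddef>
#include <cstdint>
#include <cstring>
class BumpPageAllocator {
    private:
        uintptr_t next_address;
    public:
        constexpr explicit BumpPageAllocator(uintptr_t start) : next_address(start) { /* ... */ }
        void *Allocate(size_t page_size) {
            /* Hand out the next page and advance the cursor. */
            const uintptr_t allocated = this->next_address;
            this->next_address += page_size;
            /* New page tables must start zeroed. */
            return std::memset(reinterpret_cast<void *>(allocated), 0, page_size);
        }
};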

View file

@@ -59,6 +59,9 @@ namespace ams::kern::arm64::cpu {
EnsureInstructionConsistency();
}
/* Synchronization helpers. */
NOINLINE void SynchronizeAllCores();
/* Cache management helpers. */
void FlushEntireDataCacheShared();
void FlushEntireDataCacheLocal();

View file

@@ -37,8 +37,8 @@ namespace ams::kern::arm64::cpu {
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Ttbr0El1, ttbr0_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Ttbr1El1, ttbr1_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(MairEl1, mair_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(TcrEl1, tcr_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(MairEl1, mair_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(SctlrEl1, sctlr_el1)
@@ -48,19 +48,88 @@ namespace ams::kern::arm64::cpu {
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CsselrEl1, csselr_el1)
/* Base class for register accessors. */
class GenericRegisterAccessor {
class GenericRegisterAccessorBase {
NON_COPYABLE(GenericRegisterAccessorBase);
NON_MOVEABLE(GenericRegisterAccessorBase);
private:
u64 value;
public:
ALWAYS_INLINE GenericRegisterAccessor(u64 v) : value(v) { /* ... */ }
constexpr ALWAYS_INLINE GenericRegisterAccessorBase(u64 v) : value(v) { /* ... */ }
protected:
constexpr ALWAYS_INLINE u64 GetValue() const {
return this->value;
}
constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
return (this->value >> offset) & ((1ul << count) - 1);
}
};
/* Special code for main id register. */
class MainIdRegisterAccessor : public GenericRegisterAccessor {
template<typename Derived>
class GenericRegisterAccessor : public GenericRegisterAccessorBase {
public:
constexpr ALWAYS_INLINE GenericRegisterAccessor(u64 v) : GenericRegisterAccessorBase(v) { /* ... */ }
protected:
ALWAYS_INLINE void Store() const {
static_cast<const Derived *>(this)->Store();
}
};
#define MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(name) class name##RegisterAccessor : public GenericRegisterAccessor<name##RegisterAccessor>
#define MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(accessor, reg_name) \
ALWAYS_INLINE accessor##RegisterAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(reg_name)) { /* ... */ } \
constexpr ALWAYS_INLINE accessor##RegisterAccessor(u64 v) : GenericRegisterAccessor(v) { /* ... */ } \
\
ALWAYS_INLINE void Store() { const u64 v = this->GetValue(); MESOSPHERE_CPU_SET_SYSREG(reg_name, v); }
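(Editor's note: for reference, instantiating the two macros for TranslationControl/tcr_el1 expands to roughly the following, derived mechanically from the definitions above:)
class TranslationControlRegisterAccessor : public GenericRegisterAccessor<TranslationControlRegisterAccessor> {
    public:
        /* Load the current value of tcr_el1. */
        ALWAYS_INLINE TranslationControlRegisterAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(tcr_el1)) { /* ... */ }
        constexpr ALWAYS_INLINE TranslationControlRegisterAccessor(u64 v) : GenericRegisterAccessor(v) { /* ... */ }
        /* Write the held value back to tcr_el1. */
        ALWAYS_INLINE void Store() { const u64 v = this->GetValue(); MESOSPHERE_CPU_SET_SYSREG(tcr_el1, v); }
};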
/* Accessors. */
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MemoryAccessIndirection) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MemoryAccessIndirection, mair_el1)
};
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(TranslationControl) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(TranslationControl, tcr_el1)
constexpr ALWAYS_INLINE size_t GetT1Size() const {
const size_t shift_value = this->GetBits(16, 6);
return size_t(1) << (size_t(64) - shift_value);
}
};
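(Editor's note: GetT1Size decodes TCR_EL1.T1SZ, bits 16-21, into the size of the TTBR1 translation region. A hedged usage sketch; for the 39-bit kernel address space introduced later in this commit, T1SZ would be 64 - 39 = 25:)
/* Hypothetical use: read tcr_el1 and recover the TTBR1 region size. */
const size_t t1_size = TranslationControlRegisterAccessor().GetT1Size();
/* With T1SZ = 25, t1_size == 1ul << 39 (512 GiB). */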
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MultiprocessorAffinity) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MultiprocessorAffinity, mpidr_el1)
constexpr ALWAYS_INLINE u64 GetAff0() const {
return this->GetBits(0, 8);
}
constexpr ALWAYS_INLINE u64 GetAff1() const {
return this->GetBits(8, 8);
}
constexpr ALWAYS_INLINE u64 GetAff2() const {
return this->GetBits(16, 8);
}
constexpr ALWAYS_INLINE u64 GetAff3() const {
return this->GetBits(32, 8);
}
constexpr ALWAYS_INLINE u64 GetCpuOnArgument() const {
constexpr u64 Mask = 0x000000FF00FFFF00ul;
return this->GetValue() & Mask;
}
};
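(Editor's note: the CpuOn mask keeps Aff3 (bits 32-39), Aff2 (bits 16-23), and Aff1 (bits 8-15) while clearing Aff0, consistent with the field offsets used by the accessors above:)
/* Mask = Aff3 | Aff2 | Aff1, per the offsets in GetAff1/GetAff2/GetAff3. */
static_assert(((0xFFul << 32) | (0xFFul << 16) | (0xFFul << 8)) == 0x000000FF00FFFF00ul);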
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(ThreadId) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(ThreadId, tpidr_el1)
};
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MainId) {
public:
enum class Implementer {
ArmLimited = 0x41,
@@ -70,7 +139,7 @@ namespace ams::kern::arm64::cpu {
CortexA57 = 0xD07,
};
public:
ALWAYS_INLINE MainIdRegisterAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(midr_el1)) { /* ... */ }
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MainId, midr_el1)
public:
constexpr ALWAYS_INLINE Implementer GetImplementer() const {
return static_cast<Implementer>(this->GetBits(24, 8));
@@ -94,9 +163,9 @@ namespace ams::kern::arm64::cpu {
};
/* Accessors for cache registers. */
class CacheLineIdAccessor : public GenericRegisterAccessor {
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CacheLineId) {
public:
ALWAYS_INLINE CacheLineIdAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(clidr_el1)) { /* ... */ }
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CacheLineId, clidr_el1)
public:
constexpr ALWAYS_INLINE int GetLevelsOfCoherency() const {
return static_cast<int>(this->GetBits(24, 3));
@@ -109,9 +178,9 @@ namespace ams::kern::arm64::cpu {
/* TODO: Other bitfield accessors? */
};
class CacheSizeIdAccessor : public GenericRegisterAccessor {
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CacheSizeId) {
public:
ALWAYS_INLINE CacheSizeIdAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(ccsidr_el1)) { /* ... */ }
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CacheSizeId, ccsidr_el1)
public:
constexpr ALWAYS_INLINE int GetNumberOfSets() const {
return static_cast<int>(this->GetBits(13, 15));
@@ -128,6 +197,8 @@ namespace ams::kern::arm64::cpu {
/* TODO: Other bitfield accessors? */
};
#undef MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS
#undef MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS
#undef MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS
#undef MESOSPHERE_CPU_GET_SYSREG
#undef MESOSPHERE_CPU_SET_SYSREG

View file

@@ -23,8 +23,10 @@ namespace ams::kern {
class Init {
public:
/* Initialization. */
static size_t GetIntendedMemorySize();
static KPhysicalAddress GetKernelPhysicalBaseAddress(uintptr_t base_address);
static bool ShouldIncreaseThreadResourceLimit();
static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
/* Randomness. */
static void GenerateRandomBytes(void *dst, size_t size);

View file

@@ -25,5 +25,7 @@
namespace ams::kern::init {
KPhysicalAddress GetInitArgumentsAddress(s32 core_id);
void SetInitArguments(s32 core_id, KPhysicalAddress address, uintptr_t arg);
void StoreInitArguments();
}

View file

@@ -0,0 +1,391 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours.hpp>
#include <mesosphere/init/kern_init_page_table_select.hpp>
namespace ams::kern {
constexpr size_t KernelAslrAlignment = 2_MB;
constexpr size_t KernelVirtualAddressSpaceWidth = size_t(1ul) << 39ul;
constexpr size_t KernelPhysicalAddressSpaceWidth = size_t(1ul) << 48ul;
constexpr size_t KernelVirtualAddressSpaceBase = 0ul - KernelVirtualAddressSpaceWidth;
constexpr size_t KernelVirtualAddressSpaceEnd = KernelVirtualAddressSpaceBase + (KernelVirtualAddressSpaceWidth - KernelAslrAlignment);
constexpr size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1ul;
constexpr size_t KernelVirtualAddressSpaceSize = KernelVirtualAddressSpaceEnd - KernelVirtualAddressSpaceBase;
constexpr size_t KernelPhysicalAddressSpaceBase = 0ul;
constexpr size_t KernelPhysicalAddressSpaceEnd = KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceWidth;
constexpr size_t KernelPhysicalAddressSpaceLast = KernelPhysicalAddressSpaceEnd - 1ul;
constexpr size_t KernelPhysicalAddressSpaceSize = KernelPhysicalAddressSpaceEnd - KernelPhysicalAddressSpaceBase;
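(Editor's note: since the base is computed by unsigned wraparound from a 39-bit width, the kernel's virtual address space is the top 512GiB of the 64-bit space, shortened by one ASLR alignment unit at the end:)
/* 0ul - 2^39 wraps around to the top of the address space. */
static_assert(KernelVirtualAddressSpaceBase == 0xFFFFFF8000000000ul);
static_assert(KernelVirtualAddressSpaceSize == (1ul << 39) - 2_MB);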
enum KMemoryRegionType : u32 {
KMemoryRegionAttr_CarveoutProtected = 0x04000000,
KMemoryRegionAttr_DidKernelMap = 0x08000000,
KMemoryRegionAttr_ShouldKernelMap = 0x10000000,
KMemoryRegionAttr_UserReadOnly = 0x20000000,
KMemoryRegionAttr_NoUserMap = 0x40000000,
KMemoryRegionAttr_LinearMapped = 0x80000000,
KMemoryRegionType_None = 0,
KMemoryRegionType_Kernel = 1,
KMemoryRegionType_Dram = 2,
KMemoryRegionType_CoreLocal = 4,
KMemoryRegionType_VirtualKernelPtHeap = 0x2A,
KMemoryRegionType_VirtualKernelTraceBuffer = 0x4A,
KMemoryRegionType_VirtualKernelInitPt = 0x19A,
KMemoryRegionType_Uart = 0x1D,
KMemoryRegionType_InterruptDistributor = 0x4D,
KMemoryRegionType_InterruptController = 0x2D,
KMemoryRegionType_MemoryController = 0x55,
KMemoryRegionType_MemoryController0 = 0x95,
KMemoryRegionType_MemoryController1 = 0x65,
KMemoryRegionType_PowerManagementController = 0x1A5,
KMemoryRegionType_KernelAutoMap = KMemoryRegionType_Kernel | KMemoryRegionAttr_ShouldKernelMap,
KMemoryRegionType_KernelTemp = 0x31,
KMemoryRegionType_KernelCode = 0x19,
KMemoryRegionType_KernelStack = 0x29,
KMemoryRegionType_KernelMisc = 0x49,
KMemoryRegionType_KernelSlab = 0x89,
KMemoryRegionType_KernelMiscMainStack = 0xB49,
KMemoryRegionType_KernelMiscMappedDevice = 0xD49,
KMemoryRegionType_KernelMiscIdleStack = 0x1349,
KMemoryRegionType_KernelMiscUnknownDebug = 0x1549,
KMemoryRegionType_KernelMiscExceptionStack = 0x2349,
KMemoryRegionType_DramLinearMapped = KMemoryRegionType_Dram | KMemoryRegionAttr_LinearMapped,
KMemoryRegionType_DramReservedEarly = 0x16 | KMemoryRegionAttr_NoUserMap,
KMemoryRegionType_DramPoolPartition = 0x26 | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped,
KMemoryRegionType_DramKernel = 0xE | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected,
KMemoryRegionType_DramKernelCode = 0xCE | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected,
KMemoryRegionType_DramKernelSlab = 0x14E | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected,
KMemoryRegionType_DramKernelPtHeap = 0x24E | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_LinearMapped,
KMemoryRegionType_DramKernelInitPt = 0x44E | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_LinearMapped,
/* These regions aren't normally mapped in retail kernel. */
KMemoryRegionType_KernelTraceBuffer = 0xA6 | KMemoryRegionAttr_UserReadOnly | KMemoryRegionAttr_LinearMapped,
KMemoryRegionType_OnMemoryBootImage = 0x156,
KMemoryRegionType_DTB = 0x256,
};
constexpr ALWAYS_INLINE KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
if (type_id == (type_id | KMemoryRegionType_KernelTraceBuffer)) {
return KMemoryRegionType_VirtualKernelTraceBuffer;
} else if (type_id == (type_id | KMemoryRegionType_DramKernelPtHeap)) {
return KMemoryRegionType_VirtualKernelPtHeap;
} else {
return KMemoryRegionType_Dram;
}
}
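(Editor's note: throughout this header, the test `type_id == (type_id | X)` asks whether every bit of X is already set in type_id, i.e. whether the region's type derives from X. For example, using the enum values above:)
/* DramKernelPtHeap carries every bit of DramKernel, so it derives from it. */
static_assert((KMemoryRegionType_DramKernelPtHeap | KMemoryRegionType_DramKernel) == KMemoryRegionType_DramKernelPtHeap);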
class KMemoryBlock : public util::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> {
NON_COPYABLE(KMemoryBlock);
NON_MOVEABLE(KMemoryBlock);
private:
uintptr_t address;
uintptr_t pair_address;
size_t block_size;
u32 attributes;
u32 type_id;
public:
static constexpr ALWAYS_INLINE int Compare(const KMemoryBlock &lhs, const KMemoryBlock &rhs) {
if (lhs.address < rhs.address) {
return -1;
} else if (lhs.address == rhs.address) {
return 0;
} else {
return 1;
}
}
public:
constexpr ALWAYS_INLINE KMemoryBlock() : address(0), pair_address(0), block_size(0), attributes(0), type_id(0) { /* ... */ }
constexpr ALWAYS_INLINE KMemoryBlock(uintptr_t a, size_t bl, uintptr_t p, u32 r, u32 t) :
address(a), pair_address(p), block_size(bl), attributes(r), type_id(t)
{
/* ... */
}
constexpr ALWAYS_INLINE KMemoryBlock(uintptr_t a, size_t bl, u32 r, u32 t) : KMemoryBlock(a, bl, std::numeric_limits<uintptr_t>::max(), r, t) { /* ... */ }
constexpr ALWAYS_INLINE uintptr_t GetAddress() const {
return this->address;
}
constexpr ALWAYS_INLINE uintptr_t GetPairAddress() const {
return this->pair_address;
}
constexpr ALWAYS_INLINE size_t GetSize() const {
return this->block_size;
}
constexpr ALWAYS_INLINE uintptr_t GetEndAddress() const {
return this->GetAddress() + this->GetSize();
}
constexpr ALWAYS_INLINE uintptr_t GetLastAddress() const {
return this->GetEndAddress() - 1;
}
constexpr ALWAYS_INLINE u32 GetAttributes() const {
return this->attributes;
}
constexpr ALWAYS_INLINE u32 GetType() const {
return this->type_id;
}
constexpr ALWAYS_INLINE void SetType(u32 type) {
MESOSPHERE_INIT_ABORT_UNLESS(this->CanDerive(type));
this->type_id = type;
}
constexpr ALWAYS_INLINE bool Contains(uintptr_t address) const {
return this->GetAddress() <= address && address <= this->GetLastAddress();
}
constexpr ALWAYS_INLINE bool IsDerivedFrom(u32 type) const {
return (this->GetType() | type) == this->GetType();
}
constexpr ALWAYS_INLINE bool HasTypeAttribute(KMemoryRegionType attr) const {
return (this->GetType() | attr) == this->GetType();
}
constexpr ALWAYS_INLINE bool CanDerive(u32 type) const {
return (this->GetType() | type) == type;
}
constexpr ALWAYS_INLINE void SetPairAddress(uintptr_t a) {
this->pair_address = a;
}
constexpr ALWAYS_INLINE void SetTypeAttribute(KMemoryRegionType attr) {
this->type_id |= attr;
}
};
static_assert(std::is_trivially_destructible<KMemoryBlock>::value);
class KMemoryBlockTree {
public:
struct DerivedRegionExtents {
const KMemoryBlock *first_block;
const KMemoryBlock *last_block;
};
private:
using TreeType = util::IntrusiveRedBlackTreeBaseTraits<KMemoryBlock>::TreeType<KMemoryBlock>;
using value_type = TreeType::value_type;
using size_type = TreeType::size_type;
using difference_type = TreeType::difference_type;
using pointer = TreeType::pointer;
using const_pointer = TreeType::const_pointer;
using reference = TreeType::reference;
using const_reference = TreeType::const_reference;
using iterator = TreeType::iterator;
using const_iterator = TreeType::const_iterator;
private:
TreeType tree;
public:
constexpr ALWAYS_INLINE KMemoryBlockTree() : tree() { /* ... */ }
public:
iterator FindContainingBlock(uintptr_t address) {
for (auto it = this->begin(); it != this->end(); it++) {
if (it->Contains(address)) {
return it;
}
}
MESOSPHERE_INIT_ABORT();
}
iterator FindFirstBlockByTypeAttr(u32 type_id, u32 attr = 0) {
for (auto it = this->begin(); it != this->end(); it++) {
if (it->GetType() == type_id && it->GetAttributes() == attr) {
return it;
}
}
MESOSPHERE_INIT_ABORT();
}
DerivedRegionExtents GetDerivedRegionExtents(u32 type_id) {
DerivedRegionExtents extents = { .first_block = nullptr, .last_block = nullptr };
for (auto it = this->cbegin(); it != this->cend(); it++) {
if (it->IsDerivedFrom(type_id)) {
if (extents.first_block == nullptr) {
extents.first_block = std::addressof(*it);
}
extents.last_block = std::addressof(*it);
}
}
MESOSPHERE_INIT_ABORT_UNLESS(extents.first_block != nullptr);
MESOSPHERE_INIT_ABORT_UNLESS(extents.last_block != nullptr);
return extents;
}
public:
NOINLINE bool Insert(uintptr_t address, size_t size, u32 type_id, u32 new_attr = 0, u32 old_attr = 0);
NOINLINE KVirtualAddress GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id);
ALWAYS_INLINE KVirtualAddress GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, u32 type_id, size_t guard_size) {
return this->GetRandomAlignedRegion(size + 2 * guard_size, alignment, type_id) + guard_size;
}
public:
/* Iterator accessors. */
iterator begin() {
return this->tree.begin();
}
const_iterator begin() const {
return this->tree.begin();
}
iterator end() {
return this->tree.end();
}
const_iterator end() const {
return this->tree.end();
}
const_iterator cbegin() const {
return this->begin();
}
const_iterator cend() const {
return this->end();
}
iterator iterator_to(reference ref) {
return this->tree.iterator_to(ref);
}
const_iterator iterator_to(const_reference ref) const {
return this->tree.iterator_to(ref);
}
/* Content management. */
bool empty() const {
return this->tree.empty();
}
reference back() {
return this->tree.back();
}
const_reference back() const {
return this->tree.back();
}
reference front() {
return this->tree.front();
}
const_reference front() const {
return this->tree.front();
}
/* GCC over-eagerly inlines this operation. */
NOINLINE iterator insert(reference ref) {
return this->tree.insert(ref);
}
NOINLINE iterator erase(iterator it) {
return this->tree.erase(it);
}
iterator find(const_reference ref) const {
return this->tree.find(ref);
}
iterator nfind(const_reference ref) const {
return this->tree.nfind(ref);
}
};
class KMemoryBlockAllocator {
NON_COPYABLE(KMemoryBlockAllocator);
NON_MOVEABLE(KMemoryBlockAllocator);
public:
static constexpr size_t MaxMemoryBlocks = 1000;
friend class KMemoryLayout;
private:
KMemoryBlock block_heap[MaxMemoryBlocks];
size_t num_blocks;
private:
constexpr ALWAYS_INLINE KMemoryBlockAllocator() : block_heap(), num_blocks() { /* ... */ }
public:
ALWAYS_INLINE KMemoryBlock *Allocate() {
/* Ensure we stay within the bounds of our heap. */
MESOSPHERE_INIT_ABORT_UNLESS(this->num_blocks < MaxMemoryBlocks);
return &this->block_heap[this->num_blocks++];
}
template<typename... Args>
ALWAYS_INLINE KMemoryBlock *Create(Args&&... args) {
KMemoryBlock *block = this->Allocate();
new (block) KMemoryBlock(std::forward<Args>(args)...);
return block;
}
};
class KMemoryLayout {
private:
static /* constinit */ inline uintptr_t s_linear_phys_to_virt_diff;
static /* constinit */ inline uintptr_t s_linear_virt_to_phys_diff;
static /* constinit */ inline KMemoryBlockAllocator s_block_allocator;
static /* constinit */ inline KMemoryBlockTree s_virtual_tree;
static /* constinit */ inline KMemoryBlockTree s_physical_tree;
static /* constinit */ inline KMemoryBlockTree s_virtual_linear_tree;
static /* constinit */ inline KMemoryBlockTree s_physical_linear_tree;
public:
static ALWAYS_INLINE KMemoryBlockAllocator &GetMemoryBlockAllocator() { return s_block_allocator; }
static ALWAYS_INLINE KMemoryBlockTree &GetVirtualMemoryBlockTree() { return s_virtual_tree; }
static ALWAYS_INLINE KMemoryBlockTree &GetPhysicalMemoryBlockTree() { return s_physical_tree; }
static ALWAYS_INLINE KMemoryBlockTree &GetVirtualLinearMemoryBlockTree() { return s_virtual_linear_tree; }
static ALWAYS_INLINE KMemoryBlockTree &GetPhysicalLinearMemoryBlockTree() { return s_physical_linear_tree; }
static NOINLINE KVirtualAddress GetMainStackTopAddress(s32 core_id) {
return GetVirtualMemoryBlockTree().FindFirstBlockByTypeAttr(KMemoryRegionType_KernelMiscMainStack, static_cast<u32>(core_id))->GetEndAddress();
}
static void InitializeLinearMemoryBlockTrees(KPhysicalAddress aligned_linear_phys_start, KVirtualAddress linear_virtual_start);
};
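(Editor's note: KMemoryBlockAllocator bump-allocates from a fixed 1000-entry array, and Create placement-constructs a block in that storage; KMemoryLayout then exposes the allocator and the four block trees as globals. A hypothetical usage sketch — the address, size, and type are invented for illustration:)
/* Carve a block out of the static heap and insert it into the physical tree. */
KMemoryBlock *block = KMemoryLayout::GetMemoryBlockAllocator().Create(
    /* address = */ 0x80000000ul, /* size = */ 0x200000ul,
    /* attr = */ 0, /* type_id = */ KMemoryRegionType_DramKernel);
KMemoryLayout::GetPhysicalMemoryBlockTree().insert(*block);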
namespace init {
/* These should be generic, regardless of board. */
void SetupCoreLocalRegionMemoryBlocks(KInitialPageTable &page_table, KInitialPageAllocator &page_allocator);
void SetupPoolPartitionMemoryBlocks();
/* These may be implemented in a board-specific manner. */
void SetupDevicePhysicalMemoryBlocks();
void SetupDramPhysicalMemoryBlocks();
}
}

View file

@@ -0,0 +1,23 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours.hpp>
namespace ams::kern {
NORETURN void HorizonKernelMain(s32 core_id);
}

View file

@@ -44,10 +44,18 @@ namespace ams::kern {
#define MESOSPHERE_R_ASSERT(expr) MESOSPHERE_ASSERT_IMPL(R_SUCCEEDED(expr), "Result assertion failed: %s", #expr)
#define MESOSPHERE_ABORT() MESOSPHERE_PANIC("Abort()");
#define MESOSPHERE_INIT_ABORT() do { /* ... */ } while (true)
#define MESOSPHERE_ABORT_UNLESS(expr) \
({ \
if (AMS_UNLIKELY(!(expr))) { \
if (AMS_UNLIKELY(!(expr))) { \
MESOSPHERE_PANIC("Abort(): %s", #expr); \
} \
})
#define MESOSPHERE_INIT_ABORT_UNLESS(expr) \
({ \
if (AMS_UNLIKELY(!(expr))) { \
MESOSPHERE_INIT_ABORT(); \
} \
})
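(Editor's note: note the asymmetry between the two macros — the standard one panics with a message, while the init variant expands MESOSPHERE_INIT_ABORT to a bare infinite loop, presumably because the panic/output machinery is not yet usable this early in boot. Expanded by hand, an init-time check like the ones added throughout this commit becomes roughly:)
/* MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize)) expands to: */
({
    if (AMS_UNLIKELY(!(util::IsAligned(GetInteger(virt_addr), PageSize)))) {
        do { /* ... */ } while (true);
    }
})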