mirror of
https://github.com/Atmosphere-NX/Atmosphere.git
synced 2025-05-24 11:46:58 -04:00
kern: Implement most of memory init (all cores hit main, but still more to do)
This commit is contained in:
parent
b2e522c0a0
commit
e7dee2a9fc
22 changed files with 1246 additions and 81 deletions
|
@ -19,6 +19,8 @@ namespace ams::kern::arm64::cpu {
|
|||
|
||||
namespace {
|
||||
|
||||
std::atomic<s32> g_all_core_sync_count;
|
||||
|
||||
void FlushEntireDataCacheImpl(int level) {
|
||||
/* Used in multiple locations. */
|
||||
const u64 level_sel_value = static_cast<u64>(level << 1);
|
||||
|
@ -28,7 +30,7 @@ namespace ams::kern::arm64::cpu {
|
|||
cpu::InstructionMemoryBarrier();
|
||||
|
||||
/* Get cache size id info. */
|
||||
CacheSizeIdAccessor ccsidr_el1;
|
||||
CacheSizeIdRegisterAccessor ccsidr_el1;
|
||||
const int num_sets = ccsidr_el1.GetNumberOfSets();
|
||||
const int num_ways = ccsidr_el1.GetAssociativity();
|
||||
const int line_size = ccsidr_el1.GetLineSize();
|
||||
|
@ -49,7 +51,7 @@ namespace ams::kern::arm64::cpu {
|
|||
}
|
||||
|
||||
void FlushEntireDataCacheShared() {
|
||||
CacheLineIdAccessor clidr_el1;
|
||||
CacheLineIdRegisterAccessor clidr_el1;
|
||||
const int levels_of_coherency = clidr_el1.GetLevelsOfCoherency();
|
||||
const int levels_of_unification = clidr_el1.GetLevelsOfUnification();
|
||||
|
||||
|
@ -59,11 +61,28 @@ namespace ams::kern::arm64::cpu {
|
|||
}
|
||||
|
||||
/* Flushes only the data cache levels private to this core: every level */
/* strictly below the level of unification reported by CLIDR_EL1, */
/* iterated from the outermost such level inward. Counterpart to */
/* FlushEntireDataCacheShared(), which handles the shared levels. */
void FlushEntireDataCacheLocal() {
    /* Read the cache topology from CLIDR_EL1. */
    CacheLineIdRegisterAccessor clidr_el1;
    const int levels_of_unification = clidr_el1.GetLevelsOfUnification();

    /* Flush each local level; levels are 0-indexed, outermost first. */
    for (int level = levels_of_unification - 1; level >= 0; level--) {
        FlushEntireDataCacheImpl(level);
    }
}
|
||||
}
|
||||
|
||||
/* Reusable all-core rendezvous barrier. Every core calls this; no core */
/* returns until all cpu::NumCores cores have arrived. Cores are then */
/* released one at a time in arrival order, and the last core to leave */
/* resets the counter to zero so the barrier can be used again. */
NOINLINE void SynchronizeAllCores() {
    /* Wait until the count can be read. */
    /* (Values >= NumCores mean a previous barrier is still releasing */
    /* cores; don't join until it has fully drained.) */
    while (!(g_all_core_sync_count < static_cast<s32>(cpu::NumCores))) { /* ... */ }

    /* Claim our slot in the arrival order; counter now counts arrivals. */
    const s32 per_core_idx = g_all_core_sync_count.fetch_add(1);

    /* Loop until it's our turn. This will act on each core in order. */
    /* (Once all cores have arrived the count reaches NumCores, releasing */
    /* arrival 0; each released core bumps the count to release the next.) */
    while (g_all_core_sync_count != per_core_idx + static_cast<s32>(cpu::NumCores)) { /* ... */ }

    /* Release the next core in line, or reset the barrier if we are the */
    /* last core out (count == 2 * NumCores - 1). */
    if (g_all_core_sync_count != 2 * static_cast<s32>(cpu::NumCores) - 1) {
        g_all_core_sync_count++;
    } else {
        g_all_core_sync_count = 0;
    }
}
|
||||
|
||||
}
|
||||
|
|
|
@ -0,0 +1,56 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#include <mesosphere.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
namespace {
|
||||
|
||||
constexpr uintptr_t DramPhysicalAddress = 0x80000000;
|
||||
constexpr size_t ReservedEarlyDramSize = 0x60000;
|
||||
|
||||
}
|
||||
|
||||
namespace init {
|
||||
|
||||
/* Registers the hard-coded physical MMIO apertures for devices the */
/* kernel needs (UART, memory controllers, PMC, GIC distributor and */
/* interrupt controller, and several unnamed regions) in the physical */
/* memory block tree. Addresses are board-specific device apertures — */
/* presumably Tegra X1, per the surrounding code; TODO confirm. */
void SetupDevicePhysicalMemoryBlocks() {
    /* TODO: Give these constexpr defines somewhere? */
    MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x70006000, 0x40, KMemoryRegionType_Uart | KMemoryRegionAttr_ShouldKernelMap));
    /* Memory controller register banks; hidden from userland mappings. */
    MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x70019000, 0x1000, KMemoryRegionType_MemoryController | KMemoryRegionAttr_NoUserMap));
    MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x7001C000, 0x1000, KMemoryRegionType_MemoryController0 | KMemoryRegionAttr_NoUserMap));
    MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x7001D000, 0x1000, KMemoryRegionType_MemoryController1 | KMemoryRegionAttr_NoUserMap));
    MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x7000E000, 0x400, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
    MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x7000E400, 0xC00, KMemoryRegionType_PowerManagementController | KMemoryRegionAttr_NoUserMap));
    MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x50040000, 0x1000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
    /* Interrupt distributor/controller must also be mapped by the kernel. */
    MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x50041000, 0x1000, KMemoryRegionType_InterruptDistributor | KMemoryRegionAttr_ShouldKernelMap | KMemoryRegionAttr_NoUserMap));
    MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x50042000, 0x1000, KMemoryRegionType_InterruptController | KMemoryRegionAttr_ShouldKernelMap | KMemoryRegionAttr_NoUserMap));
    MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x50043000, 0x1D000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
    MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x6000F000, 0x1000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
    MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x6001DC00, 0x400, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
}
|
||||
|
||||
void SetupDramPhysicalMemoryBlocks() {
|
||||
const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
|
||||
const KPhysicalAddress physical_memory_base_address = KSystemControl::Init::GetKernelPhysicalBaseAddress(DramPhysicalAddress);
|
||||
|
||||
/* Insert blocks into the tree. */
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(GetInteger(physical_memory_base_address), intended_memory_size, KMemoryRegionType_Dram));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(GetInteger(physical_memory_base_address), ReservedEarlyDramSize, KMemoryRegionType_DramReservedEarly));
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
|
@ -24,7 +24,7 @@ namespace ams::kern {
|
|||
/* TODO: Move this into a header for the MC in general. */
|
||||
constexpr u32 MemoryControllerConfigurationRegister = 0x70019050;
|
||||
u32 config_value;
|
||||
MESOSPHERE_ABORT_UNLESS(smc::init::ReadWriteRegister(&config_value, MemoryControllerConfigurationRegister, 0, 0));
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(smc::init::ReadWriteRegister(&config_value, MemoryControllerConfigurationRegister, 0, 0));
|
||||
return static_cast<size_t>(config_value & 0x3FFF) << 20;
|
||||
}
|
||||
|
||||
|
@ -40,24 +40,24 @@ namespace ams::kern {
|
|||
return value;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE size_t GetIntendedMemorySizeForInit() {
|
||||
switch (GetKernelConfigurationForInit().Get<smc::KernelConfiguration::MemorySize>()) {
|
||||
case smc::MemorySize_4GB:
|
||||
default: /* All invalid modes should go to 4GB. */
|
||||
return 4_GB;
|
||||
case smc::MemorySize_6GB:
|
||||
return 6_GB;
|
||||
case smc::MemorySize_8GB:
|
||||
return 8_GB;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/* Initialization. */
|
||||
/* Returns the DRAM size the kernel is configured to use, as selected by */
/* the secure monitor's kernel configuration. Unknown/invalid memory-size */
/* modes deliberately fall back to 4GB. */
size_t KSystemControl::Init::GetIntendedMemorySize() {
    const auto memory_mode = GetKernelConfigurationForInit().Get<smc::KernelConfiguration::MemorySize>();
    switch (memory_mode) {
        case smc::MemorySize_6GB:
            return 6_GB;
        case smc::MemorySize_8GB:
            return 8_GB;
        case smc::MemorySize_4GB:
        default: /* All invalid modes should go to 4GB. */
            return 4_GB;
    }
}
|
||||
|
||||
KPhysicalAddress KSystemControl::Init::GetKernelPhysicalBaseAddress(uintptr_t base_address) {
|
||||
const size_t real_dram_size = GetRealMemorySizeForInit();
|
||||
const size_t intended_dram_size = GetIntendedMemorySizeForInit();
|
||||
const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize();
|
||||
if (intended_dram_size * 2 < real_dram_size) {
|
||||
return base_address;
|
||||
} else {
|
||||
|
@ -69,9 +69,13 @@ namespace ams::kern {
|
|||
return GetKernelConfigurationForInit().Get<smc::KernelConfiguration::IncreaseThreadResourceLimit>();
|
||||
}
|
||||
|
||||
/* Powers on the given core via the secure monitor, starting it at */
/* entrypoint with arg in its first argument register. Thin wrapper */
/* over the init-time SMC path. */
void KSystemControl::Init::CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
    smc::init::CpuOn(core_id, entrypoint, arg);
}
|
||||
|
||||
/* Randomness for Initialization. */
|
||||
/* Fills dst with size random bytes obtained from the secure monitor. */
/* The 0x38-byte cap is the maximum payload a single call can return — */
/* presumably the SMC result-register capacity; TODO confirm. */
void KSystemControl::Init::GenerateRandomBytes(void *dst, size_t size) {
    MESOSPHERE_INIT_ABORT_UNLESS(size <= 0x38);
    smc::init::GenerateRandomBytes(dst, size);
}
|
||||
|
||||
|
|
|
@ -103,6 +103,11 @@ namespace ams::kern::smc {
|
|||
/* SMC functionality needed for init. */
|
||||
namespace init {
|
||||
|
||||
/* Issues the CpuOn secure monitor call using the init-time calling */
/* path (CallPrivilegedSecureMonitorFunctionForInit), for use before */
/* the kernel's normal SMC infrastructure is up. */
void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
    /* Arguments are packed in SMC register order: function id, then x1..x3. */
    SecureMonitorArguments args = { FunctionId_CpuOn, core_id, entrypoint, arg };
    CallPrivilegedSecureMonitorFunctionForInit(args);
}
|
||||
|
||||
void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) {
|
||||
SecureMonitorArguments args = { FunctionId_GetConfig, static_cast<u32>(config_item) };
|
||||
CallPrivilegedSecureMonitorFunctionForInit(args);
|
||||
|
|
|
@ -79,6 +79,7 @@ namespace ams::kern::smc {
|
|||
|
||||
namespace init {
|
||||
|
||||
void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
|
||||
void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item);
|
||||
void GenerateRandomBytes(void *dst, size_t size);
|
||||
bool ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value);
|
||||
|
|
223
libraries/libmesosphere/source/kern_k_memory_layout.cpp
Normal file
223
libraries/libmesosphere/source/kern_k_memory_layout.cpp
Normal file
|
@ -0,0 +1,223 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#include <mesosphere.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Carves the region [address, address + size) out of the existing block */
/* that contains it, re-typing the carved region to type_id with new_attr. */
/* The containing block must currently have attributes old_attr, must */
/* fully cover the region, and must permit derivation to type_id. On */
/* success the original block is replaced by up to three blocks (leading */
/* remainder, the new region, trailing remainder) and true is returned; */
/* on any precondition failure the tree is left untouched and false is */
/* returned. */
bool KMemoryBlockTree::Insert(uintptr_t address, size_t size, u32 type_id, u32 new_attr, u32 old_attr) {
    /* Locate the memory block that contains the address. */
    auto it = this->FindContainingBlock(address);

    /* We require that the old attr is correct. */
    if (it->GetAttributes() != old_attr) {
        return false;
    }

    /* We further require that the block can be split from the old block. */
    /* (The inserted region must not extend past the containing block.) */
    const uintptr_t inserted_block_end = address + size;
    const uintptr_t inserted_block_last = inserted_block_end - 1;
    if (it->GetLastAddress() < inserted_block_last) {
        return false;
    }

    /* Further, we require that the type id is a valid transformation. */
    if (!it->CanDerive(type_id)) {
        return false;
    }

    /* Cache information from the block before we remove it. */
    KMemoryBlock *cur_block = std::addressof(*it);
    const uintptr_t old_address = it->GetAddress();
    const size_t old_size = it->GetSize();
    const uintptr_t old_end = old_address + old_size;
    const uintptr_t old_last = old_end - 1;
    const uintptr_t old_pair = it->GetPairAddress();
    const u32 old_type = it->GetType();

    /* Erase the existing block from the tree. */
    this->erase(it);

    /* If we need to insert a block before the region, do so. */
    /* (The erased node's storage is reused via placement-new for the */
    /* leading remainder; a fresh node is then allocated for the region.) */
    if (old_address != address) {
        new (cur_block) KMemoryBlock(old_address, address - old_address, old_pair, old_attr, old_type);
        this->insert(*cur_block);
        cur_block = KMemoryLayout::GetMemoryBlockAllocator().Allocate();
    }

    /* Insert a new block. */
    /* Pair addresses are offset in lockstep with the split; the all-ones */
    /* value is a "no pair" sentinel and is propagated unchanged. */
    const uintptr_t new_pair = (old_pair != std::numeric_limits<uintptr_t>::max()) ? old_pair + (address - old_address) : old_pair;
    new (cur_block) KMemoryBlock(address, size, new_pair, new_attr, type_id);
    this->insert(*cur_block);

    /* If we need to insert a block after the region, do so. */
    if (old_last != inserted_block_last) {
        const uintptr_t after_pair = (old_pair != std::numeric_limits<uintptr_t>::max()) ? old_pair + (inserted_block_end - old_address) : old_pair;
        this->insert(*KMemoryLayout::GetMemoryBlockAllocator().Create(inserted_block_end, old_end - inserted_block_end, after_pair, old_attr, old_type));
    }

    return true;
}
|
||||
|
||||
/* Selects a random, alignment-aligned address for a region of the given */
/* size that lies entirely within a single block of type type_id. */
/* Candidates are drawn uniformly from the full extents of all blocks */
/* derived from type_id and rejected until one fits; does not return */
/* until a valid candidate is found. */
KVirtualAddress KMemoryBlockTree::GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id) {
    /* We want to find the total extents of the type id. */
    const auto extents = this->GetDerivedRegionExtents(type_id);

    /* Ensure that our alignment is correct. */
    /* (The first block's base must be aligned so that aligning a candidate */
    /* down can never move it below the extents.) */
    MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(extents.first_block->GetAddress(), alignment));

    const uintptr_t first_address = extents.first_block->GetAddress();
    const uintptr_t last_address = extents.last_block->GetLastAddress();

    /* Rejection-sample candidates until one satisfies every constraint. */
    while (true) {
        const uintptr_t candidate = util::AlignDown(KSystemControl::Init::GenerateRandomRange(first_address, last_address), alignment);

        /* Ensure that the candidate doesn't overflow with the size. */
        if (!(candidate < candidate + size)) {
            continue;
        }

        const uintptr_t candidate_last = candidate + size - 1;

        /* Ensure that the candidate fits within the region. */
        if (candidate_last > last_address) {
            continue;
        }

        /* Locate the candidate block, and ensure it fits. */
        /* (The whole region must lie inside one block, not span blocks.) */
        const KMemoryBlock *candidate_block = std::addressof(*this->FindContainingBlock(candidate));
        if (candidate_last > candidate_block->GetLastAddress()) {
            continue;
        }

        /* Ensure that the block has the correct type id. */
        if (candidate_block->GetType() != type_id)
            continue;

        return candidate;
    }
}
|
||||
|
||||
/* Records the offset between the linear-mapped physical and virtual */
/* regions, then populates the linear physical/virtual block trees with */
/* copies of the relevant blocks from the main trees. */
void KMemoryLayout::InitializeLinearMemoryBlockTrees(KPhysicalAddress aligned_linear_phys_start, KVirtualAddress linear_virtual_start) {
    /* Set static differences. */
    /* (The two differences are each other's negation under unsigned */
    /* wraparound; storing both makes each translation a single add.) */
    s_linear_phys_to_virt_diff = GetInteger(linear_virtual_start) - GetInteger(aligned_linear_phys_start);
    s_linear_virt_to_phys_diff = GetInteger(aligned_linear_phys_start) - GetInteger(linear_virtual_start);

    /* Initialize linear trees. */
    /* Physical side: only blocks flagged as linear-mapped participate. */
    for (auto &block : GetPhysicalMemoryBlockTree()) {
        if (!block.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
            continue;
        }
        GetPhysicalLinearMemoryBlockTree().insert(*GetMemoryBlockAllocator().Create(block.GetAddress(), block.GetSize(), block.GetAttributes(), block.GetType()));
    }

    /* Virtual side: only DRAM-derived blocks participate. */
    for (auto &block : GetVirtualMemoryBlockTree()) {
        if (!block.IsDerivedFrom(KMemoryRegionType_Dram)) {
            continue;
        }
        GetVirtualLinearMemoryBlockTree().insert(*GetMemoryBlockAllocator().Create(block.GetAddress(), block.GetSize(), block.GetAttributes(), block.GetType()));
    }
}
|
||||
|
||||
namespace init {
|
||||
|
||||
namespace {
|
||||
|
||||
|
||||
constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);
|
||||
|
||||
constexpr size_t CoreLocalRegionAlign = PageSize;
|
||||
constexpr size_t CoreLocalRegionSize = PageSize * (1 + cpu::NumCores);
|
||||
constexpr size_t CoreLocalRegionSizeWithGuards = CoreLocalRegionSize + 2 * PageSize;
|
||||
constexpr size_t CoreLocalRegionBoundsAlign = 1_GB;
|
||||
/* TODO: static_assert(CoreLocalRegionSize == sizeof(KCoreLocalRegion)); */
|
||||
|
||||
/* Picks a random virtual address for the core local region. The chosen */
/* span (with one guard page on each side) must sit inside a single */
/* unused (KMemoryRegionType_None) block, must not straddle a 1GB */
/* boundary, and the containing block must cover the full surrounding */
/* 1GB-aligned window. Returns the address past the leading guard page. */
KVirtualAddress GetCoreLocalRegionVirtualAddress() {
    /* Rejection-sample candidates until all placement constraints hold. */
    while (true) {
        const uintptr_t candidate_start = GetInteger(KMemoryLayout::GetVirtualMemoryBlockTree().GetRandomAlignedRegion(CoreLocalRegionSizeWithGuards, CoreLocalRegionAlign, KMemoryRegionType_None));
        const uintptr_t candidate_end = candidate_start + CoreLocalRegionSizeWithGuards;
        const uintptr_t candidate_last = candidate_end - 1;

        const KMemoryBlock *containing_block = std::addressof(*KMemoryLayout::GetVirtualMemoryBlockTree().FindContainingBlock(candidate_start));

        /* The whole guarded span must fit in the containing block. */
        if (candidate_last > containing_block->GetLastAddress()) {
            continue;
        }

        /* The block must still be unclaimed. */
        if (containing_block->GetType() != KMemoryRegionType_None) {
            continue;
        }

        /* The span must not cross a CoreLocalRegionBoundsAlign (1GB) boundary. */
        if (util::AlignDown(candidate_start, CoreLocalRegionBoundsAlign) != util::AlignDown(candidate_last, CoreLocalRegionBoundsAlign)) {
            continue;
        }

        /* The containing block must extend to the start of the aligned window... */
        if (containing_block->GetAddress() > util::AlignDown(candidate_start, CoreLocalRegionBoundsAlign)) {
            continue;
        }

        /* ...and to the end of the aligned window. */
        if (util::AlignUp(candidate_last, CoreLocalRegionBoundsAlign) - 1 > containing_block->GetLastAddress()) {
            continue;
        }

        /* Skip the leading guard page; the caller gets the usable start. */
        return candidate_start + PageSize;
    }
}
|
||||
|
||||
}
|
||||
|
||||
/* Chooses a virtual address for the core local region, allocates one */
/* backing page per core plus a per-core copy of the L1 TTBR1 page table, */
/* maps the region into each core's table, and records each core's init */
/* arguments. */
/* NOTE(review): page_table is currently unused — presumably reserved for */
/* later init work; confirm before removing. */
void SetupCoreLocalRegionMemoryBlocks(KInitialPageTable &page_table, KInitialPageAllocator &page_allocator) {
    /* Pick a random virtual address for the region and register it. */
    const KVirtualAddress core_local_virt_start = GetCoreLocalRegionVirtualAddress();
    MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(core_local_virt_start), CoreLocalRegionSize, KMemoryRegionType_CoreLocal));

    /* Allocate a page for each core. */
    KPhysicalAddress core_local_region_start_phys[cpu::NumCores] = {};
    for (size_t i = 0; i < cpu::NumCores; i++) {
        core_local_region_start_phys[i] = page_allocator.Allocate();
    }

    /* Allocate an l1 page table for each core. Core 0 reuses the live */
    /* table; the others receive byte copies so all cores share mappings. */
    KPhysicalAddress core_l1_ttbr1_phys[cpu::NumCores] = {};
    core_l1_ttbr1_phys[0] = util::AlignDown(cpu::GetTtbr1El1(), PageSize);
    for (size_t i = 1; i < cpu::NumCores; i++) {
        core_l1_ttbr1_phys[i] = page_allocator.Allocate();
        std::memcpy(reinterpret_cast<void *>(GetInteger(core_l1_ttbr1_phys[i])), reinterpret_cast<void *>(GetInteger(core_l1_ttbr1_phys[0])), PageSize);
    }

    /* Use the l1 page table for each core to map the core local region for each core. */
    for (size_t i = 0; i < cpu::NumCores; i++) {
        KInitialPageTable temp_pt(core_l1_ttbr1_phys[i], KInitialPageTable::NoClear{});

        /* BUGFIX: map the pages allocated for the core local region */
        /* (core_local_region_start_phys), not the l1 page tables — the */
        /* original mapped core_l1_ttbr1_phys here, leaving the allocated */
        /* region pages unmapped and exposing the live page tables as RW */
        /* data. First page is core i's own region... */
        temp_pt.Map(core_local_virt_start, PageSize, core_local_region_start_phys[i], KernelRwDataAttribute, page_allocator);
        /* ...followed by every core's region at a fixed per-core offset. */
        for (size_t j = 0; j < cpu::NumCores; j++) {
            temp_pt.Map(core_local_virt_start + (j + 1) * PageSize, PageSize, core_local_region_start_phys[j], KernelRwDataAttribute, page_allocator);
        }

        /* Setup the InitArguments. */
        SetInitArguments(static_cast<s32>(i), core_local_region_start_phys[i], GetInteger(core_l1_ttbr1_phys[i]));
    }

    /* Ensure the InitArguments are flushed to cache. */
    StoreInitArguments();
}
|
||||
|
||||
/* Placeholder: pool-partition region setup is not yet implemented in */
/* this commit. Intentionally a no-op. */
void SetupPoolPartitionMemoryBlocks() {
    /* TODO */
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
}
|
25
libraries/libmesosphere/source/kern_main.cpp
Normal file
25
libraries/libmesosphere/source/kern_main.cpp
Normal file
|
@ -0,0 +1,25 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#include <mesosphere.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
/* Kernel C++ entry point, executed by every core after early asm init. */
/* Currently only rendezvouses the cores and spins forever; the rest of */
/* kernel initialization is still to be written. core_id is as yet unused. */
NORETURN void HorizonKernelMain(s32 core_id) {
    cpu::SynchronizeAllCores();
    while (true) { /* ... */ }
}
|
||||
|
||||
}
|
Loading…
Add table
Add a link
Reference in a new issue