exo2: implement through end of random cache init

Michael Scire 2020-05-11 18:54:35 -07:00 committed by SciresM
parent f66b41c027
commit cbcd1d87fb
11 changed files with 447 additions and 11 deletions

View file

@ -16,6 +16,7 @@
#include <exosphere.hpp>
#include "secmon_boot.hpp"
#include "secmon_boot_functions.hpp"
#include "../smc/secmon_random_cache.hpp"
#include "../secmon_setup.hpp"
#include "../secmon_misc.hpp"
@ -56,13 +57,17 @@ namespace ams::secmon {
/* Setup the SoC security measures. */
secmon::SetupSocSecurity();
/* TODO: More init. */
/* Setup the Cpu core context. */
secmon::SetupCpuCoreContext();
/* Clear the crt0 code that was present in iram. */
secmon::boot::ClearIram();
/* Alert the bootloader that we're initialized. */
secmon_params.secmon_state = pkg1::SecureMonitorState_Initialized;
/* Initialize the random cache. */
secmon::smc::FillRandomCache();
}
}

View file

@ -629,6 +629,99 @@ namespace ams::secmon {
reg::Read (MC + MC_SMMU_TLB_CONFIG);
}
void SetupSecureEl2AndEl1SystemRegisters() {
/* Setup actlr_el2 and actlr_el3. */
{
util::BitPack32 actlr = {};
actlr.Set<hw::ActlrCortexA57::Cpuactlr>(1); /* Enable access to cpuactlr from lower EL. */
actlr.Set<hw::ActlrCortexA57::Cpuectlr>(1); /* Enable access to cpuectlr from lower EL. */
actlr.Set<hw::ActlrCortexA57::L2ctlr>(1); /* Enable access to l2ctlr from lower EL. */
actlr.Set<hw::ActlrCortexA57::L2actlr>(1); /* Enable access to l2actlr from lower EL. */
actlr.Set<hw::ActlrCortexA57::L2ectlr>(1); /* Enable access to l2ectlr from lower EL. */
HW_CPU_SET_ACTLR_EL3(actlr);
HW_CPU_SET_ACTLR_EL2(actlr);
}
/* Setup hcr_el2. */
{
util::BitPack64 hcr = {};
hcr.Set<hw::HcrEl2::Rw>(1); /* EL1 is aarch64 mode. */
HW_CPU_SET_HCR_EL2(hcr);
}
/* Configure all domain access permissions as manager. */
HW_CPU_SET_DACR32_EL2(~0u);
/* Setup sctlr_el1. */
{
util::BitPack64 sctlr = { hw::SctlrEl1::Res1 };
sctlr.Set<hw::SctlrEl1::M>(0); /* Globally disable the MMU. */
sctlr.Set<hw::SctlrEl1::A>(0); /* Disable alignment fault checking. */
sctlr.Set<hw::SctlrEl1::C>(0); /* Globally disable the data and unified caches. */
sctlr.Set<hw::SctlrEl1::Sa>(1); /* Enable stack alignment checking. */
sctlr.Set<hw::SctlrEl1::Sa0>(1); /* Enable el0 stack alignment checking. */
sctlr.Set<hw::SctlrEl1::Cp15BEn>(1); /* Enable cp15 barrier operations. */
sctlr.Set<hw::SctlrEl1::Thee>(0); /* Disable ThumbEE. */
sctlr.Set<hw::SctlrEl1::Itd>(0); /* Enable it instructions. */
sctlr.Set<hw::SctlrEl1::Sed>(0); /* Enable setend instruction. */
sctlr.Set<hw::SctlrEl1::Uma>(0); /* Disable el0 interrupt mask access. */
sctlr.Set<hw::SctlrEl1::I>(0); /* Globally disable the instruction cache. */
sctlr.Set<hw::SctlrEl1::Dze>(0); /* Disable el0 access to dc zva instruction. */
sctlr.Set<hw::SctlrEl1::Ntwi>(1); /* Do not trap wfi instructions in el0. */
sctlr.Set<hw::SctlrEl1::Ntwe>(1); /* Do not trap wfe instructions in el0. */
sctlr.Set<hw::SctlrEl1::Wxn>(0); /* Do not force writable pages to be ExecuteNever. */
sctlr.Set<hw::SctlrEl1::E0e>(0); /* Data accesses in el0 are little endian. */
sctlr.Set<hw::SctlrEl1::Ee>(0); /* Exceptions should be little endian. */
sctlr.Set<hw::SctlrEl1::Uci>(0); /* Disable el0 access to dc cvau, dc civac, dc cvac, ic ivau. */
HW_CPU_SET_SCTLR_EL1(sctlr);
}
/* Setup sctlr_el2. */
{
util::BitPack64 sctlr = { hw::SctlrEl2::Res1 };
sctlr.Set<hw::SctlrEl2::M>(0); /* Globally disable the MMU. */
sctlr.Set<hw::SctlrEl2::A>(0); /* Disable alignment fault checking. */
sctlr.Set<hw::SctlrEl2::C>(0); /* Globally disable the data and unified caches. */
sctlr.Set<hw::SctlrEl2::Sa>(1); /* Enable stack alignment checking. */
sctlr.Set<hw::SctlrEl2::I>(0); /* Globally disable the instruction cache. */
sctlr.Set<hw::SctlrEl2::Wxn>(0); /* Do not force writable pages to be ExecuteNever. */
sctlr.Set<hw::SctlrEl2::Ee>(0); /* Exceptions should be little endian. */
HW_CPU_SET_SCTLR_EL2(sctlr);
}
/* Ensure instruction consistency. */
hw::InstructionSynchronizationBarrier();
}
void SetupNonSecureSystemRegisters(u32 tsc_frequency) {
/* Set cntfrq_el0. */
HW_CPU_SET_CNTFRQ_EL0(tsc_frequency);
/* Set cnthctl_el2. */
{
util::BitPack32 cnthctl = {};
cnthctl.Set<hw::CnthctlEl2::El1PctEn>(1); /* Do not trap accesses to cntpct_el0. */
cnthctl.Set<hw::CnthctlEl2::El1PcEn>(1); /* Do not trap accesses to cntp_ctl_el0, cntp_cval_el0, and cntp_tval_el0. */
cnthctl.Set<hw::CnthctlEl2::EvntEn>(0); /* Disable the event stream. */
cnthctl.Set<hw::CnthctlEl2::EvntDir>(0); /* Trigger events on 0 -> 1 transition. */
cnthctl.Set<hw::CnthctlEl2::EvntI>(0); /* Select bit0 of cntpct_el0 as the event stream trigger. */
HW_CPU_SET_CNTHCTL_EL2(cnthctl);
}
/* Ensure instruction consistency. */
hw::InstructionSynchronizationBarrier();
}
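With El1PctEn and El1PcEn set, non-secure EL1/EL0 code can read the generic timer counter and its frequency directly instead of trapping to EL2. A minimal illustrative sketch of the kind of access this permits (hypothetical helper names, not part of this commit):
/* Hypothetical non-secure reader, for illustration only. */
static inline u64 ReadCntpct() {
    u64 ticks;
    __asm__ __volatile__("mrs %0, cntpct_el0" : "=r"(ticks));
    return ticks;
}
static inline u64 ReadCntfrq() {
    u64 freq;
    __asm__ __volatile__("mrs %0, cntfrq_el0" : "=r"(freq));
    return freq;
}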
}
void Setup1() {
@ -747,4 +840,34 @@ namespace ams::secmon {
}
void SetupCpuCoreContext() {
/* Get the tsc frequency. */
const u32 tsc_frequency = reg::Read(MemoryRegionVirtualDeviceSysCtr0.GetAddress() + SYSCTR0_CNTFID0);
/* Setup the secure EL2/EL1 system registers. */
SetupSecureEl2AndEl1SystemRegisters();
/* Setup the non-secure system registers. */
SetupNonSecureSystemRegisters(tsc_frequency);
/* Reset the cpu flow controller registers. */
flow::ResetCpuRegisters(hw::GetCurrentCoreId());
/* Initialize the core unique gic registers. */
gic::InitializeCoreUnique();
/* Configure cpu fiq. */
constexpr int FiqInterruptId = 28;
gic::SetPriority (FiqInterruptId, gic::HighestPriority);
gic::SetInterruptGroup(FiqInterruptId, 0);
gic::SetEnable (FiqInterruptId, true);
/* Restore the cpu's debug registers. */
RestoreDebugRegisters();
}
void SetupCpuSErrorDebug() {
}
}

View file

@ -24,6 +24,8 @@ namespace ams::secmon {
constexpr inline int KernelCarveoutCount = 2;
void SetupCpuMemoryControllersEnableMmu();
void SetupCpuCoreContext();
void SetupCpuSErrorDebug();
void SetupSocDmaControllers();
void SetupSocSecurity();

View file

@ -122,7 +122,7 @@ namespace ams::secmon {
void EnableMmu() {
/* Create sctlr value. */
util::BitPack32 sctlr = { hw::SctlrEl3::Res1 };
util::BitPack64 sctlr = { hw::SctlrEl3::Res1 };
sctlr.Set<hw::SctlrEl3::M>(1); /* Globally enable the MMU. */
sctlr.Set<hw::SctlrEl3::A>(0); /* Disable alignment fault checking. */
sctlr.Set<hw::SctlrEl3::C>(1); /* Globally enable the data and unified caches. */

View file

@ -0,0 +1,103 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <exosphere.hpp>
#include "secmon_smc_common.hpp"
#include "secmon_random_cache.hpp"
namespace ams::secmon::smc {
namespace {
constexpr inline size_t MaxRandomBytes = sizeof(SmcArguments) - sizeof(SmcArguments{}.r[0]);
constinit int g_random_offset_low = 0;
constinit int g_random_offset_high = 0;
void FillRandomCache(int offset, int size) {
/* Get the cache. */
u8 * const random_cache_loc = GetRandomBytesCache() + offset;
/* Flush the region we're about to fill to ensure consistency with the SE. */
hw::FlushDataCache(random_cache_loc, size);
hw::DataSynchronizationBarrierInnerShareable();
/* Generate random bytes. */
se::GenerateRandomBytes(random_cache_loc, size);
hw::DataSynchronizationBarrierInnerShareable();
/* Flush to ensure the CPU sees consistent data for the region. */
hw::FlushDataCache(random_cache_loc, size);
hw::DataSynchronizationBarrierInnerShareable();
}
}
void FillRandomCache() {
/* Fill the cache. */
FillRandomCache(0, GetRandomBytesCacheSize());
/* Set the extents. */
g_random_offset_low = 0;
g_random_offset_high = GetRandomBytesCacheSize() - 1;
}
void RefillRandomCache() {
/* Check that we need to do any refilling. */
if (const int used_start = (g_random_offset_high + 1) % GetRandomBytesCacheSize(); used_start != g_random_offset_low) {
if (used_start < g_random_offset_low) {
/* The region we need to fill is after used_start but before g_random_offset_low. */
const auto size = g_random_offset_low - used_start;
FillRandomCache(used_start, size);
g_random_offset_high += size;
} else {
/* We need to fill the space from high to the end and from low to start. */
const int high_size = GetRandomBytesCacheSize() - used_start;
if (high_size > 0) {
FillRandomCache(used_start, high_size);
g_random_offset_high += high_size;
}
const int low_size = g_random_offset_low;
if (low_size > 0) {
FillRandomCache(0, low_size);
g_random_offset_high += low_size;
}
}
g_random_offset_high %= GetRandomBytesCacheSize();
}
}
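/* Worked example of the refill bookkeeping, assuming GetRandomBytesCacheSize() == 0x100 (illustrative size only). */
/* - Contiguous case: low = 0x80, high = 0x3F. used_start = 0x40 < low, so the bytes in [0x40, 0x80) are */
/*   regenerated in a single fill and high becomes 0x7F. */
/* - Wrapping case: low = 0x20, high = 0xBF. used_start = 0xC0 >= low, so [0xC0, 0x100) and then [0x00, 0x20) */
/*   are regenerated in two fills, and high wraps around to 0x1F. */
/* In both cases the cache ends up full again, with high == low - 1 modulo the cache size. */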
void GetRandomFromCache(void *dst, size_t size) {
u8 * const cache = GetRandomBytesCache();
u8 * cur_dst = static_cast<u8 *>(dst);
/* NOTE: Nintendo does not do bounds checking here, and does not do multiple reads when the get would wrap around. */
while (size > 0) {
const size_t copy_size = std::min(size, static_cast<size_t>(GetRandomBytesCacheSize() - g_random_offset_low));
std::memcpy(cur_dst, cache + g_random_offset_low, copy_size);
cur_dst += copy_size;
g_random_offset_low += copy_size;
size -= copy_size;
if (g_random_offset_low >= GetRandomBytesCacheSize()) {
g_random_offset_low = 0;
}
}
}
}
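For context, a minimal sketch of how an SMC handler might consume this cache; the handler name, result values, and argument layout are assumptions for illustration and are not part of this commit:
/* Hypothetical consumer of the random cache, illustrative only. */
SmcResult GetRandomBytesSketch(SmcArguments &args) {
    const size_t size = args.r[1];
    /* Only up to MaxRandomBytes fit in the output registers. */
    if (size > MaxRandomBytes) {
        return SmcResult::InvalidArgument;
    }
    /* Serve the request from the pre-generated cache... */
    GetRandomFromCache(std::addressof(args.r[1]), size);
    /* ...and regenerate the consumed bytes from the SE afterwards. */
    RefillRandomCache();
    return SmcResult::Success;
}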

View file

@ -0,0 +1,26 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <exosphere.hpp>
#include "secmon_smc_common.hpp"
namespace ams::secmon::smc {
void FillRandomCache();
void RefillRandomCache();
void GetRandomFromCache(void *dst, size_t size);
}