mesosphere: Implement kernelldr through first page table mapping

This commit is contained in:
Michael Scire 2019-12-13 01:21:43 -08:00 committed by SciresM
parent b5becba8ff
commit 2866cb5fe6
24 changed files with 1520 additions and 8 deletions

View file

@ -49,6 +49,15 @@ SECTIONS
. = ALIGN(8);
} :krnlldr
/* .vectors. */
. = ALIGN(2K);
__vectors_start__ = . ;
.vectors :
{
KEEP( *(.vectors) )
. = ALIGN(8);
} :krnlldr
/* =========== RODATA section =========== */
. = ALIGN(8);
__rodata_start = . ;
@ -64,6 +73,7 @@ SECTIONS
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table .gcc_except_table.*) } :krnlldr
.gnu_extab : ONLY_IF_RO { *(.gnu_extab*) } : rodata
__dynamic__start__ = . ;
.dynamic : { *(.dynamic) } :krnlldr :dyn
.dynsym : { *(.dynsym) } :krnlldr
.dynstr : { *(.dynstr) } :krnlldr

View file

@ -0,0 +1,161 @@
/*
* Copyright (c) 2018-2019 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* Some macros taken from https://github.com/ARM-software/arm-trusted-firmware/blob/master/include/common/aarch64/asm_macros.S */
/*
* Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* Declare the exception vector table, enforcing it is aligned on a
* 2KB boundary, as required by the ARMv8 architecture.
* Use zero bytes as the fill value to be stored in the padding bytes
* so that it inserts illegal AArch64 instructions. This increases
* security, robustness and potentially facilitates debugging.
*/
/* vector_base: open a vector table named \label in \section_name,
 * aligned to a 2KB (2^11) boundary as VBAR_ELx requires; padding is
 * zero-filled so gaps decode as illegal A64 instructions. */
.macro vector_base label, section_name=.vectors
.section \section_name, "ax" /* allocatable + executable */
.align 11, 0 /* 2KB alignment, zero fill */
\label:
.endm
/*
* Create an entry in the exception vector table, enforcing it is
* aligned on a 128-byte boundary, as required by the ARMv8 architecture.
* Use zero bytes as the fill value to be stored in the padding bytes
* so that it inserts illegal AArch64 instructions. This increases
* security, robustness and potentially facilitates debugging.
*/
/* vector_entry: open one 128-byte (0x80) vector slot as a function
 * named \label, starting CFI so unwind info lands in .debug_frame. */
.macro vector_entry label, section_name=.vectors
.cfi_sections .debug_frame /* emit unwind info to .debug_frame only */
.section \section_name, "ax"
.align 7, 0 /* 128-byte slot alignment, zero fill */
.type \label, %function
.func \label
.cfi_startproc
\label:
.endm
/*
* This macro verifies that the given vector doesnt exceed the
* architectural limit of 32 instructions. This is meant to be placed
* immediately after the last instruction in the vector. It takes the
* vector entry as the parameter
*/
/* check_vector_size: close the function opened by vector_entry and fail
 * the assembly if the vector body overran its 32-instruction slot. */
.macro check_vector_size since
.endfunc
.cfi_endproc
.if (. - \since) > (32 * 4) /* 32 instructions * 4 bytes = 0x80 slot */
.error "Vector exceeds 32 instructions"
.endif
.endm
/* Actual Vectors for KernelLdr. */
/* Standard ARMv8 layout: four groups (Current EL/SP0, Current EL/SPx,
 * Lower EL/A64, Lower EL/A32) of four vectors (sync, IRQ, FIQ, SError). */
.global kernelldr_vectors
vector_base kernelldr_vectors
/* Current EL, SP0 */
/* unknown_exception is placed at the table base, which coincides with the
 * first (2KB-aligned) slot; every unhandled vector branches here. */
.global unknown_exception
unknown_exception:
vector_entry synch_sp0
/* Just infinite loop. */
b unknown_exception
check_vector_size synch_sp0
vector_entry irq_sp0
b unknown_exception
check_vector_size irq_sp0
vector_entry fiq_sp0
b unknown_exception
check_vector_size fiq_sp0
vector_entry serror_sp0
b unknown_exception
check_vector_size serror_sp0
/* Current EL, SPx */
/* NOTE(review): synchronous exceptions at current EL (SPx) are routed to
 * restore_tpidr_el1 (defined below) — presumably an intentional recovery
 * hook keyed off TPIDR_EL1; confirm against the code that saves context. */
vector_entry synch_spx
b restore_tpidr_el1
check_vector_size synch_spx
vector_entry irq_spx
b unknown_exception
check_vector_size irq_spx
vector_entry fiq_spx
b unknown_exception
check_vector_size fiq_spx
vector_entry serror_spx
b unknown_exception
check_vector_size serror_spx
/* Lower EL, A64 */
vector_entry synch_a64
b unknown_exception
check_vector_size synch_a64
vector_entry irq_a64
b unknown_exception
check_vector_size irq_a64
vector_entry fiq_a64
b unknown_exception
check_vector_size fiq_a64
vector_entry serror_a64
b unknown_exception
check_vector_size serror_a64
/* Lower EL, A32 */
vector_entry synch_a32
b unknown_exception
check_vector_size synch_a32
vector_entry irq_a32
b unknown_exception
check_vector_size irq_a32
vector_entry fiq_a32
b unknown_exception
check_vector_size fiq_a32
vector_entry serror_a32
b unknown_exception
/* Closed manually (no check_vector_size): restore_tpidr_el1 below is
 * deliberately placed in the remainder of this final vector slot. */
.endfunc
.cfi_endproc
/* To save space, insert in an unused vector segment. */
/* restore_tpidr_el1: resume execution from the register-save block whose
 * address is held in TPIDR_EL1. Restores x19-x30 and SP from consecutive
 * 16-byte pairs, sets x0 = 1, and returns via the restored x30.
 * Spins forever if TPIDR_EL1 is 0 (no context to restore). */
.global restore_tpidr_el1
.type restore_tpidr_el1, %function
restore_tpidr_el1:
mrs x0, tpidr_el1
/* Make sure that TPIDR_EL1 can be dereferenced. */
invalid_tpidr:
cbz x0, invalid_tpidr /* hang here forever if TPIDR_EL1 was null */
/* Restore saved registers. */
ldp x19, x20, [x0], #0x10
ldp x21, x22, [x0], #0x10
ldp x23, x24, [x0], #0x10
ldp x25, x26, [x0], #0x10
ldp x27, x28, [x0], #0x10
ldp x29, x30, [x0], #0x10 /* x30 = return address for the ret below */
ldp x1, xzr, [x0], #0x10 /* first slot = saved SP; second slot discarded */
mov sp, x1
mov x0, #0x1 /* result flag for the caller — meaning set by the saver; TODO confirm */
ret

View file

@ -0,0 +1,159 @@
/*
* Copyright (c) 2018-2019 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::init::loader {
namespace {
constexpr size_t KernelResourceRegionSize = 0x1728000;
constexpr size_t ExtraKernelResourceSize = 0x68000;
static_assert(ExtraKernelResourceSize + KernelResourceRegionSize == 0x1790000);
constexpr size_t InitialPageTableRegionSize = 0x200000;
/* Simple bump allocator that hands out zero-filled physical pages for
 * building the initial page tables. Must be Initialize()'d before use. */
class KInitialPageAllocator : public KInitialPageTable::IPageAllocator {
    private:
        uintptr_t next_address; /* next page to hand out; Null<uintptr_t> while uninitialized */
    public:
        /* Starts uninitialized; Initialize() must run before Allocate(). */
        constexpr ALWAYS_INLINE KInitialPageAllocator() : next_address(Null<uintptr_t>) { /* ... */ }

        /* Begin handing out pages upward from the given address. */
        ALWAYS_INLINE void Initialize(uintptr_t address) {
            this->next_address = address;
        }

        /* Return to the uninitialized state. */
        ALWAYS_INLINE void Finalize() {
            this->next_address = Null<uintptr_t>;
        }
    public:
        /* Hand out the next page, cleared to zero. Aborts if uninitialized.
         * Direct pointer use is valid because the MMU is not yet enabled. */
        virtual KPhysicalAddress Allocate() override {
            MESOSPHERE_ABORT_UNLESS(this->next_address != Null<uintptr_t>);
            const uintptr_t result = this->next_address;
            this->next_address += PageSize;
            std::memset(reinterpret_cast<void *>(result), 0, PageSize);
            return result;
        }

        /* No need to override free. The default does nothing, and so would we. */
};
/* Global Allocator. */
KInitialPageAllocator g_initial_page_allocator;
void RelocateKernelPhysically(uintptr_t &base_address, KernelLayout *&layout) {
/* TODO: Proper secure monitor call. */
KPhysicalAddress correct_base = KSystemControl::GetKernelPhysicalBaseAddress(base_address);
if (correct_base != base_address) {
const uintptr_t diff = GetInteger(correct_base) - base_address;
const size_t size = layout->rw_end_offset;
/* Conversion from KPhysicalAddress to void * is safe here, because MMU is not set up yet. */
std::memmove(reinterpret_cast<void *>(GetInteger(correct_base)), reinterpret_cast<void *>(base_address), size);
base_address += diff;
layout = reinterpret_cast<KernelLayout *>(reinterpret_cast<uintptr_t>(layout) + diff);
}
}
/* Build the TTBR0_EL1 identity mapping used while enabling the MMU.
 * NOTE(review): work in progress — ttbr1_table, page_table_region, and
 * page_table_region_size are currently unused, and the KernelLdr
 * self-mapping below is commented out (the commented line is still the
 * kernel's range, a placeholder); confirm intent before completing. */
void SetupInitialIdentityMapping(KInitialPageTable &ttbr1_table, uintptr_t base_address, uintptr_t kernel_size, uintptr_t page_table_region, size_t page_table_region_size, KInitialPageTable::IPageAllocator &allocator) {
/* Make a new page table for TTBR0_EL1. */
KInitialPageTable ttbr0_table(allocator.Allocate());
/* Map in an RWX identity mapping for the kernel. */
constexpr PageTableEntry KernelRWXIdentityAttribute(PageTableEntry::Permission_KernelRWX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);
ttbr0_table.Map(base_address, kernel_size, base_address, KernelRWXIdentityAttribute, allocator);
/* Map in an RWX identity mapping for ourselves. */
/* NOTE(review): KernelLdrRWXIdentityAttribute is defined but not yet used. */
constexpr PageTableEntry KernelLdrRWXIdentityAttribute(PageTableEntry::Permission_KernelRWX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);
//ttbr0_table.Map(base_address, kernel_size, base_address, KernelRWXIdentityAttribute, allocator);
}
}
uintptr_t Main(uintptr_t base_address, KernelLayout *layout, uintptr_t ini_base_address) {
/* Relocate the kernel to the correct physical base address. */
/* Base address and layout are passed by reference and modified. */
RelocateKernelPhysically(base_address, layout);
/* Validate kernel layout. */
/* TODO: constexpr 0x1000 definition somewhere. */
/* In stratosphere, this is os::MemoryPageSize. */
/* We don't have ams::os, this may go in hw:: or something. */
const uintptr_t rx_offset = layout->rx_offset;
const uintptr_t rx_end_offset = layout->rx_end_offset;
const uintptr_t ro_offset = layout->rx_offset;
const uintptr_t ro_end_offset = layout->ro_end_offset;
const uintptr_t rw_offset = layout->rx_offset;
const uintptr_t rw_end_offset = layout->rw_end_offset;
const uintptr_t bss_end_offset = layout->bss_end_offset;
MESOSPHERE_ABORT_UNLESS(util::IsAligned(rx_offset, 0x1000));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(rx_end_offset, 0x1000));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(ro_offset, 0x1000));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(ro_end_offset, 0x1000));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(rw_offset, 0x1000));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(rw_end_offset, 0x1000));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(bss_end_offset, 0x1000));
const uintptr_t bss_offset = layout->bss_offset;
const uintptr_t ini_end_offset = layout->ini_end_offset;
const uintptr_t dynamic_end_offset = layout->dynamic_end_offset;
const uintptr_t init_array_offset = layout->init_array_offset;
const uintptr_t init_array_end_offset = layout->init_array_end_offset;
/* Decide if Kernel should have enlarged resource region. */
const bool use_extra_resources = KSystemControl::ShouldIncreaseResourceRegionSize();
const size_t resource_region_size = KernelResourceRegionSize + (use_extra_resources ? ExtraKernelResourceSize : 0);
/* Setup the INI1 header in memory for the kernel. */
const uintptr_t ini_end_address = base_address + ini_end_offset + resource_region_size;
const uintptr_t ini_load_address = ini_end_address - InitialProcessBinarySizeMax;
if (ini_base_address != ini_load_address) {
/* The INI is not at the correct address, so we need to relocate it. */
const InitialProcessBinaryHeader *ini_header = reinterpret_cast<const InitialProcessBinaryHeader *>(ini_base_address);
if (ini_header->magic == InitialProcessBinaryMagic && ini_header->size <= InitialProcessBinarySizeMax) {
/* INI is valid, relocate it. */
std::memmove(reinterpret_cast<void *>(ini_load_address), ini_header, ini_header->size);
} else {
/* INI is invalid. Make the destination header invalid. */
std::memset(reinterpret_cast<void *>(ini_load_address), 0, sizeof(InitialProcessBinaryHeader));
}
}
/* We want to start allocating page tables at ini_end_address. */
g_initial_page_allocator.Initialize(ini_end_address);
/* Make a new page table for TTBR1_EL1. */
KInitialPageTable ttbr1_table(g_initial_page_allocator.Allocate());
/* Setup initial identity mapping. TTBR1 table passed by reference. */
SetupInitialIdentityMapping(ttbr1_table, base_address, bss_end_offset, ini_end_address, InitialPageTableRegionSize, g_initial_page_allocator);
/* TODO: Use these. */
(void)(bss_offset);
(void)(ini_end_offset);
(void)(dynamic_end_offset);
(void)(init_array_offset);
(void)(init_array_end_offset);
/* TODO */
return 0;
}
/* Tear down loader state: returns the global page allocator to its
 * uninitialized state before control is handed back to the kernel. */
void Finalize() {
g_initial_page_allocator.Finalize();
}
}

View file

@ -0,0 +1,36 @@
/*
* Copyright (c) 2018-2019 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
/* KernelLdr stubs: the scoped interrupt guards are deliberate no-ops here,
 * since interrupts are never configured in the loader. The kernel proper
 * supplies the real implementations. */
inline KScopedInterruptDisable::KScopedInterruptDisable() {
/* Intentionally do nothing, KernelLdr doesn't have interrupts set up. */
}
inline KScopedInterruptDisable::~KScopedInterruptDisable() {
/* Intentionally do nothing, KernelLdr doesn't have interrupts set up. */
}
inline KScopedInterruptEnable::KScopedInterruptEnable() {
/* Intentionally do nothing, KernelLdr doesn't have interrupts set up. */
}
inline KScopedInterruptEnable::~KScopedInterruptEnable() {
/* Intentionally do nothing, KernelLdr doesn't have interrupts set up. */
}
}

View file

@ -21,4 +21,75 @@
/* KernelLdr crt0 entry point.
 * In:  x0  = kernel base address, x1 = kernel map pointer, x2 = INI1 base,
 *      x30 = return address into the kernel (rebased before the final br).
 * x18 holds KernelLdr's own load address throughout startup (bare metal;
 * the AAPCS64 platform-register role of x18 does not apply here).
 * BUGFIX: removed a stray `b _start` left over from the old stub, which
 * made the entire startup sequence below unreachable. */
.section .crt0.text.start, "ax", %progbits
.global _start
_start:
    /* KernelLdr_Main(uintptr_t kernel_base_address, KernelMap *kernel_map, uintptr_t ini1_base_address); */
    adr x18, _start                 /* x18 = our own load address */
    adr x16, __external_references
    ldr x17, [x16, #0x8]            /* bss end (offset from _start) */
    ldr x16, [x16, #0x0]            /* bss start (offset from _start) */
    add x16, x16, x18               /* convert link-time offsets to addresses */
    add x17, x17, x18
clear_bss:
    cmp x16, x17
    b.cs clear_bss_done
    str xzr, [x16],#0x8             /* zero 8 bytes per iteration */
    b clear_bss
clear_bss_done:
    /* Set up the stack (stack top is stored as an offset from _start). */
    adr x17, __external_references
    ldr x17, [x17, #0x10]           /* stack top */
    add sp, x17, x18
    /* Stack is now set up. */
    /* Preserve arguments and link register across the calls below. */
    sub sp, sp, #0x20
    stp x0, x1, [sp, #0x00]
    stp x2, x30, [sp, #0x10]
    /* Apply relocations and call init array for KernelLdr. */
    adr x0, _start
    adr x1, __external_references
    ldr x1, [x1, #0x18]             /* .dynamic. */
    add x1, x0, x1
    /* branch to ams::kern::init::Elf::Elf64::ApplyRelocations(uintptr_t, const ams::kern::init::Elf::Elf64::Dyn *); */
    bl _ZN3ams4kern4init3Elf5Elf6416ApplyRelocationsEmPKNS3_3DynE
    /* branch to ams::kern::init::Elf::Elf64::CallInitArrayFuncs(uintptr_t, uintptr_t) */
    adr x2, _start
    adr x1, __external_references
    ldr x0, [x1, #0x20]             /* init_array_start */
    ldr x1, [x1, #0x28]             /* init_array_end */
    add x0, x0, x2
    add x1, x1, x2
    bl _ZN3ams4kern4init3Elf5Elf6418CallInitArrayFuncsEmm
    /* Setup system registers, for detection of errors during init later. */
    msr tpidr_el1, xzr              /* Clear TPIDR_EL1 */
    adr x0, __external_references
    adr x1, _start
    ldr x0, [x0, #0x30]             /* vectors (offset from _start) */
    add x0, x1, x0
    msr vbar_el1, x0
    isb
    /* Call ams::kern::init::loader::Main(uintptr_t, ams::kern::init::KernelLayout *, uintptr_t) */
    ldp x0, x1, [sp, #0x00]
    ldr x2, [sp, #0x10]
    bl _ZN3ams4kern4init6loader4MainEmPNS1_12KernelLayoutEm
    str x0, [sp, #0x00]             /* stash Main's result for the final jump */
    /* Call ams::kern::init::loader::Finalize() */
    bl _ZN3ams4kern4init6loader8FinalizeEv
    /* Return to the newly-relocated kernel. */
    ldr x1, [sp, #0x18]             /* Return address to Kernel */
    ldr x2, [sp, #0x00]             /* Relocated kernel base address. */
    add x1, x2, x1                  /* rebase saved LR against Main's result */
    br x1

/* Link-time offsets (all relative to _start), indexed by the loads above. */
__external_references:
    .quad __bss_start__ - _start        /* +0x00 */
    .quad __bss_end__ - _start          /* +0x08 */
    .quad __stack_end - _start          /* +0x10 */
    .quad __dynamic__start__ - _start   /* +0x18 */
    .quad __init_array_start - _start   /* +0x20 */
    .quad __init_array_end - _start     /* +0x28 */
    .quad __vectors_start__ - _start    /* +0x30 */