kern: load initial process binary from user pool, rather than from pt heap

This commit is contained in:
Michael Scire 2021-04-07 12:25:10 -07:00 committed by SciresM
parent a1e137cc1c
commit 0f8b7be2d2
13 changed files with 350 additions and 184 deletions

View file

@@ -25,6 +25,7 @@ namespace ams::kern::board::nintendo::nx {
/* Initialization. */
static size_t GetIntendedMemorySize();
static KPhysicalAddress GetKernelPhysicalBaseAddress(uintptr_t base_address);
static KPhysicalAddress GetInitialProcessBinaryPhysicalAddress();
static bool ShouldIncreaseThreadResourceLimit();
static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
static size_t GetApplicationPoolSize();

View file

@@ -27,7 +27,7 @@ namespace ams::kern::init {
u32 rw_end_offset;
u32 bss_offset;
u32 bss_end_offset;
u32 ini_load_offset;
u32 resource_offset;
u32 dynamic_offset;
u32 init_array_offset;
u32 init_array_end_offset;

View file

@@ -29,11 +29,12 @@ namespace ams::kern {
u32 reserved;
};
NOINLINE void CopyInitialProcessBinaryToKernelMemory();
NOINLINE size_t CopyInitialProcessBinaryToKernelMemory();
NOINLINE void CreateAndRunInitialProcesses();
u64 GetInitialProcessIdMin();
u64 GetInitialProcessIdMax();
KVirtualAddress GetInitialProcessBinaryAddress();
size_t GetInitialProcessesSecureMemorySize();
}

View file

@@ -91,46 +91,49 @@ namespace ams::kern {
class KInitialProcessReader {
private:
KInitialProcessHeader *m_kip_header;
KInitialProcessHeader m_kip_header;
public:
constexpr KInitialProcessReader() : m_kip_header() { /* ... */ }
constexpr const u32 *GetCapabilities() const { return m_kip_header->GetCapabilities(); }
constexpr size_t GetNumCapabilities() const { return m_kip_header->GetNumCapabilities(); }
constexpr const u32 *GetCapabilities() const { return m_kip_header.GetCapabilities(); }
constexpr size_t GetNumCapabilities() const { return m_kip_header.GetNumCapabilities(); }
constexpr size_t GetBinarySize() const {
return sizeof(*m_kip_header) + m_kip_header->GetRxCompressedSize() + m_kip_header->GetRoCompressedSize() + m_kip_header->GetRwCompressedSize();
return m_kip_header.GetRxCompressedSize() + m_kip_header.GetRoCompressedSize() + m_kip_header.GetRwCompressedSize();
}
constexpr size_t GetSize() const {
if (const size_t bss_size = m_kip_header->GetBssSize(); bss_size != 0) {
return m_kip_header->GetBssAddress() + m_kip_header->GetBssSize();
if (const size_t bss_size = m_kip_header.GetBssSize(); bss_size != 0) {
return util::AlignUp(m_kip_header.GetBssAddress() + m_kip_header.GetBssSize(), PageSize);
} else {
return m_kip_header->GetRwAddress() + m_kip_header->GetRwSize();
return util::AlignUp(m_kip_header.GetRwAddress() + m_kip_header.GetRwSize(), PageSize);
}
}
constexpr u8 GetPriority() const { return m_kip_header->GetPriority(); }
constexpr u8 GetIdealCoreId() const { return m_kip_header->GetIdealCoreId(); }
constexpr u32 GetAffinityMask() const { return m_kip_header->GetAffinityMask(); }
constexpr u32 GetStackSize() const { return m_kip_header->GetStackSize(); }
constexpr u8 GetPriority() const { return m_kip_header.GetPriority(); }
constexpr u8 GetIdealCoreId() const { return m_kip_header.GetIdealCoreId(); }
constexpr u32 GetAffinityMask() const { return m_kip_header.GetAffinityMask(); }
constexpr u32 GetStackSize() const { return m_kip_header.GetStackSize(); }
constexpr bool Is64Bit() const { return m_kip_header->Is64Bit(); }
constexpr bool Is64BitAddressSpace() const { return m_kip_header->Is64BitAddressSpace(); }
constexpr bool UsesSecureMemory() const { return m_kip_header->UsesSecureMemory(); }
constexpr bool IsImmortal() const { return m_kip_header->IsImmortal(); }
constexpr bool Is64Bit() const { return m_kip_header.Is64Bit(); }
constexpr bool Is64BitAddressSpace() const { return m_kip_header.Is64BitAddressSpace(); }
constexpr bool UsesSecureMemory() const { return m_kip_header.UsesSecureMemory(); }
constexpr bool IsImmortal() const { return m_kip_header.IsImmortal(); }
bool Attach(u8 *bin) {
if (KInitialProcessHeader *header = reinterpret_cast<KInitialProcessHeader *>(bin); header->IsValid()) {
m_kip_header = header;
return true;
KVirtualAddress Attach(KVirtualAddress bin) {
/* Copy the header. */
m_kip_header = *GetPointer<const KInitialProcessHeader>(bin);
/* Check that it's valid. */
if (m_kip_header.IsValid()) {
return bin + sizeof(KInitialProcessHeader);
} else {
return false;
return Null<KVirtualAddress>;
}
}
Result MakeCreateProcessParameter(ams::svc::CreateProcessParameter *out, bool enable_aslr) const;
Result Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params) const;
Result Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params, KProcessAddress src) const;
Result SetMemoryPermissions(KProcessPageTable &page_table, const ams::svc::CreateProcessParameter &params) const;
};

View file

@@ -75,7 +75,7 @@ namespace ams::kern {
KVirtualAddress AllocateBlock(s32 index, bool random) { return m_heap.AllocateBlock(index, random); }
void Free(KVirtualAddress addr, size_t num_pages) { m_heap.Free(addr, num_pages); }
void UpdateUsedHeapSize() { m_heap.UpdateUsedSize(); }
void SetInitialUsedHeapSize(size_t reserved_size) { m_heap.SetInitialUsedSize(reserved_size); }
void InitializeOptimizedMemory() { std::memset(GetVoidPointer(m_management_region), 0, CalculateOptimizedProcessOverheadSize(m_heap.GetSize())); }
@@ -168,6 +168,10 @@
return m_managers[KMemoryLayout::GetVirtualLinearRegion(address).GetAttributes()];
}
const Impl &GetManager(KVirtualAddress address) const {
return m_managers[KMemoryLayout::GetVirtualLinearRegion(address).GetAttributes()];
}
constexpr Impl *GetFirstManager(Pool pool, Direction dir) {
return dir == Direction_FromBack ? m_pool_managers_tail[pool] : m_pool_managers_head[pool];
}
@@ -197,6 +201,10 @@
NOINLINE Result AllocateAndOpen(KPageGroup *out, size_t num_pages, u32 option);
NOINLINE Result AllocateAndOpenForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern);
Pool GetPool(KVirtualAddress address) const {
return this->GetManager(address).GetPool();
}
void Open(KVirtualAddress address, size_t num_pages) {
/* Repeatedly open references until we've done so for all pages. */
while (num_pages) {

View file

@@ -125,7 +125,7 @@ namespace ams::kern {
private:
KVirtualAddress m_heap_address;
size_t m_heap_size;
size_t m_used_size;
size_t m_initial_used_size;
size_t m_num_blocks;
Block m_blocks[NumMemoryBlockPageShifts];
private:
@@ -134,7 +134,7 @@ namespace ams::kern {
void FreeBlock(KVirtualAddress block, s32 index);
public:
KPageHeap() : m_heap_address(), m_heap_size(), m_used_size(), m_num_blocks(), m_blocks() { /* ... */ }
KPageHeap() : m_heap_address(), m_heap_size(), m_initial_used_size(), m_num_blocks(), m_blocks() { /* ... */ }
constexpr KVirtualAddress GetAddress() const { return m_heap_address; }
constexpr size_t GetSize() const { return m_heap_size; }
@@ -149,8 +149,13 @@
size_t GetFreeSize() const { return this->GetNumFreePages() * PageSize; }
void DumpFreeList() const;
void UpdateUsedSize() {
m_used_size = m_heap_size - (this->GetNumFreePages() * PageSize);
void SetInitialUsedSize(size_t reserved_size) {
/* Check that the reserved size is valid. */
const size_t free_size = this->GetNumFreePages() * PageSize;
MESOSPHERE_ABORT_UNLESS(m_heap_size >= free_size + reserved_size);
/* Set the initial used size. */
m_initial_used_size = m_heap_size - free_size - reserved_size;
}
KVirtualAddress AllocateBlock(s32 index, bool random);