Mirror of https://github.com/Atmosphere-NX/Atmosphere.git, synced 2025-05-23 11:16:57 -04:00.

Commit f7d3d50f33 — "kern: implement KMemoryManager init" (parent commit 1de607c183).
9 changed files with 283 additions and 9 deletions.
|
@ -19,16 +19,103 @@ namespace ams::kern {
|
|||
|
||||
namespace {
|
||||
|
||||
constexpr size_t g_memory_block_page_shifts[] = { 0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E };
|
||||
constexpr size_t NumMemoryBlockPageShifts = util::size(g_memory_block_page_shifts);
|
||||
constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
|
||||
switch (type) {
|
||||
case KMemoryRegionType_VirtualDramApplicationPool: return KMemoryManager::Pool_Application;
|
||||
case KMemoryRegionType_VirtualDramAppletPool: return KMemoryManager::Pool_Applet;
|
||||
case KMemoryRegionType_VirtualDramSystemPool: return KMemoryManager::Pool_System;
|
||||
case KMemoryRegionType_VirtualDramSystemNonSecurePool: return KMemoryManager::Pool_SystemNonSecure;
|
||||
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
void KMemoryManager::Initialize(KVirtualAddress metadata_region, size_t metadata_region_size) {
    /* Zero-fill the whole metadata pool before handing pieces of it to managers. */
    const KVirtualAddress metadata_region_end = metadata_region + metadata_region_size;
    std::memset(GetVoidPointer(metadata_region), 0, metadata_region_size);

    /* Create one Impl per managed-pool region, in region-attribute order. */
    for (;;) {
        /* Find the region whose attribute index matches the next manager slot. */
        const KMemoryRegion *next_region = nullptr;
        for (const auto &candidate : KMemoryLayout::GetVirtualMemoryRegionTree()) {
            /* Only managed-pool regions get managers, and they are consumed in order. */
            const bool is_managed_pool = candidate.IsDerivedFrom(KMemoryRegionType_VirtualDramManagedPool);
            if (is_managed_pool && candidate.GetAttributes() == this->num_managers) {
                next_region = std::addressof(candidate);
                break;
            }
        }

        /* No matching region means every manager has been initialized. */
        if (next_region == nullptr) {
            break;
        }

        /* Sanity-check the region we found. */
        MESOSPHERE_ASSERT(next_region->GetAddress() != Null<decltype(next_region->GetAddress())>);
        MESOSPHERE_ASSERT(next_region->GetSize() > 0);
        MESOSPHERE_ASSERT(next_region->GetEndAddress() >= next_region->GetAddress());
        MESOSPHERE_ASSERT(next_region->IsDerivedFrom(KMemoryRegionType_VirtualDramManagedPool));
        MESOSPHERE_ASSERT(next_region->GetAttributes() == this->num_managers);

        /* Claim the next manager slot and initialize it from the region, */
        /* carving its metadata out of the shared metadata pool. */
        const Pool pool = GetPoolFromMemoryRegionType(next_region->GetType());
        Impl * const manager = std::addressof(this->managers[this->num_managers++]);
        MESOSPHERE_ABORT_UNLESS(this->num_managers <= util::size(this->managers));

        const size_t consumed = manager->Initialize(next_region, pool, metadata_region, metadata_region_end);
        metadata_region += consumed;
        MESOSPHERE_ABORT_UNLESS(metadata_region <= metadata_region_end);

        /* Append the manager to its pool's intrusive list. */
        Impl * const tail = this->pool_managers_tail[pool];
        if (tail == nullptr) {
            this->pool_managers_head[pool] = manager;
        } else {
            tail->SetNext(manager);
            manager->SetPrev(tail);
        }
        this->pool_managers_tail[pool] = manager;
    }
}
|
||||
|
||||
size_t KMemoryManager::Impl::Initialize(const KMemoryRegion *region, Pool p, KVirtualAddress metadata, KVirtualAddress metadata_end) {
|
||||
/* Calculate metadata sizes. */
|
||||
const size_t ref_count_size = (region->GetSize() / PageSize) * sizeof(u16);
|
||||
const size_t optimize_map_size = (util::AlignUp((region->GetSize() / PageSize), BITSIZEOF(u64)) / BITSIZEOF(u64)) * sizeof(u64);
|
||||
const size_t manager_size = util::AlignUp(optimize_map_size + ref_count_size, PageSize);
|
||||
const size_t page_heap_size = KPageHeap::CalculateMetadataOverheadSize(region->GetSize());
|
||||
const size_t total_metadata_size = manager_size + page_heap_size;
|
||||
MESOSPHERE_ABORT_UNLESS(manager_size <= total_metadata_size);
|
||||
MESOSPHERE_ABORT_UNLESS(metadata + total_metadata_size <= metadata_end);
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(total_metadata_size, PageSize));
|
||||
|
||||
/* Setup region. */
|
||||
this->pool = p;
|
||||
this->metadata_region = metadata;
|
||||
this->page_reference_counts = GetPointer<RefCount>(metadata + optimize_map_size);
|
||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(this->metadata_region), PageSize));
|
||||
|
||||
/* Initialize the manager's KPageHeap. */
|
||||
this->heap.Initialize(region->GetAddress(), region->GetSize(), metadata + manager_size, page_heap_size);
|
||||
|
||||
return total_metadata_size;
|
||||
}
|
||||
|
||||
size_t KMemoryManager::Impl::CalculateMetadataOverheadSize(size_t region_size) {
|
||||
const size_t ref_count_size = (region_size / PageSize) * sizeof(u16);
|
||||
const size_t bitmap_size = (util::AlignUp((region_size / PageSize), BITSIZEOF(u64)) / BITSIZEOF(u64)) * sizeof(u64);
|
||||
const size_t page_heap_size = KPageHeap::CalculateMetadataOverheadSize(region_size, g_memory_block_page_shifts, NumMemoryBlockPageShifts);
|
||||
return util::AlignUp(page_heap_size + bitmap_size + ref_count_size, PageSize);
|
||||
const size_t ref_count_size = (region_size / PageSize) * sizeof(u16);
|
||||
const size_t optimize_map_size = (util::AlignUp((region_size / PageSize), BITSIZEOF(u64)) / BITSIZEOF(u64)) * sizeof(u64);
|
||||
const size_t manager_meta_size = util::AlignUp(optimize_map_size + ref_count_size, PageSize);
|
||||
const size_t page_heap_size = KPageHeap::CalculateMetadataOverheadSize(region_size);
|
||||
return manager_meta_size + page_heap_size;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -17,10 +17,36 @@
|
|||
|
||||
namespace ams::kern {
|
||||
|
||||
void KPageHeap::Initialize(KVirtualAddress address, size_t size, KVirtualAddress metadata_address, size_t metadata_size, const size_t *block_shifts, size_t num_block_shifts) {
    /* Validate the heap span and the requested block configuration. */
    MESOSPHERE_ASSERT(util::IsAligned(GetInteger(address), PageSize));
    MESOSPHERE_ASSERT(util::IsAligned(size, PageSize));
    MESOSPHERE_ASSERT(0 < num_block_shifts && num_block_shifts <= NumMemoryBlockPageShifts);
    const KVirtualAddress metadata_end = metadata_address + metadata_size;

    /* Record the span this heap manages and how many block sizes it tracks. */
    this->heap_address = address;
    this->heap_size    = size;
    this->num_blocks   = num_block_shifts;

    /* Carve each block's bitmap out of the metadata area, in ascending order; */
    /* each Initialize call returns the storage cursor for the next block.     */
    u64 *bitmap_cursor = GetPointer<u64>(metadata_address);
    for (size_t i = 0; i < num_block_shifts; i++) {
        const size_t shift      = block_shifts[i];
        const size_t next_shift = (i + 1 < num_block_shifts) ? block_shifts[i + 1] : 0;
        bitmap_cursor = this->blocks[i].Initialize(this->heap_address, this->heap_size, shift, next_shift, bitmap_cursor);
    }

    /* The bitmaps must fit inside the metadata area we were given. */
    MESOSPHERE_ABORT_UNLESS(KVirtualAddress(bitmap_cursor) <= metadata_end);
}
|
||||
|
||||
size_t KPageHeap::CalculateMetadataOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts) {
|
||||
size_t overhead_size = 0;
|
||||
for (size_t i = 0; i < num_block_shifts; i++) {
|
||||
overhead_size += KPageHeap::Block::CalculateMetadataOverheadSize(region_size, block_shifts[i], (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0);
|
||||
const size_t cur_block_shift = block_shifts[i];
|
||||
const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0;
|
||||
overhead_size += KPageHeap::Block::CalculateMetadataOverheadSize(region_size, cur_block_shift, next_block_shift);
|
||||
}
|
||||
return util::AlignUp(overhead_size, PageSize);
|
||||
}
|
||||
|
|
|
@ -22,6 +22,7 @@ namespace ams::kern {
|
|||
KThread Kernel::s_main_threads[cpu::NumCores];
|
||||
KThread Kernel::s_idle_threads[cpu::NumCores];
|
||||
KResourceLimit Kernel::s_system_resource_limit;
|
||||
KMemoryManager Kernel::s_memory_manager;
|
||||
|
||||
void Kernel::InitializeCoreLocalRegion(s32 core_id) {
|
||||
/* Construct the core local region object in place. */
|
||||
|
|
|
@ -38,6 +38,12 @@ namespace ams::kern {
|
|||
/* Initialize KSystemControl. */
|
||||
KSystemControl::Initialize();
|
||||
|
||||
/* Initialize the memory manager. */
|
||||
{
|
||||
const auto &metadata_region = KMemoryLayout::GetMetadataPoolRegion();
|
||||
Kernel::GetMemoryManager().Initialize(metadata_region.GetAddress(), metadata_region.GetSize());
|
||||
}
|
||||
|
||||
/* Note: this is not actually done here, it's done later in main after more stuff is setup. */
|
||||
/* However, for testing (and to manifest this code in the produced binary, this is here for now. */
|
||||
/* TODO: Do this better. */
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue