Mirror of https://github.com/Atmosphere-NX/Atmosphere.git, synced 2025-05-28 05:34:11 -04:00
kern: implement enough of KPageTable to initialize a thread
This commit is contained in:
parent c6d1579265
commit 8c93eb5712
31 changed files with 1475 additions and 270 deletions
@@ -42,4 +42,309 @@ namespace ams::kern::arm64 {

    Result KPageTable::Finalize() {
        MESOSPHERE_TODO_IMPLEMENT();
    }

    Result KPageTable::Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) {
        /* Check validity of parameters. */
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
        MESOSPHERE_ASSERT(num_pages > 0);
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
        MESOSPHERE_ASSERT(this->ContainsPages(virt_addr, num_pages));

        if (operation == OperationType_Map) {
            MESOSPHERE_ABORT_UNLESS(is_pa_valid);
            MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
        } else {
            MESOSPHERE_ABORT_UNLESS(!is_pa_valid);
        }

        if (operation == OperationType_Unmap) {
            MESOSPHERE_TODO("operation == OperationType_Unmap");
        } else {
            auto entry_template = this->GetEntryTemplate(properties);

            switch (operation) {
                case OperationType_Map:
                    return this->MapContiguous(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll);
                MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
            }
        }
    }

    Result KPageTable::Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup *page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) {
        MESOSPHERE_TODO_IMPLEMENT();
    }

    Result KPageTable::Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));

        auto &impl = this->GetImpl();
        KVirtualAddress l2_virt = Null<KVirtualAddress>;
        KVirtualAddress l3_virt = Null<KVirtualAddress>;
        int l2_open_count = 0;
        int l3_open_count = 0;

        /* Iterate, mapping each page. */
        for (size_t i = 0; i < num_pages; i++) {
            KPhysicalAddress l3_phys = Null<KPhysicalAddress>;
            bool l2_allocated = false;

            /* If we have no L3 table, we should get or allocate one. */
            if (l3_virt == Null<KVirtualAddress>) {
                KPhysicalAddress l2_phys = Null<KPhysicalAddress>;

                /* If we have no L2 table, we should get or allocate one. */
                if (l2_virt == Null<KVirtualAddress>) {
                    if (L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr); !l1_entry->GetTable(l2_phys)) {
                        /* Allocate table. */
                        l2_virt = AllocatePageTable(page_list, reuse_ll);
                        R_UNLESS(l2_virt != Null<KVirtualAddress>, svc::ResultOutOfResource());

                        /* Set the entry. */
                        l2_phys = GetPageTablePhysicalAddress(l2_virt);
                        PteDataSynchronizationBarrier();
                        *l1_entry = L1PageTableEntry(l2_phys, this->IsKernel(), true);
                        PteDataSynchronizationBarrier();
                        l2_allocated = true;
                    } else {
                        l2_virt = GetPageTableVirtualAddress(l2_phys);
                    }
                }
                MESOSPHERE_ASSERT(l2_virt != Null<KVirtualAddress>);

                if (L2PageTableEntry *l2_entry = impl.GetL2EntryFromTable(l2_virt, virt_addr); !l2_entry->GetTable(l3_phys)) {
                    /* Allocate table. */
                    l3_virt = AllocatePageTable(page_list, reuse_ll);
                    if (l3_virt == Null<KVirtualAddress>) {
                        /* Cleanup the L2 entry. */
                        if (l2_allocated) {
                            *impl.GetL1Entry(virt_addr) = InvalidL1PageTableEntry;
                            this->NoteUpdated();
                            FreePageTable(page_list, l2_virt);
                        } else if (this->GetPageTableManager().IsInPageTableHeap(l2_virt) && l2_open_count > 0) {
                            this->GetPageTableManager().Open(l2_virt, l2_open_count);
                        }
                        return svc::ResultOutOfResource();
                    }

                    /* Set the entry. */
                    l3_phys = GetPageTablePhysicalAddress(l3_virt);
                    PteDataSynchronizationBarrier();
                    *l2_entry = L2PageTableEntry(l3_phys, this->IsKernel(), true);
                    PteDataSynchronizationBarrier();
                    l2_open_count++;
                } else {
                    l3_virt = GetPageTableVirtualAddress(l3_phys);
                }
            }
            MESOSPHERE_ASSERT(l3_virt != Null<KVirtualAddress>);

            /* Map the page. */
            *impl.GetL3EntryFromTable(l3_virt, virt_addr) = L3PageTableEntry(phys_addr, entry_template, false);
            l3_open_count++;
            virt_addr += PageSize;
            phys_addr += PageSize;

            /* Account for hitting end of table. */
            if (util::IsAligned(GetInteger(virt_addr), L2BlockSize)) {
                if (this->GetPageTableManager().IsInPageTableHeap(l3_virt)) {
                    this->GetPageTableManager().Open(l3_virt, l3_open_count);
                }
                l3_virt = Null<KVirtualAddress>;
                l3_open_count = 0;

                if (util::IsAligned(GetInteger(virt_addr), L1BlockSize)) {
                    if (this->GetPageTableManager().IsInPageTableHeap(l2_virt) && l2_open_count > 0) {
                        this->GetPageTableManager().Open(l2_virt, l2_open_count);
                    }
                    l2_virt = Null<KVirtualAddress>;
                    l2_open_count = 0;
                }
            }
        }

        /* Perform any remaining opens. */
        if (l2_open_count > 0 && this->GetPageTableManager().IsInPageTableHeap(l2_virt)) {
            this->GetPageTableManager().Open(l2_virt, l2_open_count);
        }
        if (l3_open_count > 0 && this->GetPageTableManager().IsInPageTableHeap(l3_virt)) {
            this->GetPageTableManager().Open(l3_virt, l3_open_count);
        }

        return ResultSuccess();
    }

    Result KPageTable::Unmap(KProcessAddress virt_addr, size_t num_pages, KPageGroup *pg, PageLinkedList *page_list, bool force, bool reuse_ll) {
        MESOSPHERE_TODO_IMPLEMENT();
    }

    Result KPageTable::MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
        MESOSPHERE_LOG("KPageTable::MapContiguous(%016lx, %016lx, %zu)\n", GetInteger(virt_addr), GetInteger(phys_addr), num_pages);

        /* Cache initial addresses for use on cleanup. */
        const KProcessAddress orig_virt_addr = virt_addr;
        const KPhysicalAddress orig_phys_addr = phys_addr;

        size_t remaining_pages = num_pages;

        if (num_pages < ContiguousPageSize / PageSize) {
            auto guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, nullptr, page_list, true, true)); };
            R_TRY(this->Map(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll));
            guard.Cancel();
        } else {
            MESOSPHERE_TODO("Contiguous mapping");
            (void)remaining_pages;
        }

        /* Perform what coalescing we can. */
        this->MergePages(orig_virt_addr, page_list);
        if (num_pages > 1) {
            this->MergePages(orig_virt_addr + (num_pages - 1) * PageSize, page_list);
        }

        /* Open references to the pages, if we should. */
        if (IsHeapPhysicalAddress(orig_phys_addr)) {
            Kernel::GetMemoryManager().Open(GetHeapVirtualAddress(orig_phys_addr), num_pages);
        }

        return ResultSuccess();
    }

    bool KPageTable::MergePages(KProcessAddress virt_addr, PageLinkedList *page_list) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        auto &impl = this->GetImpl();
        bool merged = false;

        /* If there's no L1 table, don't bother. */
        L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr);
        if (!l1_entry->IsTable()) {
            return merged;
        }

        /* Examine and try to merge the L2 table. */
        L2PageTableEntry *l2_entry = impl.GetL2Entry(l1_entry, virt_addr);
        if (l2_entry->IsTable()) {
            /* We have an L3 entry. */
            L3PageTableEntry *l3_entry = impl.GetL3Entry(l2_entry, virt_addr);
            if (!l3_entry->IsBlock() || !l3_entry->IsContiguousAllowed()) {
                return merged;
            }

            /* If it's not contiguous, try to make it so. */
            if (!l3_entry->IsContiguous()) {
                virt_addr = util::AlignDown(GetInteger(virt_addr), L3ContiguousBlockSize);
                KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l3_entry->GetBlock()), L3ContiguousBlockSize);
                const u64 entry_template = l3_entry->GetEntryTemplate();

                /* Validate that we can merge. */
                for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
                    if (!impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * i)->Is(entry_template | GetInteger(phys_addr + PageSize * i) | PageTableEntry::Type_L3Block)) {
                        return merged;
                    }
                }

                /* Merge! */
                for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
                    impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * i)->SetContiguous(true);
                }

                /* Note that we updated. */
                this->NoteUpdated();
                merged = true;
            }

            /* We might be able to upgrade a contiguous set of L3 entries into an L2 block. */
            virt_addr = util::AlignDown(GetInteger(virt_addr), L2BlockSize);
            KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l3_entry->GetBlock()), L2BlockSize);
            const u64 entry_template = l3_entry->GetEntryTemplate();

            /* Validate that we can merge. */
            for (size_t i = 0; i < L2BlockSize / L3ContiguousBlockSize; i++) {
                if (!impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * i)->Is(entry_template | GetInteger(phys_addr + L3ContiguousBlockSize * i) | PageTableEntry::ContigType_Contiguous)) {
                    return merged;
                }
            }

            /* Merge! */
            PteDataSynchronizationBarrier();
            *l2_entry = L2PageTableEntry(phys_addr, entry_template, false);

            /* Note that we updated. */
            this->NoteUpdated();
            merged = true;

            /* Free the L3 table. */
            KVirtualAddress l3_table = util::AlignDown(reinterpret_cast<uintptr_t>(l3_entry), PageSize);
            if (this->GetPageTableManager().IsInPageTableHeap(l3_table)) {
                this->GetPageTableManager().Close(l3_table, L2BlockSize / L3BlockSize);
                this->FreePageTable(page_list, l3_table);
            }
        }
        if (l2_entry->IsBlock()) {
            /* If it's not contiguous, try to make it so. */
            if (!l2_entry->IsContiguous()) {
                virt_addr = util::AlignDown(GetInteger(virt_addr), L2ContiguousBlockSize);
                KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l2_entry->GetBlock()), L2ContiguousBlockSize);
                const u64 entry_template = l2_entry->GetEntryTemplate();

                /* Validate that we can merge. */
                for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
                    if (!impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * i)->Is(entry_template | GetInteger(phys_addr + PageSize * i) | PageTableEntry::Type_L2Block)) {
                        return merged;
                    }
                }

                /* Merge! */
                for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
                    impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * i)->SetContiguous(true);
                }

                /* Note that we updated. */
                this->NoteUpdated();
                merged = true;
            }

            /* We might be able to upgrade a contiguous set of L2 entries into an L1 block. */
            virt_addr = util::AlignDown(GetInteger(virt_addr), L1BlockSize);
            KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l2_entry->GetBlock()), L1BlockSize);
            const u64 entry_template = l2_entry->GetEntryTemplate();

            /* Validate that we can merge. */
            for (size_t i = 0; i < L1BlockSize / L2ContiguousBlockSize; i++) {
                if (!impl.GetL2Entry(l1_entry, virt_addr + L3BlockSize * i)->Is(entry_template | GetInteger(phys_addr + L2ContiguousBlockSize * i) | PageTableEntry::ContigType_Contiguous)) {
                    return merged;
                }
            }

            /* Merge! */
            PteDataSynchronizationBarrier();
            *l1_entry = L1PageTableEntry(phys_addr, entry_template, false);

            /* Note that we updated. */
            this->NoteUpdated();
            merged = true;

            /* Free the L2 table. */
            KVirtualAddress l2_table = util::AlignDown(reinterpret_cast<uintptr_t>(l2_entry), PageSize);
            if (this->GetPageTableManager().IsInPageTableHeap(l2_table)) {
                this->GetPageTableManager().Close(l2_table, L1BlockSize / L2BlockSize);
                this->FreePageTable(page_list, l2_table);
            }
        }

        return merged;
    }
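The merge pass above is a check-then-promote pattern: every entry in an aligned run must already map the expected, consecutive physical address with identical attributes before any bit is touched. A self-contained sketch of that pattern, using simplified stand-in types rather than Atmosphere's PTE classes:

#include <cstddef>
#include <cstdint>

/* Illustrative stand-in: an "entry" is just the physical address it maps; attribute  */
/* bits are omitted. Returns true if the run was promoted to a contiguous run.        */
bool TryMarkContiguousRun(uint64_t *entries, bool *contiguous_hint, size_t count,
                          uint64_t base_phys, uint64_t step) {
    /* Validate that we can merge: every slot must map the expected consecutive page. */
    for (size_t i = 0; i < count; ++i) {
        if (entries[i] != base_phys + i * step) {
            return false;
        }
    }
    /* Merge: the real code sets the Contiguous bit in each PTE and calls NoteUpdated(). */
    for (size_t i = 0; i < count; ++i) {
        contiguous_hint[i] = true;
    }
    return true;
}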

    void KPageTable::FinalizeUpdate(PageLinkedList *page_list) {
        while (page_list->Peek()) {
            KVirtualAddress page = KVirtualAddress(page_list->Pop());
            MESOSPHERE_ASSERT(this->GetPageTableManager().IsInPageTableHeap(page));
            MESOSPHERE_ASSERT(this->GetPageTableManager().GetRefCount(page) == 0);
            this->GetPageTableManager().Free(page);
        }
    }

}
@@ -17,26 +17,13 @@

namespace ams::kern::arm64 {

    namespace {

        constexpr size_t PageBits = __builtin_ctzll(PageSize);
        constexpr size_t NumLevels = 3;
        constexpr size_t LevelBits = 9;
        static_assert(NumLevels > 0);

        constexpr size_t AddressBits = (NumLevels - 1) * LevelBits + PageBits;
        static_assert(AddressBits <= BITSIZEOF(u64));
        constexpr size_t AddressSpaceSize = (1ull << AddressBits);

    }
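For a 4 KiB granule, PageBits is 12 and each of the two lower translation levels resolves LevelBits = 9 further bits, so AddressBits works out to 30 and AddressSpaceSize to 1 GiB: the span covered by a single L1 entry, which is what num_entries is computed from below. A standalone restatement of that arithmetic (names here are illustrative, not Atmosphere's):

#include <cstddef>

constexpr size_t IllustrativePageSize  = 0x1000;                                 /* 4 KiB granule */
constexpr size_t IllustrativePageBits  = __builtin_ctzll(IllustrativePageSize);  /* 12 */
constexpr size_t IllustrativeLevelBits = 9;                                      /* 512 entries per table */
constexpr size_t IllustrativeNumLevels = 3;                                      /* L1 -> L2 -> L3 */

/* Two lower levels plus the page offset: the range one L1 entry can translate. */
constexpr size_t IllustrativeL1Span = size_t(1) << ((IllustrativeNumLevels - 1) * IllustrativeLevelBits + IllustrativePageBits);
static_assert(IllustrativeL1Span == (size_t(1) << 30), "one L1 entry spans 1 GiB");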

    void KPageTableImpl::InitializeForKernel(void *tb, KVirtualAddress start, KVirtualAddress end) {
        this->table = static_cast<u64 *>(tb);
        this->table = static_cast<L1PageTableEntry *>(tb);
        this->is_kernel = true;
        this->num_entries = util::AlignUp(end - start, AddressSpaceSize) / AddressSpaceSize;
    }

    u64 *KPageTableImpl::Finalize() {
    L1PageTableEntry *KPageTableImpl::Finalize() {
        return this->table;
    }
@@ -23,13 +23,13 @@ namespace ams::kern {
        R_UNLESS(start_block != nullptr, svc::ResultOutOfResource());

        /* Set our start and end. */
        this->start = st;
        this->end = nd;
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(this->start), PageSize));
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(this->end), PageSize));
        this->start_address = st;
        this->end_address = nd;
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(this->start_address), PageSize));
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(this->end_address), PageSize));

        /* Initialize and insert the block. */
        start_block->Initialize(this->start, (this->end - this->start) / PageSize, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None);
        start_block->Initialize(this->start_address, (this->end_address - this->start_address) / PageSize, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None);
        this->memory_block_tree.insert(*start_block);

        return ResultSuccess();
@@ -47,6 +47,37 @@ namespace ams::kern {
        MESOSPHERE_ASSERT(this->memory_block_tree.empty());
    }

    KProcessAddress KMemoryBlockManager::FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const {
        if (num_pages > 0) {
            const KProcessAddress region_end = region_start + region_num_pages * PageSize;
            const KProcessAddress region_last = region_end - 1;
            for (const_iterator it = this->FindIterator(region_start); it != this->memory_block_tree.cend(); it++) {
                const KMemoryInfo info = it->GetMemoryInfo();
                if (region_last < info.GetAddress()) {
                    break;
                }
                if (info.state != KMemoryState_Free) {
                    continue;
                }

                KProcessAddress area = (info.GetAddress() <= GetInteger(region_start)) ? region_start : info.GetAddress();
                area += guard_pages * PageSize;

                const KProcessAddress offset_area = util::AlignDown(GetInteger(area), alignment) + offset;
                area = (area <= offset_area) ? offset_area : offset_area + alignment;

                const KProcessAddress area_end = area + num_pages * PageSize + guard_pages * PageSize;
                const KProcessAddress area_last = area_end - 1;

                if (info.GetAddress() <= GetInteger(area) && area < area_last && area_last <= region_last && GetInteger(area_last) <= info.GetLastAddress()) {
                    return area;
                }
            }
        }

        return Null<KProcessAddress>;
    }
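The core of the placement math above is the rounding step: align the candidate down, add the requested offset, and if that fell below the candidate, advance by one full alignment, giving the first address at or above the candidate that is congruent to offset modulo alignment. The same step in isolation (helper name is made up for illustration; alignment is assumed to be a power of two and offset < alignment):

#include <cstdint>

/* First value >= addr with value % alignment == offset. */
constexpr uint64_t AlignUpToOffset(uint64_t addr, uint64_t alignment, uint64_t offset) {
    const uint64_t candidate = (addr & ~(alignment - 1)) + offset;  /* AlignDown(addr, alignment) + offset */
    return (addr <= candidate) ? candidate : candidate + alignment;
}

static_assert(AlignUpToOffset(0x5000, 0x4000, 0x1000) == 0x5000);   /* already congruent */
static_assert(AlignUpToOffset(0x6000, 0x4000, 0x1000) == 0x9000);   /* bumped to the next slot */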

    void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr) {
        /* Ensure for auditing that we never end up with an invalid tree. */
        KScopedMemoryBlockManagerAuditor auditor(this);
@@ -101,7 +132,7 @@ namespace ams::kern {

        /* Find the iterator now that we've updated. */
        it = this->FindIterator(address);
        if (address != this->start) {
        if (address != this->start_address) {
            it--;
        }
@@ -135,7 +135,6 @@ namespace ams::kern {

    namespace {

        constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);

        constexpr size_t CarveoutAlignment = 0x20000;
@@ -103,19 +103,10 @@ namespace ams::kern {
        /* Loop, trying to iterate from each block. */
        Impl *chosen_manager = nullptr;
        KVirtualAddress allocated_block = Null<KVirtualAddress>;
        if (dir == Direction_FromBack) {
            for (chosen_manager = this->pool_managers_tail[pool]; chosen_manager != nullptr; chosen_manager = chosen_manager->GetPrev()) {
                allocated_block = chosen_manager->AllocateBlock(heap_index);
                if (allocated_block != Null<KVirtualAddress>) {
                    break;
                }
            }
        } else {
            for (chosen_manager = this->pool_managers_head[pool]; chosen_manager != nullptr; chosen_manager = chosen_manager->GetNext()) {
                allocated_block = chosen_manager->AllocateBlock(heap_index);
                if (allocated_block != Null<KVirtualAddress>) {
                    break;
                }
        for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr; chosen_manager = this->GetNextManager(chosen_manager, dir)) {
            allocated_block = chosen_manager->AllocateBlock(heap_index);
            if (allocated_block != Null<KVirtualAddress>) {
                break;
            }
        }
@@ -138,6 +129,70 @@ namespace ams::kern {
        return allocated_block;
    }

    Result KMemoryManager::Allocate(KPageGroup *out, size_t num_pages, u32 option) {
        MESOSPHERE_ASSERT(out != nullptr);
        MESOSPHERE_ASSERT(out->GetNumPages() == 0);

        /* Early return if we're allocating no pages. */
        if (num_pages == 0) {
            return ResultSuccess();
        }

        /* Lock the pool that we're allocating from. */
        const auto [pool, dir] = DecodeOption(option);
        KScopedLightLock lk(this->pool_locks[pool]);

        /* Choose a heap based on our page size request. */
        const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
        R_UNLESS(0 <= heap_index, svc::ResultOutOfMemory());

        /* Ensure that we don't leave anything un-freed. */
        auto group_guard = SCOPE_GUARD {
            for (const auto &it : *out) {
                auto &manager = this->GetManager(it.GetAddress());
                const size_t num_pages = std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
                manager.Free(it.GetAddress(), num_pages);
            }
            out->Finalize();
        };

        /* Keep allocating until we've allocated all our pages. */
        for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) {
            const size_t pages_per_alloc = KPageHeap::GetBlockNumPages(index);
            for (Impl *cur_manager = this->GetFirstManager(pool, dir); cur_manager != nullptr; cur_manager = this->GetNextManager(cur_manager, dir)) {
                while (num_pages >= pages_per_alloc) {
                    /* Allocate a block. */
                    KVirtualAddress allocated_block = cur_manager->AllocateBlock(index);
                    if (allocated_block == Null<KVirtualAddress>) {
                        break;
                    }

                    /* Safely add it to our group. */
                    {
                        auto block_guard = SCOPE_GUARD { cur_manager->Free(allocated_block, pages_per_alloc); };
                        R_TRY(out->AddBlock(allocated_block, pages_per_alloc));
                        block_guard.Cancel();
                    }

                    /* Maintain the optimized memory bitmap, if we should. */
                    if (this->has_optimized_process[pool]) {
                        cur_manager->TrackAllocationForOptimizedProcess(allocated_block, pages_per_alloc);
                    }

                    num_pages -= pages_per_alloc;
                }
            }
        }

        /* Only succeed if we allocated as many pages as we wanted. */
        MESOSPHERE_ASSERT(num_pages >= 0);
        R_UNLESS(num_pages == 0, svc::ResultOutOfMemory());

        /* We succeeded! */
        group_guard.Cancel();
        return ResultSuccess();
    }
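Allocation starts from the largest block index that fits the request and walks downward, so the request is satisfied with the fewest blocks and any remainder falls through to smaller sizes; if pages are still missing after the smallest size, the scope guard rolls the whole group back. The shape of that loop in isolation (block sizes and names are illustrative):

#include <cstddef>
#include <vector>

/* Greedy plan over descending block sizes; returns the chosen block sizes in pages.     */
/* A non-zero remainder after the loop corresponds to the R_UNLESS(num_pages == 0, ...)  */
/* failure path above.                                                                   */
std::vector<size_t> PlanAllocation(size_t num_pages, const std::vector<size_t> &descending_block_pages) {
    std::vector<size_t> plan;
    for (size_t pages_per_alloc : descending_block_pages) {
        while (num_pages >= pages_per_alloc) {
            plan.push_back(pages_per_alloc);
            num_pages -= pages_per_alloc;
        }
    }
    return plan;
}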

    size_t KMemoryManager::Impl::Initialize(const KMemoryRegion *region, Pool p, KVirtualAddress metadata, KVirtualAddress metadata_end) {
        /* Calculate metadata sizes. */
        const size_t ref_count_size = (region->GetSize() / PageSize) * sizeof(u16);
@@ -17,10 +17,6 @@

namespace ams::kern {

    void KPageGroup::Initialize(KBlockInfoManager *m) {
        this->manager = m;
    }

    void KPageGroup::Finalize() {
        auto it = this->block_list.begin();
        while (it != this->block_list.end()) {
@@ -104,7 +104,7 @@ namespace ams::kern {
        MESOSPHERE_ASSERT(big_index >= 0);

        /* Free space before the big blocks. */
        for (s32 i = big_index; i >= 0; i--) {
        for (s32 i = big_index - 1; i >= 0; i--) {
            const size_t block_size = this->blocks[i].GetSize();
            while (before_start + block_size <= before_end) {
                before_end -= block_size;
@@ -113,11 +113,11 @@ namespace ams::kern {
        }

        /* Free space after the big blocks. */
        for (s32 i = big_index; i >= 0; i--) {
        for (s32 i = big_index - 1; i >= 0; i--) {
            const size_t block_size = this->blocks[i].GetSize();
            while (after_start + block_size <= after_end) {
                after_start += block_size;
                this->FreeBlock(after_start, i);
                after_start += block_size;
            }
        }
    }
@@ -51,7 +51,7 @@ namespace ams::kern {
        this->stack_fill_value = MemoryFillValue_Zero;

        this->cached_physical_linear_region = nullptr;
        this->cached_physical_non_kernel_dram_region = nullptr;
        this->cached_physical_heap_region = nullptr;
        this->cached_virtual_managed_pool_dram_region = nullptr;

        /* Initialize our implementation. */
@@ -67,4 +67,279 @@ namespace ams::kern {
        this->memory_block_manager.Finalize(this->memory_block_slab_manager);
        MESOSPHERE_TODO("cpu::InvalidateEntireInstructionCache();");
    }

    KProcessAddress KPageTableBase::GetRegionAddress(KMemoryState state) const {
        switch (state) {
            case KMemoryState_Free:
            case KMemoryState_Kernel:
                return this->address_space_start;
            case KMemoryState_Normal:
                return this->heap_region_start;
            case KMemoryState_Ipc:
            case KMemoryState_NonSecureIpc:
            case KMemoryState_NonDeviceIpc:
                return this->alias_region_start;
            case KMemoryState_Stack:
                return this->stack_region_start;
            case KMemoryState_Io:
            case KMemoryState_Static:
            case KMemoryState_ThreadLocal:
                return this->kernel_map_region_start;
            case KMemoryState_Shared:
            case KMemoryState_AliasCode:
            case KMemoryState_AliasCodeData:
            case KMemoryState_Transfered:
            case KMemoryState_SharedTransfered:
            case KMemoryState_SharedCode:
            case KMemoryState_GeneratedCode:
            case KMemoryState_CodeOut:
                return this->alias_code_region_start;
            case KMemoryState_Code:
            case KMemoryState_CodeData:
                return this->code_region_start;
            MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
        }
    }

    size_t KPageTableBase::GetRegionSize(KMemoryState state) const {
        switch (state) {
            case KMemoryState_Free:
            case KMemoryState_Kernel:
                return this->address_space_end - this->address_space_start;
            case KMemoryState_Normal:
                return this->heap_region_end - this->heap_region_start;
            case KMemoryState_Ipc:
            case KMemoryState_NonSecureIpc:
            case KMemoryState_NonDeviceIpc:
                return this->alias_region_end - this->alias_region_start;
            case KMemoryState_Stack:
                return this->stack_region_end - this->stack_region_start;
            case KMemoryState_Io:
            case KMemoryState_Static:
            case KMemoryState_ThreadLocal:
                return this->kernel_map_region_end - this->kernel_map_region_start;
            case KMemoryState_Shared:
            case KMemoryState_AliasCode:
            case KMemoryState_AliasCodeData:
            case KMemoryState_Transfered:
            case KMemoryState_SharedTransfered:
            case KMemoryState_SharedCode:
            case KMemoryState_GeneratedCode:
            case KMemoryState_CodeOut:
                return this->alias_code_region_end - this->alias_code_region_start;
            case KMemoryState_Code:
            case KMemoryState_CodeData:
                return this->code_region_end - this->code_region_start;
            MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
        }
    }

    bool KPageTableBase::Contains(KProcessAddress addr, size_t size, KMemoryState state) const {
        const KProcessAddress end = addr + size;
        const KProcessAddress last = end - 1;

        const KProcessAddress region_start = this->GetRegionAddress(state);
        const size_t region_size = this->GetRegionSize(state);

        const bool is_in_region = region_start <= addr && addr < end && last <= region_start + region_size - 1;
        const bool is_in_heap = !(end <= this->heap_region_start || this->heap_region_end <= addr);
        const bool is_in_alias = !(end <= this->alias_region_start || this->alias_region_end <= addr);
        switch (state) {
            case KMemoryState_Free:
            case KMemoryState_Kernel:
                return is_in_region;
            case KMemoryState_Io:
            case KMemoryState_Static:
            case KMemoryState_Code:
            case KMemoryState_CodeData:
            case KMemoryState_Shared:
            case KMemoryState_AliasCode:
            case KMemoryState_AliasCodeData:
            case KMemoryState_Stack:
            case KMemoryState_ThreadLocal:
            case KMemoryState_Transfered:
            case KMemoryState_SharedTransfered:
            case KMemoryState_SharedCode:
            case KMemoryState_GeneratedCode:
            case KMemoryState_CodeOut:
                return is_in_region && !is_in_heap && !is_in_alias;
            case KMemoryState_Normal:
                MESOSPHERE_ASSERT(is_in_heap);
                return is_in_region && !is_in_alias;
            case KMemoryState_Ipc:
            case KMemoryState_NonSecureIpc:
            case KMemoryState_NonDeviceIpc:
                MESOSPHERE_ASSERT(is_in_alias);
                return is_in_region && !is_in_heap;
            default:
                return false;
        }
    }

    Result KPageTableBase::CheckMemoryState(const KMemoryInfo &info, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const {
        /* Validate the states match expectation. */
        R_UNLESS((info.state & state_mask) == state, svc::ResultInvalidCurrentMemory());
        R_UNLESS((info.perm & perm_mask) == perm, svc::ResultInvalidCurrentMemory());
        R_UNLESS((info.attribute & attr_mask) == attr, svc::ResultInvalidCurrentMemory());

        return ResultSuccess();
    }

    Result KPageTableBase::CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr) const {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        /* Get information about the first block. */
        const KProcessAddress last_addr = addr + size - 1;
        KMemoryBlockManager::const_iterator it = this->memory_block_manager.FindIterator(addr);
        KMemoryInfo info = it->GetMemoryInfo();

        /* Validate all blocks in the range have correct state. */
        const KMemoryState first_state = info.state;
        const KMemoryPermission first_perm = info.perm;
        const KMemoryAttribute first_attr = info.attribute;
        while (true) {
            /* Validate the current block. */
            R_UNLESS(info.state == first_state, svc::ResultInvalidCurrentMemory());
            R_UNLESS(info.perm == first_perm, svc::ResultInvalidCurrentMemory());
            R_UNLESS((info.attribute | ignore_attr) == (first_attr | ignore_attr), svc::ResultInvalidCurrentMemory());

            /* Validate against the provided masks. */
            R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));

            /* Break once we're done. */
            if (last_addr <= info.GetLastAddress()) {
                break;
            }

            /* Advance our iterator. */
            it++;
            MESOSPHERE_ASSERT(it != this->memory_block_manager.cend());
            info = it->GetMemoryInfo();
        }

        /* Write output state. */
        if (out_state) {
            *out_state = first_state;
        }
        if (out_perm) {
            *out_perm = first_perm;
        }
        if (out_attr) {
            *out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr);
        }
        return ResultSuccess();
    }

    Result KPageTableBase::QueryInfoImpl(KMemoryInfo *out_info, ams::svc::PageInfo *out_page, KProcessAddress address) const {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
        MESOSPHERE_ASSERT(out_info != nullptr);
        MESOSPHERE_ASSERT(out_page != nullptr);

        const KMemoryBlock *block = this->memory_block_manager.FindBlock(address);
        R_UNLESS(block != nullptr, svc::ResultInvalidCurrentMemory());

        *out_info = block->GetMemoryInfo();
        out_page->flags = 0;
        return ResultSuccess();
    }

    KProcessAddress KPageTableBase::FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const {
        KProcessAddress address = Null<KProcessAddress>;

        if (num_pages <= region_num_pages) {
            if (this->IsAslrEnabled()) {
                /* Try to directly find a free area up to 8 times. */
                for (size_t i = 0; i < 8; i++) {
                    const size_t random_offset = KSystemControl::GenerateRandomRange(0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) * alignment;
                    const KProcessAddress candidate = util::AlignDown(GetInteger(region_start + random_offset), alignment) + offset;

                    KMemoryInfo info;
                    ams::svc::PageInfo page_info;
                    MESOSPHERE_R_ABORT_UNLESS(this->QueryInfoImpl(&info, &page_info, candidate));

                    if (info.state != KMemoryState_Free) { continue; }
                    if (!(region_start <= candidate)) { continue; }
                    if (!(info.GetAddress() + guard_pages * PageSize <= GetInteger(candidate))) { continue; }
                    if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= info.GetLastAddress())) { continue; }
                    if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= region_start + region_num_pages * PageSize - 1)) { continue; }

                    address = candidate;
                    break;
                }
                /* Fall back to finding the first free area with a random offset. */
                if (address == Null<KProcessAddress>) {
                    /* NOTE: Nintendo does not account for guard pages here. */
                    /* This may theoretically cause an offset to be chosen that cannot be mapped. */
                    /* TODO: Should we account for guard pages? */
                    const size_t offset_pages = KSystemControl::GenerateRandomRange(0, region_num_pages - num_pages);
                    address = this->memory_block_manager.FindFreeArea(region_start + offset_pages * PageSize, region_num_pages - offset_pages, num_pages, alignment, offset, guard_pages);
                }
            }
            /* Find the first free area. */
            if (address == Null<KProcessAddress>) {
                address = this->memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages, alignment, offset, guard_pages);
            }
        }

        return address;
    }
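With ASLR on, placement first probes up to eight random candidates against the block manager and only falls back to a linear first-fit search (itself started at a random page offset) if none of them work. A compact sketch of that try-random-then-fall-back shape; is_range_free() stands in for the QueryInfoImpl checks and is purely illustrative:

#include <cstdint>
#include <random>

/* Assumes num_pages <= region_pages, mirroring the outer check above. */
template <typename IsRangeFree>
uint64_t PickAddressSketch(uint64_t region_start, uint64_t region_pages, uint64_t num_pages,
                           uint64_t page_size, IsRangeFree is_range_free) {
    std::mt19937_64 rng{std::random_device{}()};  /* the kernel uses KSystemControl::GenerateRandomRange() */
    for (int attempt = 0; attempt < 8; ++attempt) {
        const uint64_t offset_pages = rng() % (region_pages - num_pages + 1);
        const uint64_t candidate    = region_start + offset_pages * page_size;
        if (is_range_free(candidate, num_pages * page_size)) {
            return candidate;
        }
    }
    /* Fall back: first free range at or after the region start. */
    for (uint64_t offset_pages = 0; offset_pages + num_pages <= region_pages; ++offset_pages) {
        const uint64_t candidate = region_start + offset_pages * page_size;
        if (is_range_free(candidate, num_pages * page_size)) {
            return candidate;
        }
    }
    return 0;  /* no space */
}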

    Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, const KPageProperties properties) {
        /* Create a page group to hold the pages we allocate. */
        KPageGroup pg(this->block_info_manager);

        /* Allocate the pages. */
        R_TRY(Kernel::GetMemoryManager().Allocate(std::addressof(pg), num_pages, this->allocate_option));

        /* Ensure that the page group is open while we work with it. */
        KScopedPageGroup spg(pg);

        /* Clear all pages. */
        for (const auto &it : pg) {
            std::memset(GetVoidPointer(it.GetAddress()), this->heap_fill_value, it.GetSize());
        }

        /* Map the pages. */
        return this->Operate(page_list, address, num_pages, std::addressof(pg), properties, OperationType_MapGroup, false);
    }

    Result KPageTableBase::MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
        MESOSPHERE_ASSERT(util::IsAligned(alignment, PageSize) && alignment >= PageSize);

        /* Ensure this is a valid map request. */
        R_UNLESS(this->Contains(region_start, region_num_pages * PageSize, state), svc::ResultInvalidCurrentMemory());
        R_UNLESS(num_pages < region_num_pages, svc::ResultOutOfMemory());

        /* Lock the table. */
        KScopedLightLock lk(this->general_lock);

        /* Find a random address to map at. */
        KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, this->GetNumGuardPages());
        R_UNLESS(addr != Null<KProcessAddress>, svc::ResultOutOfMemory());
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), alignment));
        MESOSPHERE_ASSERT(this->Contains(addr, num_pages * PageSize, state));
        MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState_All, KMemoryState_Free, KMemoryPermission_All, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));

        /* Create an update allocator. */
        KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager);
        R_TRY(allocator.GetResult());

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* Perform mapping operation. */
        const KPageProperties properties = { perm, false, false, false };
        if (is_pa_valid) {
            R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties, OperationType_Map, false));
        } else {
            R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, properties));
        }

        /* Update the blocks. */
        this->memory_block_manager.Update(&allocator, addr, num_pages, state, perm, KMemoryAttribute_None);

        /* We successfully mapped the pages. */
        *out_addr = addr;
        return ResultSuccess();
    }
}
@@ -200,14 +200,18 @@ namespace ams::kern {
        KPageBuffer *page = KPageBuffer::Allocate();
        R_UNLESS(page != nullptr, svc::ResultOutOfResource());

        /* Map the stack page. */
        KProcessAddress stack_top = Null<KProcessAddress>;
        {
            KProcessAddress stack_bottom = Null<KProcessAddress>;
            auto page_guard = SCOPE_GUARD { KPageBuffer::Free(page); };
            MESOSPHERE_TODO("R_TRY(Kernel::GetSupervisorPageTable().Map); ...");
            (void)(stack_region);
            R_TRY(Kernel::GetKernelPageTable().MapPages(std::addressof(stack_bottom), 1, PageSize, page->GetPhysicalAddress(), stack_region.GetAddress(),
                                                        stack_region.GetSize() / PageSize, KMemoryState_Kernel, KMemoryPermission_KernelReadWrite));
            page_guard.Cancel();

            /* Calculate top of the stack. */
            stack_top = stack_bottom + PageSize;
        }

        /* Initialize the thread. */
@@ -21,4 +21,8 @@ void operator delete (void *deleted) throw() {

void operator delete (void *deleted, size_t size) throw() {
    MESOSPHERE_PANIC("operator delete(void *, size_t) was called: %p %zu", deleted, size);
}
}

void abort() {
    MESOSPHERE_PANIC("abort() was called");
}