Mirror of https://github.com/Atmosphere-NX/Atmosphere.git (synced 2025-05-31 23:08:22 -04:00)
kern: implement through kip decompression
commit 92521eed2a
parent cbc73f4407
12 changed files with 427 additions and 42 deletions
@@ -157,6 +157,8 @@ namespace ams::kern::arch::arm64::cpu {
    void FlushEntireDataCacheSharedForInit();
    void FlushEntireDataCacheLocalForInit();

    void FlushEntireDataCache();

    Result InvalidateDataCache(void *addr, size_t size);
    Result StoreDataCache(const void *addr, size_t size);
    Result FlushDataCache(const void *addr, size_t size);
@@ -44,7 +44,6 @@ namespace ams::kern::arch::arm64 {
        BlockType_Count,
    };

    static_assert(L3BlockSize == PageSize);
    static constexpr size_t ContiguousPageSize = L3ContiguousBlockSize;
@@ -79,6 +78,16 @@ namespace ams::kern::arch::arm64 {
                MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
            }
        }
+
+       static constexpr size_t GetSmallerAlignment(size_t alignment) {
+           MESOSPHERE_ASSERT(alignment > L3BlockSize);
+           return KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(KPageTable::GetBlockType(alignment) - 1));
+       }
+
+       static constexpr size_t GetLargerAlignment(size_t alignment) {
+           MESOSPHERE_ASSERT(alignment < L1BlockSize);
+           return KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(KPageTable::GetBlockType(alignment) + 1));
+       }
    protected:
        virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
        virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup *page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
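The two constexpr helpers added above step an alignment down or up through the supported ARM64 mapping granularities (L3 page, contiguous L3, L2 block, contiguous L2, L1 block, plus the Tegra SMMU size on NX). A minimal standalone sketch of the same stepping, using assumed 4KB-granule block sizes rather than Mesosphere's BlockType enum:

#include <array>
#include <cassert>
#include <cstddef>

// Assumed block sizes for a 4KB translation granule (illustrative only).
constexpr std::array<size_t, 5> BlockSizes = {
    0x1000,     // L3 page          (4KB)
    0x10000,    // contiguous L3    (64KB)
    0x200000,   // L2 block         (2MB)
    0x2000000,  // contiguous L2    (32MB)
    0x40000000, // L1 block         (1GB)
};

// Index of the largest block size that does not exceed the alignment.
constexpr size_t GetBlockIndex(size_t alignment) {
    for (size_t i = BlockSizes.size(); i-- > 0;) {
        if (alignment >= BlockSizes[i]) {
            return i;
        }
    }
    return 0;
}

// Next-smaller / next-larger supported alignment, mirroring the shape of
// GetSmallerAlignment/GetLargerAlignment in the hunk above.
constexpr size_t SmallerAlignment(size_t alignment) {
    assert(alignment > BlockSizes.front());
    return BlockSizes[GetBlockIndex(alignment) - 1];
}

constexpr size_t LargerAlignment(size_t alignment) {
    assert(alignment < BlockSizes.back());
    return BlockSizes[GetBlockIndex(alignment) + 1];
}

static_assert(SmallerAlignment(0x200000) == 0x10000);
static_assert(LargerAlignment(0x10000) == 0x200000);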
@@ -164,7 +173,25 @@ namespace ams::kern::arch::arm64 {
            Result Finalize();
        private:
            Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll);
            Result Unmap(KProcessAddress virt_addr, size_t num_pages, KPageGroup *pg, PageLinkedList *page_list, bool force, bool reuse_ll);
            Result Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll);
+
+           Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, size_t page_size, PageLinkedList *page_list, bool reuse_ll) {
+               switch (page_size) {
+                   case L1BlockSize:
+                   #ifdef ATMOSPHERE_BOARD_NINTENDO_NX
+                   case L2TegraSmmuBlockSize:
+                   #endif
+                   case L2BlockSize:
+                   case L3BlockSize:
+                       break;
+                   case L2ContiguousBlockSize:
+                   case L3ContiguousBlockSize:
+                       entry_template.SetContiguous(true);
+                       break;
+                   MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+               }
+               return this->Map(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll);
+           }

            Result MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll);
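The new private Map overload above only has to classify page_size: ordinary L1/L2/L3 (and Tegra SMMU) block sizes pass the entry template through unchanged, while the contiguous sizes set the entry's contiguous hint before delegating to the existing Map. On ARMv8 the contiguous hint is bit 52 of a block/page descriptor and tells the MMU that 16 adjacent entries are identical apart from their output addresses, so they can share a single TLB entry. An illustrative descriptor wrapper (not Mesosphere's PageTableEntry) showing what SetContiguous amounts to at the bit level:

#include <cstdint>

// Illustrative stage-1 descriptor wrapper; field layout is assumed, not the kernel's.
struct DescriptorSketch {
    std::uint64_t value;

    // The ARMv8 contiguous hint is bit [52] of a block/page descriptor. When set,
    // hardware may fold a run of 16 adjacent, otherwise-identical entries into one
    // TLB entry, so all 16 must agree apart from their output addresses.
    constexpr DescriptorSketch &SetContiguous(bool en) {
        constexpr std::uint64_t ContiguousBit = UINT64_C(1) << 52;
        this->value = en ? (this->value | ContiguousBit) : (this->value & ~ContiguousBit);
        return *this;
    }
};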
@@ -35,6 +35,18 @@ namespace ams::kern::arch::arm64 {
                return this->page_table.MapPages(out_addr, num_pages, alignment, phys_addr, region_start, region_num_pages, state, perm);
            }

+           Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
+               return this->page_table.UnmapPages(address, num_pages, state);
+           }
+
+           Result MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
+               return this->page_table.MapPageGroup(out_addr, pg, region_start, region_num_pages, state, perm);
+           }
+
+           Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) {
+               return this->page_table.UnmapPageGroup(address, pg, state);
+           }
+
            bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const {
                return this->page_table.GetPhysicalAddress(out, address);
            }
@@ -127,6 +127,7 @@ namespace ams::kern {
        }

        Result MakeCreateProcessParameter(ams::svc::CreateProcessParameter *out, bool enable_aslr) const;
        Result Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params) const;
    };

}
@@ -180,7 +180,7 @@ namespace ams::kern {
        }

        constexpr ALWAYS_INLINE bool Contains(uintptr_t address) const {
-           return this->GetAddress() <= address && address < this->GetLastAddress();
+           return this->GetAddress() <= address && address <= this->GetLastAddress();
        }

        constexpr ALWAYS_INLINE bool IsDerivedFrom(u32 type) const {
@@ -231,6 +231,7 @@ namespace ams::kern {
        };
        private:
            using TreeType = util::IntrusiveRedBlackTreeBaseTraits<KMemoryRegion>::TreeType<KMemoryRegion>;
        public:
            using value_type = TreeType::value_type;
            using size_type = TreeType::size_type;
            using difference_type = TreeType::difference_type;
@@ -276,7 +277,7 @@ namespace ams::kern {
            MESOSPHERE_INIT_ABORT();
        }

-       DerivedRegionExtents GetDerivedRegionExtents(u32 type_id) {
+       DerivedRegionExtents GetDerivedRegionExtents(u32 type_id) const {
            DerivedRegionExtents extents;

            MESOSPHERE_INIT_ABORT_UNLESS(extents.first_region == nullptr);
@@ -479,12 +480,24 @@ namespace ams::kern {
            return *GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_KernelStack);
        }

        static NOINLINE KMemoryRegion &GetTempRegion() {
            return *GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_KernelTemp);
        }

        static NOINLINE KMemoryRegion &GetVirtualLinearRegion(KVirtualAddress address) {
            return *GetVirtualLinearMemoryRegionTree().FindContainingRegion(GetInteger(address));
        }

-       static NOINLINE bool IsHeapPhysicalAddress(KMemoryRegion **out, KPhysicalAddress address) {
-           if (auto it = GetPhysicalLinearMemoryRegionTree().FindContainingRegion(GetInteger(address)); it != GetPhysicalLinearMemoryRegionTree().end() && it->IsDerivedFrom(KMemoryRegionType_DramNonKernel)) {
+       static NOINLINE bool IsHeapPhysicalAddress(const KMemoryRegion **out, KPhysicalAddress address, const KMemoryRegion *hint = nullptr) {
+           auto &tree = GetPhysicalLinearMemoryRegionTree();
+           KMemoryRegionTree::const_iterator it = tree.end();
+           if (hint != nullptr) {
+               it = tree.iterator_to(*hint);
+           }
+           if (it == tree.end() || !it->Contains(GetInteger(address))) {
+               it = tree.FindContainingRegion(GetInteger(address));
+           }
+           if (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_DramNonKernel)) {
                if (out) {
                    *out = std::addressof(*it);
                }
@@ -493,6 +506,72 @@ namespace ams::kern {
            return false;
        }

+       static NOINLINE bool IsHeapPhysicalAddress(const KMemoryRegion **out, KPhysicalAddress address, size_t size, const KMemoryRegion *hint = nullptr) {
+           auto &tree = GetPhysicalLinearMemoryRegionTree();
+           KMemoryRegionTree::const_iterator it = tree.end();
+           if (hint != nullptr) {
+               it = tree.iterator_to(*hint);
+           }
+           if (it == tree.end() || !it->Contains(GetInteger(address))) {
+               it = tree.FindContainingRegion(GetInteger(address));
+           }
+           if (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_DramNonKernel)) {
+               const uintptr_t last_address = GetInteger(address) + size - 1;
+               do {
+                   if (last_address <= it->GetLastAddress()) {
+                       if (out) {
+                           *out = std::addressof(*it);
+                       }
+                       return true;
+                   }
+                   it++;
+               } while (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_DramNonKernel));
+           }
+           return false;
+       }
+
+       static NOINLINE bool IsHeapVirtualAddress(const KMemoryRegion **out, KVirtualAddress address, const KMemoryRegion *hint = nullptr) {
+           auto &tree = GetVirtualLinearMemoryRegionTree();
+           KMemoryRegionTree::const_iterator it = tree.end();
+           if (hint != nullptr) {
+               it = tree.iterator_to(*hint);
+           }
+           if (it == tree.end() || !it->Contains(GetInteger(address))) {
+               it = tree.FindContainingRegion(GetInteger(address));
+           }
+           if (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_VirtualDramManagedPool)) {
+               if (out) {
+                   *out = std::addressof(*it);
+               }
+               return true;
+           }
+           return false;
+       }
+
+       static NOINLINE bool IsHeapVirtualAddress(const KMemoryRegion **out, KVirtualAddress address, size_t size, const KMemoryRegion *hint = nullptr) {
+           auto &tree = GetVirtualLinearMemoryRegionTree();
+           KMemoryRegionTree::const_iterator it = tree.end();
+           if (hint != nullptr) {
+               it = tree.iterator_to(*hint);
+           }
+           if (it == tree.end() || !it->Contains(GetInteger(address))) {
+               it = tree.FindContainingRegion(GetInteger(address));
+           }
+           if (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_VirtualDramManagedPool)) {
+               const uintptr_t last_address = GetInteger(address) + size - 1;
+               do {
+                   if (last_address <= it->GetLastAddress()) {
+                       if (out) {
+                           *out = std::addressof(*it);
+                       }
+                       return true;
+                   }
+                   it++;
+               } while (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_VirtualDramManagedPool));
+           }
+           return false;
+       }
+
        static NOINLINE std::tuple<size_t, size_t> GetTotalAndKernelMemorySizes() {
            size_t total_size = 0, kernel_size = 0;
            for (auto it = GetPhysicalMemoryRegionTree().cbegin(); it != GetPhysicalMemoryRegionTree().cend(); it++) {
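All four IsHeap*Address overloads above share one pattern: try the caller-supplied hint region first, fall back to FindContainingRegion on the linear region tree, and, for the ranged variants, keep walking forward while consecutive regions have the required derivation until the last byte of the range is covered. A minimal sketch of the hint-first lookup over a generic sorted container (std::map standing in for the kernel's intrusive red-black tree; Region, RegionMap and IsHeapAddress are illustrative names, not kernel types):

#include <cstdint>
#include <map>
#include <memory>

struct Region {
    std::uint64_t start;
    std::uint64_t last;   // inclusive last address
    bool is_heap;

    bool Contains(std::uint64_t addr) const { return start <= addr && addr <= last; }
};

// Keyed by start address; regions are assumed sorted and non-overlapping.
using RegionMap = std::map<std::uint64_t, Region>;

// Hint-first containment check, mirroring the IsHeapPhysicalAddress(out, address, hint) shape.
inline bool IsHeapAddress(const Region **out, const RegionMap &tree, std::uint64_t addr, const Region *hint = nullptr) {
    const Region *found = nullptr;

    // Fast path: the previously returned region often still contains the address.
    if (hint != nullptr && hint->Contains(addr)) {
        found = hint;
    } else {
        // Slow path: find the last region starting at or below addr.
        auto it = tree.upper_bound(addr);
        if (it != tree.begin()) {
            --it;
            if (it->second.Contains(addr)) {
                found = std::addressof(it->second);
            }
        }
    }

    if (found != nullptr && found->is_heap) {
        if (out != nullptr) {
            *out = found;   // refresh the caller's hint on success
        }
        return true;
    }
    return false;
}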
@@ -38,6 +38,7 @@ namespace ams::kern {
        constexpr size_t GetNumPages() const { return this->num_pages; }
        constexpr size_t GetSize() const { return this->GetNumPages() * PageSize; }
        constexpr KVirtualAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); }
        constexpr KVirtualAddress GetLastAddress() const { return this->GetEndAddress() - 1; }

        constexpr bool IsEquivalentTo(const KBlockInfo &rhs) const {
            return this->address == rhs.address && this->num_pages == rhs.num_pages;
@@ -124,9 +124,9 @@ namespace ams::kern {
            bool enable_aslr;
            KMemoryBlockSlabManager *memory_block_slab_manager;
            KBlockInfoManager *block_info_manager;
-           KMemoryRegion *cached_physical_linear_region;
-           KMemoryRegion *cached_physical_heap_region;
-           KMemoryRegion *cached_virtual_managed_pool_dram_region;
+           const KMemoryRegion *cached_physical_linear_region;
+           const KMemoryRegion *cached_physical_heap_region;
+           const KMemoryRegion *cached_virtual_heap_region;
            MemoryFillValue heap_fill_value;
            MemoryFillValue ipc_fill_value;
            MemoryFillValue stack_fill_value;
@@ -137,7 +137,7 @@ namespace ams::kern {
                kernel_map_region_end(), alias_code_region_start(), alias_code_region_end(), code_region_start(), code_region_end(),
                max_heap_size(), max_physical_memory_size(), general_lock(), map_physical_memory_lock(), impl(), memory_block_manager(),
                allocate_option(), address_space_size(), is_kernel(), enable_aslr(), memory_block_slab_manager(), block_info_manager(),
-               cached_physical_linear_region(), cached_physical_heap_region(), cached_virtual_managed_pool_dram_region(),
+               cached_physical_linear_region(), cached_physical_heap_region(), cached_virtual_heap_region(),
                heap_fill_value(), ipc_fill_value(), stack_fill_value()
            {
                /* ... */
@@ -172,10 +172,27 @@ namespace ams::kern {
            bool IsLockedByCurrentThread() const { return this->general_lock.IsLockedByCurrentThread(); }

            bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) {
-               if (this->cached_physical_heap_region && this->cached_physical_heap_region->Contains(GetInteger(phys_addr))) {
-                   return true;
-               }
-               return KMemoryLayout::IsHeapPhysicalAddress(&this->cached_physical_heap_region, phys_addr);
+               MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+               return KMemoryLayout::IsHeapPhysicalAddress(std::addressof(this->cached_physical_heap_region), phys_addr, this->cached_physical_heap_region);
            }

+           bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
+               MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+               return KMemoryLayout::IsHeapPhysicalAddress(std::addressof(this->cached_physical_heap_region), phys_addr, size, this->cached_physical_heap_region);
+           }
+
+           bool IsHeapVirtualAddress(KVirtualAddress virt_addr) {
+               MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+               return KMemoryLayout::IsHeapVirtualAddress(std::addressof(this->cached_virtual_heap_region), virt_addr, this->cached_virtual_heap_region);
+           }
+
+           bool IsHeapVirtualAddress(KVirtualAddress virt_addr, size_t size) {
+               MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+               return KMemoryLayout::IsHeapVirtualAddress(std::addressof(this->cached_virtual_heap_region), virt_addr, size, this->cached_virtual_heap_region);
+           }
+
            bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
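In KPageTableBase the hint is the table's own cached_physical_heap_region / cached_virtual_heap_region member, which the KMemoryLayout lookup refreshes on every successful query; the new MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()) calls document that the cache is only touched under the table's general lock, so it needs no synchronization of its own. A sketch of that caching wrapper, reusing the illustrative Region/RegionMap/IsHeapAddress helpers from the earlier sketch (hypothetical names throughout):

// Illustrative per-table wrapper over the IsHeapAddress sketch above. Assumes the
// caller already holds whatever lock protects this table, which is what the
// MESOSPHERE_ASSERT calls in the real code check.
class CachedRegionLookup {
    private:
        const RegionMap &tree;
        const Region *cached_heap_region = nullptr;
    public:
        explicit CachedRegionLookup(const RegionMap &t) : tree(t) { /* ... */ }

        bool IsHeapAddressCached(std::uint64_t addr) {
            // Pass the last hit as the hint; on success the lookup refreshes the cache.
            return IsHeapAddress(std::addressof(this->cached_heap_region), this->tree, addr, this->cached_heap_region);
        }
};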
@@ -193,6 +210,7 @@ namespace ams::kern {

            Result QueryInfoImpl(KMemoryInfo *out_info, ams::svc::PageInfo *out_page, KProcessAddress address) const;
            Result AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, const KPageProperties properties);
            Result MapPageGroupImpl(PageLinkedList *page_list, KProcessAddress address, const KPageGroup &pg, const KPageProperties properties, bool reuse_ll);

            NOINLINE Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
        public:
@@ -203,6 +221,10 @@ namespace ams::kern {
            Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
                return this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start, region_num_pages, state, perm);
            }

+           Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
+           Result MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
+           Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state);
        public:
            static ALWAYS_INLINE KVirtualAddress GetLinearVirtualAddress(KPhysicalAddress addr) {
                return KMemoryLayout::GetLinearVirtualAddress(addr);