commit 2f2c36b22b (parent 2c4bd44d7e)

kern: KMemoryManager/KPageGroup use physical addresses instead of virtual, now

17 changed files with 305 additions and 260 deletions. The hunks below switch the page-allocator interfaces (KMemoryManager and its per-pool Impl, KPageHeap, KPageGroup/KBlockInfo, and KPageTableBase's MemoryRange) from KVirtualAddress to KPhysicalAddress, shrink KBlockInfo to 16 bytes by storing a page index instead of an address, and replace KPageGroup's intrusive list with a singly-linked chain walked by a hand-rolled forward iterator.
kern_k_memory_layout.hpp

@@ -149,6 +149,7 @@ namespace ams::kern {
             static NOINLINE const KMemoryRegion &GetKernelTraceBufferRegion() { return Dereference(GetVirtualLinearMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramKernelTraceBuffer)); }
 
             static NOINLINE const KMemoryRegion &GetVirtualLinearRegion(KVirtualAddress address) { return Dereference(FindLinear(address)); }
+            static NOINLINE const KMemoryRegion &GetPhysicalLinearRegion(KPhysicalAddress address) { return Dereference(FindLinear(address)); }
 
             static NOINLINE const KMemoryRegion *GetPhysicalKernelTraceBufferRegion() { return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_KernelTraceBuffer); }
             static NOINLINE const KMemoryRegion *GetPhysicalOnMemoryBootImageRegion() { return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_OnMemoryBootImage); }
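The one functional addition in this hunk is GetPhysicalLinearRegion, the physical-space counterpart of GetVirtualLinearRegion; both resolve an address to the linear-mapped region containing it. As a rough standalone model only (types and tree structure are hypothetical, not Atmosphere's), the lookup amounts to a floor search over regions sorted by base address, with each region's attribute field doubling as an index (KMemoryManager below indexes its per-pool managers exactly this way):

    // Toy model of the lookup pattern above: one sorted tree of regions per
    // address space, and FindLinear() resolving an address to the region
    // containing it. All names here are invented for illustration.
    #include <cstdint>
    #include <map>
    #include <cassert>

    struct Region { std::uint64_t start, size; std::uint32_t attr; };

    // Regions keyed by start address; find the region containing addr, if any.
    const Region *FindLinear(const std::map<std::uint64_t, Region> &tree, std::uint64_t addr) {
        auto it = tree.upper_bound(addr);   // first region starting past addr
        if (it == tree.begin()) return nullptr;
        --it;                               // greatest start <= addr
        const Region &r = it->second;
        return (addr - r.start < r.size) ? &r : nullptr;
    }

    int main() {
        std::map<std::uint64_t, Region> physical_tree;
        physical_tree[0x8000'0000] = { 0x8000'0000, 0x4000'0000, /* attr = manager index */ 0 };

        // GetPhysicalLinearRegion(addr) reduces to Dereference(FindLinear(addr)).
        const Region *r = FindLinear(physical_tree, 0x9000'0000);
        assert(r != nullptr && r->attr == 0);
        return 0;
    }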
kern_k_memory_manager.hpp

@@ -70,37 +70,37 @@ namespace ams::kern {
                 public:
                     Impl() : m_heap(), m_page_reference_counts(), m_management_region(), m_pool(), m_next(), m_prev() { /* ... */ }
 
-                    size_t Initialize(uintptr_t address, size_t size, KVirtualAddress management, KVirtualAddress management_end, Pool p);
+                    size_t Initialize(KPhysicalAddress address, size_t size, KVirtualAddress management, KVirtualAddress management_end, Pool p);
 
-                    KVirtualAddress AllocateBlock(s32 index, bool random) { return m_heap.AllocateBlock(index, random); }
-                    void Free(KVirtualAddress addr, size_t num_pages) { m_heap.Free(addr, num_pages); }
+                    KPhysicalAddress AllocateBlock(s32 index, bool random) { return m_heap.AllocateBlock(index, random); }
+                    void Free(KPhysicalAddress addr, size_t num_pages) { m_heap.Free(addr, num_pages); }
 
                     void SetInitialUsedHeapSize(size_t reserved_size) { m_heap.SetInitialUsedSize(reserved_size); }
 
                     void InitializeOptimizedMemory() { std::memset(GetVoidPointer(m_management_region), 0, CalculateOptimizedProcessOverheadSize(m_heap.GetSize())); }
 
-                    void TrackUnoptimizedAllocation(KVirtualAddress block, size_t num_pages);
-                    void TrackOptimizedAllocation(KVirtualAddress block, size_t num_pages);
+                    void TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages);
+                    void TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages);
 
-                    bool ProcessOptimizedAllocation(KVirtualAddress block, size_t num_pages, u8 fill_pattern);
+                    bool ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages, u8 fill_pattern);
 
                     constexpr Pool GetPool() const { return m_pool; }
                     constexpr size_t GetSize() const { return m_heap.GetSize(); }
-                    constexpr KVirtualAddress GetEndAddress() const { return m_heap.GetEndAddress(); }
+                    constexpr KPhysicalAddress GetEndAddress() const { return m_heap.GetEndAddress(); }
 
                     size_t GetFreeSize() const { return m_heap.GetFreeSize(); }
 
                     void DumpFreeList() const { return m_heap.DumpFreeList(); }
 
-                    constexpr size_t GetPageOffset(KVirtualAddress address) const { return m_heap.GetPageOffset(address); }
-                    constexpr size_t GetPageOffsetToEnd(KVirtualAddress address) const { return m_heap.GetPageOffsetToEnd(address); }
+                    constexpr size_t GetPageOffset(KPhysicalAddress address) const { return m_heap.GetPageOffset(address); }
+                    constexpr size_t GetPageOffsetToEnd(KPhysicalAddress address) const { return m_heap.GetPageOffsetToEnd(address); }
 
                     constexpr void SetNext(Impl *n) { m_next = n; }
                     constexpr void SetPrev(Impl *n) { m_prev = n; }
                     constexpr Impl *GetNext() const { return m_next; }
                     constexpr Impl *GetPrev() const { return m_prev; }
 
-                    void OpenFirst(KVirtualAddress address, size_t num_pages) {
+                    void OpenFirst(KPhysicalAddress address, size_t num_pages) {
                         size_t index = this->GetPageOffset(address);
                         const size_t end = index + num_pages;
                         while (index < end) {
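The loop bodies are elided by the hunk, but the surrounding declarations (m_page_reference_counts, and OpenFirst/Open/Close each converting the address to a page offset) point at per-page reference counting. A hypothetical standalone sketch of that bookkeeping, with invented sizes and types, where OpenFirst stores an initial count outright and Open/Close adjust it:

    // Hypothetical sketch only; not the elided Atmosphere loop bodies.
    #include <cstddef>
    #include <cstdint>
    #include <cassert>

    constexpr std::size_t PageSize = 0x1000;

    struct PoolImpl {
        std::uint64_t heap_address;        // physical base (virtual before this commit)
        std::uint16_t refcounts[16] = {};  // one count per managed page

        std::size_t GetPageOffset(std::uint64_t addr) const { return (addr - heap_address) / PageSize; }

        // First reference to freshly allocated pages: store 1 outright.
        void OpenFirst(std::uint64_t addr, std::size_t n) {
            for (std::size_t i = this->GetPageOffset(addr); n--; ++i) refcounts[i] = 1;
        }
        // Subsequent references: increment.
        void Open(std::uint64_t addr, std::size_t n) {
            for (std::size_t i = this->GetPageOffset(addr); n--; ++i) ++refcounts[i];
        }
        // Dropping references: a page whose count reaches zero is free again.
        void Close(std::uint64_t addr, std::size_t n) {
            for (std::size_t i = this->GetPageOffset(addr); n--; ++i) --refcounts[i];
        }
    };

    int main() {
        PoolImpl p{0x8000'0000};
        p.OpenFirst(0x8000'2000, 2);
        p.Open(0x8000'2000, 1);
        p.Close(0x8000'2000, 1);
        assert(p.refcounts[2] == 1 && p.refcounts[3] == 1);
        return 0;
    }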
@@ -111,7 +111,7 @@ namespace ams::kern {
                         }
                     }
 
-                    void Open(KVirtualAddress address, size_t num_pages) {
+                    void Open(KPhysicalAddress address, size_t num_pages) {
                         size_t index = this->GetPageOffset(address);
                         const size_t end = index + num_pages;
                         while (index < end) {
@@ -122,7 +122,7 @@ namespace ams::kern {
                         }
                     }
 
-                    void Close(KVirtualAddress address, size_t num_pages) {
+                    void Close(KPhysicalAddress address, size_t num_pages) {
                         size_t index = this->GetPageOffset(address);
                         const size_t end = index + num_pages;
@@ -164,12 +164,12 @@ namespace ams::kern {
             u64  m_optimized_process_ids[Pool_Count];
             bool m_has_optimized_process[Pool_Count];
         private:
-            Impl &GetManager(KVirtualAddress address) {
-                return m_managers[KMemoryLayout::GetVirtualLinearRegion(address).GetAttributes()];
+            Impl &GetManager(KPhysicalAddress address) {
+                return m_managers[KMemoryLayout::GetPhysicalLinearRegion(address).GetAttributes()];
             }
 
-            const Impl &GetManager(KVirtualAddress address) const {
-                return m_managers[KMemoryLayout::GetVirtualLinearRegion(address).GetAttributes()];
+            const Impl &GetManager(KPhysicalAddress address) const {
+                return m_managers[KMemoryLayout::GetPhysicalLinearRegion(address).GetAttributes()];
             }
 
             constexpr Impl *GetFirstManager(Pool pool, Direction dir) {
@@ -197,15 +197,15 @@ namespace ams::kern {
             NOINLINE Result InitializeOptimizedMemory(u64 process_id, Pool pool);
             NOINLINE void FinalizeOptimizedMemory(u64 process_id, Pool pool);
 
-            NOINLINE KVirtualAddress AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
+            NOINLINE KPhysicalAddress AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
             NOINLINE Result AllocateAndOpen(KPageGroup *out, size_t num_pages, u32 option);
             NOINLINE Result AllocateAndOpenForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern);
 
-            Pool GetPool(KVirtualAddress address) const {
+            Pool GetPool(KPhysicalAddress address) const {
                 return this->GetManager(address).GetPool();
             }
 
-            void Open(KVirtualAddress address, size_t num_pages) {
+            void Open(KPhysicalAddress address, size_t num_pages) {
                 /* Repeatedly open references until we've done so for all pages. */
                 while (num_pages) {
                     auto &manager = this->GetManager(address);
@@ -221,7 +221,7 @@ namespace ams::kern {
                 }
             }
 
-            void Close(KVirtualAddress address, size_t num_pages) {
+            void Close(KPhysicalAddress address, size_t num_pages) {
                 /* Repeatedly close references until we've done so for all pages. */
                 while (num_pages) {
                     auto &manager = this->GetManager(address);
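A physical range passed to Open/Close may span more than one manager, which is why both loops re-resolve GetManager(address) on every iteration. A self-contained sketch of the same pattern (types invented; the clamp via GetPageOffsetToEnd mirrors what the elided loop bodies plausibly do):

    // Sketch under stated assumptions, not Atmosphere code.
    #include <cstddef>
    #include <cstdint>
    #include <algorithm>
    #include <vector>
    #include <cassert>

    constexpr std::size_t PageSize = 0x1000;

    struct Manager {
        std::uint64_t start, size;             // managed physical range
        std::vector<std::uint16_t> refcounts;  // one count per page

        std::size_t GetPageOffset(std::uint64_t addr) const { return (addr - start) / PageSize; }
        std::size_t GetPageOffsetToEnd(std::uint64_t addr) const { return (start + size - addr) / PageSize; }

        void Open(std::uint64_t addr, std::size_t n) {
            for (std::size_t i = this->GetPageOffset(addr); n--; ++i) ++refcounts[i];
        }
    };

    void Open(std::vector<Manager> &managers, std::uint64_t address, std::size_t num_pages) {
        while (num_pages) {
            // Find the manager whose range contains address (stand-in for GetManager()).
            auto &m = *std::find_if(managers.begin(), managers.end(), [&](const Manager &mm) {
                return mm.start <= address && address < mm.start + mm.size;
            });
            // Open at most the pages remaining in this manager's range.
            const std::size_t cur = std::min(num_pages, m.GetPageOffsetToEnd(address));
            m.Open(address, cur);
            num_pages -= cur;
            address   += cur * PageSize;
        }
    }

    int main() {
        std::vector<Manager> ms;
        ms.push_back({0x8000'0000, 0x2000, std::vector<std::uint16_t>(2)});
        ms.push_back({0x8000'2000, 0x2000, std::vector<std::uint16_t>(2)});
        Open(ms, 0x8000'1000, 2);  // spans both managers: one page in each
        assert(ms[0].refcounts[1] == 1 && ms[1].refcounts[0] == 1);
        return 0;
    }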
kern_k_page_group.hpp

@@ -22,78 +22,121 @@ namespace ams::kern {
 
     class KBlockInfoManager;
 
-    class KBlockInfo : public util::IntrusiveListBaseNode<KBlockInfo> {
+    class KPageGroup;
+
+    class KBlockInfo {
         private:
-            KVirtualAddress m_address;
-            size_t m_num_pages;
+            friend class KPageGroup;
+        private:
+            KBlockInfo *m_next{};
+            u32 m_page_index{};
+            u32 m_num_pages{};
         public:
-            constexpr KBlockInfo() : util::IntrusiveListBaseNode<KBlockInfo>(), m_address(), m_num_pages() { /* ... */ }
+            constexpr KBlockInfo() = default;
 
-            constexpr void Initialize(KVirtualAddress addr, size_t np) {
-                m_address   = addr;
-                m_num_pages = np;
+            constexpr ALWAYS_INLINE void Initialize(KPhysicalAddress addr, size_t np) {
+                MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), PageSize));
+                MESOSPHERE_ASSERT(static_cast<u32>(np) == np);
+
+                m_page_index = GetInteger(addr) / PageSize;
+                m_num_pages  = np;
             }
 
-            constexpr KVirtualAddress GetAddress() const { return m_address; }
-            constexpr size_t GetNumPages() const { return m_num_pages; }
-            constexpr size_t GetSize() const { return this->GetNumPages() * PageSize; }
-            constexpr KVirtualAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); }
-            constexpr KVirtualAddress GetLastAddress() const { return this->GetEndAddress() - 1; }
+            constexpr ALWAYS_INLINE KPhysicalAddress GetAddress() const { return m_page_index * PageSize; }
+            constexpr ALWAYS_INLINE size_t GetNumPages() const { return m_num_pages; }
+            constexpr ALWAYS_INLINE size_t GetSize() const { return this->GetNumPages() * PageSize; }
+            constexpr ALWAYS_INLINE KPhysicalAddress GetEndAddress() const { return (m_page_index + m_num_pages) * PageSize; }
+            constexpr ALWAYS_INLINE KPhysicalAddress GetLastAddress() const { return this->GetEndAddress() - 1; }
+
+            constexpr ALWAYS_INLINE KBlockInfo *GetNext() const { return m_next; }
 
-            constexpr bool IsEquivalentTo(const KBlockInfo &rhs) const {
-                return m_address == rhs.m_address && m_num_pages == rhs.m_num_pages;
+            constexpr ALWAYS_INLINE bool IsEquivalentTo(const KBlockInfo &rhs) const {
+                return m_page_index == rhs.m_page_index && m_num_pages == rhs.m_num_pages;
             }
 
-            constexpr bool operator==(const KBlockInfo &rhs) const {
+            constexpr ALWAYS_INLINE bool operator==(const KBlockInfo &rhs) const {
                 return this->IsEquivalentTo(rhs);
             }
 
-            constexpr bool operator!=(const KBlockInfo &rhs) const {
+            constexpr ALWAYS_INLINE bool operator!=(const KBlockInfo &rhs) const {
                 return !(*this == rhs);
             }
 
-            constexpr bool IsStrictlyBefore(KVirtualAddress addr) const {
-                const KVirtualAddress end = this->GetEndAddress();
+            constexpr ALWAYS_INLINE bool IsStrictlyBefore(KPhysicalAddress addr) const {
+                const KPhysicalAddress end = this->GetEndAddress();
 
-                if (m_address != Null<KVirtualAddress> && end == Null<KVirtualAddress>) {
+                if (m_page_index != 0 && end == Null<KPhysicalAddress>) {
                     return false;
                 }
 
                 return end < addr;
             }
 
-            constexpr bool operator<(KVirtualAddress addr) const {
+            constexpr ALWAYS_INLINE bool operator<(KPhysicalAddress addr) const {
                 return this->IsStrictlyBefore(addr);
            }
 
-            constexpr bool TryConcatenate(KVirtualAddress addr, size_t np) {
-                if (addr != Null<KVirtualAddress> && addr == this->GetEndAddress()) {
+            constexpr ALWAYS_INLINE bool TryConcatenate(KPhysicalAddress addr, size_t np) {
+                if (addr != Null<KPhysicalAddress> && addr == this->GetEndAddress()) {
                     m_num_pages += np;
                     return true;
                 }
                 return false;
             }
+        private:
+            constexpr ALWAYS_INLINE void SetNext(KBlockInfo *next) {
+                m_next = next;
+            }
     };
+    static_assert(sizeof(KBlockInfo) <= 0x10);
 
     class KPageGroup {
         public:
-            using BlockInfoList = util::IntrusiveListBaseTraits<KBlockInfo>::ListType;
-            using iterator = BlockInfoList::const_iterator;
+            class Iterator {
+                public:
+                    using iterator_category = std::forward_iterator_tag;
+                    using value_type        = const KBlockInfo;
+                    using difference_type   = std::ptrdiff_t;
+                    using pointer           = value_type *;
+                    using reference         = value_type &;
+                private:
+                    pointer m_node;
+                public:
+                    constexpr explicit ALWAYS_INLINE Iterator(pointer n) : m_node(n) { /* ... */ }
+
+                    constexpr ALWAYS_INLINE bool operator==(const Iterator &rhs) const { return m_node == rhs.m_node; }
+                    constexpr ALWAYS_INLINE bool operator!=(const Iterator &rhs) const { return !(*this == rhs); }
+
+                    constexpr ALWAYS_INLINE pointer operator->() const { return m_node; }
+                    constexpr ALWAYS_INLINE reference operator*() const { return *m_node; }
+
+                    constexpr ALWAYS_INLINE Iterator &operator++() {
+                        m_node = m_node->GetNext();
+                        return *this;
+                    }
+
+                    constexpr ALWAYS_INLINE Iterator operator++(int) {
+                        const Iterator it{*this};
+                        ++(*this);
+                        return it;
+                    }
+            };
         private:
-            BlockInfoList m_block_list;
+            KBlockInfo *m_first_block;
+            KBlockInfo *m_last_block;
             KBlockInfoManager *m_manager;
         public:
-            explicit KPageGroup(KBlockInfoManager *m) : m_block_list(), m_manager(m) { /* ... */ }
+            explicit KPageGroup(KBlockInfoManager *m) : m_first_block(), m_last_block(), m_manager(m) { /* ... */ }
             ~KPageGroup() { this->Finalize(); }
 
             void CloseAndReset();
             void Finalize();
 
-            iterator begin() const { return m_block_list.begin(); }
-            iterator end() const { return m_block_list.end(); }
-            bool empty() const { return m_block_list.empty(); }
+            ALWAYS_INLINE Iterator begin() const { return Iterator{m_first_block}; }
+            ALWAYS_INLINE Iterator end() const { return Iterator{nullptr}; }
+            ALWAYS_INLINE bool empty() const { return m_first_block == nullptr; }
 
-            Result AddBlock(KVirtualAddress addr, size_t num_pages);
+            Result AddBlock(KPhysicalAddress addr, size_t num_pages);
             void Open() const;
             void Close() const;
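The new static_assert(sizeof(KBlockInfo) <= 0x10) is the point of the layout change: a u32 page index plus a u32 page count fit beside the next pointer in 16 bytes, where the old node carried intrusive-list pointers plus a full 8-byte address and a size_t count. A standalone illustration (names invented, not Atmosphere's):

    // A u32 page index and u32 count pack next to the singly-linked next
    // pointer, so the node is 16 bytes on a 64-bit target.
    #include <cstdint>
    #include <cassert>

    constexpr std::uint64_t PageSize = 0x1000;

    struct BlockInfo {
        BlockInfo    *next       = nullptr;
        std::uint32_t page_index = 0;       // physical address / PageSize
        std::uint32_t num_pages  = 0;
    };
    static_assert(sizeof(BlockInfo) <= 0x10);

    int main() {
        const std::uint64_t addr = 0x1'8000'0000;  // a 6 GiB physical address
        BlockInfo b;
        b.page_index = static_cast<std::uint32_t>(addr / PageSize);
        b.num_pages  = 4;
        // Round-trips while the address stays below 2^32 pages (16 TiB at 4 KiB).
        assert(static_cast<std::uint64_t>(b.page_index) * PageSize == addr);
        return 0;
    }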
@@ -101,11 +144,11 @@ namespace ams::kern {
 
             bool IsEquivalentTo(const KPageGroup &rhs) const;
 
-            bool operator==(const KPageGroup &rhs) const {
+            ALWAYS_INLINE bool operator==(const KPageGroup &rhs) const {
                 return this->IsEquivalentTo(rhs);
             }
 
-            bool operator!=(const KPageGroup &rhs) const {
+            ALWAYS_INLINE bool operator!=(const KPageGroup &rhs) const {
                 return !(*this == rhs);
             }
     };
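With the intrusive list gone, iteration walks the m_next chain through the new Iterator. A minimal self-contained model of the same shape, with Node standing in for KBlockInfo, showing that range-for over the group keeps working because begin()/end() return a forward iterator whose operator++ follows the chain:

    #include <cstddef>
    #include <iterator>
    #include <cassert>

    struct Node {
        Node *next = nullptr;
        std::size_t num_pages = 0;
    };

    class NodeRange {
        public:
            class Iterator {
                public:
                    using iterator_category = std::forward_iterator_tag;
                    using value_type        = const Node;
                    using difference_type   = std::ptrdiff_t;
                    using pointer           = value_type *;
                    using reference         = value_type &;
                private:
                    pointer m_node;
                public:
                    constexpr explicit Iterator(pointer n) : m_node(n) {}
                    constexpr bool operator==(const Iterator &rhs) const { return m_node == rhs.m_node; }
                    constexpr bool operator!=(const Iterator &rhs) const { return !(*this == rhs); }
                    constexpr reference operator*() const { return *m_node; }
                    constexpr Iterator &operator++() { m_node = m_node->next; return *this; }
            };
        private:
            Node *m_first;
        public:
            constexpr explicit NodeRange(Node *first) : m_first(first) {}
            constexpr Iterator begin() const { return Iterator{m_first}; }
            constexpr Iterator end() const { return Iterator{nullptr}; }
    };

    int main() {
        Node b{nullptr, 3}, a{&b, 5};
        std::size_t total = 0;
        for (const Node &n : NodeRange{&a}) total += n.num_pages;  // 5 + 3
        assert(total == 8);
        return 0;
    }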
kern_k_page_heap.hpp

@@ -54,7 +54,7 @@ namespace ams::kern {
             class Block {
                 private:
                     KPageBitmap m_bitmap;
-                    KVirtualAddress m_heap_address;
+                    KPhysicalAddress m_heap_address;
                     uintptr_t m_end_offset;
                     size_t m_block_shift;
                     size_t m_next_block_shift;
@@ -68,13 +68,13 @@ namespace ams::kern {
                     constexpr size_t GetNumFreeBlocks() const { return m_bitmap.GetNumBits(); }
                     constexpr size_t GetNumFreePages() const { return this->GetNumFreeBlocks() * this->GetNumPages(); }
 
-                    u64 *Initialize(KVirtualAddress addr, size_t size, size_t bs, size_t nbs, u64 *bit_storage) {
+                    u64 *Initialize(KPhysicalAddress addr, size_t size, size_t bs, size_t nbs, u64 *bit_storage) {
                         /* Set shifts. */
                         m_block_shift = bs;
                         m_next_block_shift = nbs;
 
                         /* Align up the address. */
-                        KVirtualAddress end = addr + size;
+                        KPhysicalAddress end = addr + size;
                         const size_t align = (m_next_block_shift != 0) ? (u64(1) << m_next_block_shift) : (u64(1) << m_block_shift);
                         addr = util::AlignDown(GetInteger(addr), align);
                         end  = util::AlignUp(GetInteger(end), align);
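A worked example of the alignment step in Block::Initialize, with made-up values (AlignDown/AlignUp are simplified stand-ins for util::AlignDown/util::AlignUp): for a block shift of 16 (64 KiB blocks) whose next shift is 21 (2 MiB), the range is widened to 2 MiB boundaries before bitmap offsets are computed.

    #include <cstdint>
    #include <cassert>

    constexpr std::uint64_t AlignDown(std::uint64_t v, std::uint64_t a) { return v & ~(a - 1); }
    constexpr std::uint64_t AlignUp(std::uint64_t v, std::uint64_t a)   { return AlignDown(v + a - 1, a); }

    int main() {
        const std::uint64_t addr = 0x8012'3000, size = 0x40'0000;
        const std::uint64_t align = std::uint64_t(1) << 21;  // next_block_shift != 0
        assert(AlignDown(addr, align) == 0x8000'0000);       // widened downward
        assert(AlignUp(addr + size, align) == 0x8060'0000);  // widened upward
        return 0;
    }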
@@ -84,7 +84,7 @@ namespace ams::kern {
                         return m_bitmap.Initialize(bit_storage, m_end_offset);
                     }
 
-                    KVirtualAddress PushBlock(KVirtualAddress address) {
+                    KPhysicalAddress PushBlock(KPhysicalAddress address) {
                         /* Set the bit for the free block. */
                         size_t offset = (address - m_heap_address) >> this->GetShift();
                         m_bitmap.SetBit(offset);
@@ -99,14 +99,14 @@ namespace ams::kern {
                         }
 
                         /* We couldn't coalesce, or we're already as big as possible. */
-                        return Null<KVirtualAddress>;
+                        return Null<KPhysicalAddress>;
                     }
 
-                    KVirtualAddress PopBlock(bool random) {
+                    KPhysicalAddress PopBlock(bool random) {
                         /* Find a free block. */
                         ssize_t soffset = m_bitmap.FindFreeBlock(random);
                         if (soffset < 0) {
-                            return Null<KVirtualAddress>;
+                            return Null<KPhysicalAddress>;
                         }
                         const size_t offset = static_cast<size_t>(soffset);
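PushBlock and PopBlock translate between addresses and bit offsets with (address - m_heap_address) >> shift. A toy single-word version of that protocol (hypothetical, and it skips the coalescing-into-larger-blocks step that the elided PushBlock body attempts before returning Null):

    #include <bit>
    #include <cstdint>
    #include <cassert>

    struct BlockBitmap {
        std::uint64_t bits = 0;      // 1 = free; 64 blocks only, for brevity
        std::uint64_t heap_address;  // physical base of this block size's range
        std::uint32_t shift;         // log2(block size)

        void Push(std::uint64_t address) {
            bits |= std::uint64_t(1) << ((address - heap_address) >> shift);
        }
        // Returns ~0 when empty (stand-in for Null<KPhysicalAddress>).
        std::uint64_t Pop() {
            if (!bits) return ~std::uint64_t(0);
            const int offset = std::countr_zero(bits);  // lowest free block
            bits &= bits - 1;                           // clear it
            return heap_address + (std::uint64_t(offset) << shift);
        }
    };

    int main() {
        BlockBitmap b{0, 0x8000'0000, 12};  // hypothetical 4 KiB blocks
        b.Push(0x8000'3000);
        assert(b.Pop() == 0x8000'3000);
        assert(b.Pop() == ~std::uint64_t(0));
        return 0;
    }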
@@ -123,27 +123,27 @@ namespace ams::kern {
                     }
             };
         private:
-            KVirtualAddress m_heap_address;
+            KPhysicalAddress m_heap_address;
             size_t m_heap_size;
             size_t m_initial_used_size;
             size_t m_num_blocks;
             Block m_blocks[NumMemoryBlockPageShifts];
         private:
-            void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress management_address, size_t management_size, const size_t *block_shifts, size_t num_block_shifts);
+            void Initialize(KPhysicalAddress heap_address, size_t heap_size, KVirtualAddress management_address, size_t management_size, const size_t *block_shifts, size_t num_block_shifts);
             size_t GetNumFreePages() const;
 
-            void FreeBlock(KVirtualAddress block, s32 index);
+            void FreeBlock(KPhysicalAddress block, s32 index);
         public:
             KPageHeap() : m_heap_address(), m_heap_size(), m_initial_used_size(), m_num_blocks(), m_blocks() { /* ... */ }
 
-            constexpr KVirtualAddress GetAddress() const { return m_heap_address; }
+            constexpr KPhysicalAddress GetAddress() const { return m_heap_address; }
             constexpr size_t GetSize() const { return m_heap_size; }
-            constexpr KVirtualAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); }
-            constexpr size_t GetPageOffset(KVirtualAddress block) const { return (block - this->GetAddress()) / PageSize; }
-            constexpr size_t GetPageOffsetToEnd(KVirtualAddress block) const { return (this->GetEndAddress() - block) / PageSize; }
+            constexpr KPhysicalAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); }
+            constexpr size_t GetPageOffset(KPhysicalAddress block) const { return (block - this->GetAddress()) / PageSize; }
+            constexpr size_t GetPageOffsetToEnd(KPhysicalAddress block) const { return (this->GetEndAddress() - block) / PageSize; }
 
-            void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress management_address, size_t management_size) {
-                return Initialize(heap_address, heap_size, management_address, management_size, MemoryBlockPageShifts, NumMemoryBlockPageShifts);
+            void Initialize(KPhysicalAddress heap_address, size_t heap_size, KVirtualAddress management_address, size_t management_size) {
+                return this->Initialize(heap_address, heap_size, management_address, management_size, MemoryBlockPageShifts, NumMemoryBlockPageShifts);
             }
 
             size_t GetFreeSize() const { return this->GetNumFreePages() * PageSize; }
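A worked example of the offset helpers above, with made-up numbers: for a heap at physical 0x8000'0000 of size 0x10000 (16 pages of 0x1000), a block at 0x8000'3000 is page 3 from the start and has 13 pages remaining to the end.

    #include <cstdint>

    int main() {
        constexpr std::uint64_t PageSize = 0x1000;
        constexpr std::uint64_t heap  = 0x8000'0000, size = 0x10000;
        constexpr std::uint64_t block = 0x8000'3000;
        static_assert((block - heap) / PageSize == 3);            // GetPageOffset
        static_assert(((heap + size) - block) / PageSize == 13);  // GetPageOffsetToEnd
        return 0;
    }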
@@ -158,8 +158,8 @@ namespace ams::kern {
                 m_initial_used_size = m_heap_size - free_size - reserved_size;
             }
 
-            KVirtualAddress AllocateBlock(s32 index, bool random);
-            void Free(KVirtualAddress addr, size_t num_pages);
+            KPhysicalAddress AllocateBlock(s32 index, bool random);
+            void Free(KPhysicalAddress addr, size_t num_pages);
         private:
             static size_t CalculateManagementOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts);
         public:
kern_k_page_table_base.hpp

@@ -57,7 +57,7 @@ namespace ams::kern {
             using TraversalContext = KPageTableImpl::TraversalContext;
 
             struct MemoryRange {
-                KVirtualAddress address;
+                KPhysicalAddress address;
                 size_t size;
 
                 void Close();
@@ -178,7 +178,6 @@ namespace ams::kern {
             KResourceLimit *m_resource_limit{};
             const KMemoryRegion *m_cached_physical_linear_region{};
             const KMemoryRegion *m_cached_physical_heap_region{};
-            const KMemoryRegion *m_cached_virtual_heap_region{};
             MemoryFillValue m_heap_fill_value{};
             MemoryFillValue m_ipc_fill_value{};
             MemoryFillValue m_stack_fill_value{};
@@ -257,18 +256,6 @@ namespace ams::kern {
                 return KMemoryLayout::IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
             }
 
-            ALWAYS_INLINE bool IsHeapVirtualAddress(KVirtualAddress virt_addr) {
-                MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
-
-                return KMemoryLayout::IsHeapVirtualAddress(m_cached_virtual_heap_region, virt_addr);
-            }
-
-            ALWAYS_INLINE bool IsHeapVirtualAddress(KVirtualAddress virt_addr, size_t size) {
-                MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
-
-                return KMemoryLayout::IsHeapVirtualAddress(m_cached_virtual_heap_region, virt_addr, size);
-            }
-
             ALWAYS_INLINE bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
                 return (m_address_space_start <= addr) && (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) && (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
             }
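Unchanged here but worth a note: ContainsPages compares last addresses (addr + size - 1 <= end - 1) rather than exclusive ends. One plausible reason for that form is wrap-around robustness when a range abuts the very top of an address space, where an end-exclusive sum can overflow to zero; a toy 32-bit demonstration of the hazard:

    #include <cstdint>
    #include <cassert>

    int main() {
        using u32 = std::uint32_t;
        // Suppose a full 4 GiB space: its exclusive end, 0x1'0000'0000, is not
        // representable in 32 bits, but end - 1 is.
        const u32 space_last = 0xFFFF'FFFF;
        const u32 addr = 0xFFFF'F000;  // last 4 KiB page
        const u32 len  = 0x1000;

        assert(u32(addr + len) == 0);               // end-exclusive sum wraps to 0
        assert(u32(addr + len - 1) <= space_last);  // last-address form stays ordered
        return 0;
    }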