kern: move SecureAppletMemory/KPageBuffer heap into the ResourceRegion

Michael Scire 2022-10-11 23:14:15 -07:00 committed by SciresM
parent ea82889e6c
commit 5a918f3bc9
21 changed files with 282 additions and 100 deletions

libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp

@@ -20,6 +20,9 @@
 namespace ams::kern::board::nintendo::nx {

     class KSystemControl : public KSystemControlBase {
         public:
+            /* This can be overridden as needed. */
+            static constexpr size_t SecureAppletMemorySize = 4_MB;
+        public:
             class Init : public KSystemControlBase::Init {
                 public:

libraries/libmesosphere/include/mesosphere/init/kern_init_slab_setup.hpp

@@ -42,7 +42,6 @@ namespace ams::kern::init {
     const KSlabResourceCounts &GetSlabResourceCounts();

     size_t CalculateTotalSlabHeapSize();
-    NOINLINE void InitializeKPageBufferSlabHeap();
     NOINLINE void InitializeSlabHeaps();

 }

libraries/libmesosphere/include/mesosphere/kern_k_dynamic_page_manager.hpp

@@ -38,30 +38,37 @@ namespace ams::kern {
             size_t m_peak;
             size_t m_count;
             KVirtualAddress m_address;
+            KVirtualAddress m_aligned_address;
             size_t m_size;
         public:
-            KDynamicPageManager() : m_lock(), m_page_bitmap(), m_used(), m_peak(), m_count(), m_address(Null<KVirtualAddress>), m_size() { /* ... */ }
+            KDynamicPageManager() : m_lock(), m_page_bitmap(), m_used(), m_peak(), m_count(), m_address(Null<KVirtualAddress>), m_aligned_address(Null<KVirtualAddress>), m_size() { /* ... */ }

-            Result Initialize(KVirtualAddress memory, size_t sz) {
+            Result Initialize(KVirtualAddress memory, size_t size, size_t align) {
                 /* We need to have positive size. */
-                R_UNLESS(sz > 0, svc::ResultOutOfMemory());
+                R_UNLESS(size > 0, svc::ResultOutOfMemory());

-                /* Calculate management overhead. */
-                const size_t management_size  = KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer));
-                const size_t allocatable_size = sz - management_size;
+                /* Set addresses. */
+                m_address         = memory;
+                m_aligned_address = util::AlignDown(GetInteger(memory), align);
+
+                /* Calculate extents. */
+                const size_t managed_size  = m_address + size - m_aligned_address;
+                const size_t overhead_size = util::AlignUp(KPageBitmap::CalculateManagementOverheadSize(managed_size / sizeof(PageBuffer)), sizeof(PageBuffer));
+                R_UNLESS(overhead_size < size, svc::ResultOutOfMemory());

                 /* Set tracking fields. */
-                m_address = memory;
-                m_size    = util::AlignDown(allocatable_size, sizeof(PageBuffer));
-                m_count   = allocatable_size / sizeof(PageBuffer);
-                R_UNLESS(m_count > 0, svc::ResultOutOfMemory());
+                m_size  = util::AlignDown(size - overhead_size, sizeof(PageBuffer));
+                m_count = m_size / sizeof(PageBuffer);

                 /* Clear the management region. */
-                u64 *management_ptr = GetPointer<u64>(m_address + allocatable_size);
-                std::memset(management_ptr, 0, management_size);
+                u64 *management_ptr = GetPointer<u64>(m_address + size - overhead_size);
+                std::memset(management_ptr, 0, overhead_size);

                 /* Initialize the bitmap. */
-                m_page_bitmap.Initialize(management_ptr, m_count);
+                const size_t allocatable_region_size = (GetInteger(m_address) + size - overhead_size) - GetInteger(m_aligned_address);
+                MESOSPHERE_ABORT_UNLESS(allocatable_region_size >= sizeof(PageBuffer));
+
+                m_page_bitmap.Initialize(management_ptr, allocatable_region_size / sizeof(PageBuffer));

                 /* Free the pages to the bitmap. */
                 for (size_t i = 0; i < m_count; i++) {
@@ -69,7 +76,7 @@
                     cpu::ClearPageToZero(GetPointer<PageBuffer>(m_address) + i);

                     /* Set the bit for the free page. */
-                    m_page_bitmap.SetBit(i);
+                    m_page_bitmap.SetBit((GetInteger(m_address) + (i * sizeof(PageBuffer)) - GetInteger(m_aligned_address)) / sizeof(PageBuffer));
                 }

                 R_SUCCEED();
@@ -98,7 +105,28 @@
                 m_page_bitmap.ClearBit(offset);
                 m_peak = std::max(m_peak, (++m_used));

-                return GetPointer<PageBuffer>(m_address) + offset;
+                return GetPointer<PageBuffer>(m_aligned_address) + offset;
             }

+            PageBuffer *Allocate(size_t count) {
+                /* Take the lock. */
+                KScopedInterruptDisable di;
+                KScopedSpinLock lk(m_lock);
+
+                /* Find a random free block. */
+                ssize_t soffset = m_page_bitmap.FindFreeRange(count);
+                if (AMS_UNLIKELY(soffset < 0)) {
+                    return nullptr;
+                }
+
+                const size_t offset = static_cast<size_t>(soffset);
+
+                /* Update our tracking. */
+                m_page_bitmap.ClearRange(offset, count);
+                m_used += count;
+                m_peak  = std::max(m_peak, m_used);
+
+                return GetPointer<PageBuffer>(m_aligned_address) + offset;
+            }
+
             void Free(PageBuffer *pb) {
@@ -110,7 +138,7 @@
                 KScopedSpinLock lk(m_lock);

                 /* Set the bit for the free page. */
-                size_t offset = (reinterpret_cast<uintptr_t>(pb) - GetInteger(m_address)) / sizeof(PageBuffer);
+                size_t offset = (reinterpret_cast<uintptr_t>(pb) - GetInteger(m_aligned_address)) / sizeof(PageBuffer);
                 m_page_bitmap.SetBit(offset);

                 /* Decrement our used count. */
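
Note: the key invariant in the reworked KDynamicPageManager is that bitmap offsets are computed relative to m_aligned_address (the heap base rounded down to the new align parameter) rather than m_address, so an offset that is a multiple of align / PageSize always names an align-aligned block; that is what the new Allocate(count) relies on. A minimal standalone sketch of the offset arithmetic, with hypothetical address/alignment values not taken from the commit:

    #include <cstdint>
    #include <cstdio>

    int main() {
        constexpr uint64_t PageSize = 0x1000;
        constexpr uint64_t address  = 0x80105000; /* hypothetical m_address */
        constexpr uint64_t align    = 0x10000;    /* hypothetical align parameter */

        /* util::AlignDown(address, align) */
        constexpr uint64_t aligned_address = address & ~(align - 1); /* 0x80100000 */

        /* Page i of the heap occupies bit ((m_address + i * PageSize) - m_aligned_address) / PageSize. */
        for (uint64_t i = 0; i < 3; ++i) {
            const uint64_t bit = (address + i * PageSize - aligned_address) / PageSize;
            std::printf("page %u -> bit %u\n", static_cast<unsigned>(i), static_cast<unsigned>(bit)); /* bits 5, 6, 7 */
        }

        /* Allocation reverses the mapping: GetPointer<PageBuffer>(m_aligned_address) + offset,
         * so bit offsets that are multiples of align / PageSize (16 here) are align-aligned. */
        return 0;
    }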

libraries/libmesosphere/include/mesosphere/kern_k_memory_block.hpp

@@ -43,6 +43,7 @@ namespace ams::kern {
         KMemoryState_FlagCanMapProcess = (1 << 23),
         KMemoryState_FlagCanChangeAttribute = (1 << 24),
         KMemoryState_FlagCanCodeMemory = (1 << 25),
+        KMemoryState_FlagLinearMapped = (1 << 26),

         KMemoryState_FlagsData = KMemoryState_FlagCanReprotect | KMemoryState_FlagCanUseIpc |
                                  KMemoryState_FlagCanUseNonDeviceIpc | KMemoryState_FlagCanUseNonSecureIpc |
@@ -50,16 +51,18 @@
                                  KMemoryState_FlagCanTransfer | KMemoryState_FlagCanQueryPhysical |
                                  KMemoryState_FlagCanDeviceMap | KMemoryState_FlagCanAlignedDeviceMap |
                                  KMemoryState_FlagCanIpcUserBuffer | KMemoryState_FlagReferenceCounted |
-                                 KMemoryState_FlagCanChangeAttribute,
+                                 KMemoryState_FlagCanChangeAttribute | KMemoryState_FlagLinearMapped,

         KMemoryState_FlagsCode = KMemoryState_FlagCanDebug | KMemoryState_FlagCanUseIpc |
                                  KMemoryState_FlagCanUseNonDeviceIpc | KMemoryState_FlagCanUseNonSecureIpc |
                                  KMemoryState_FlagMapped | KMemoryState_FlagCode |
                                  KMemoryState_FlagCanQueryPhysical | KMemoryState_FlagCanDeviceMap |
-                                 KMemoryState_FlagCanAlignedDeviceMap | KMemoryState_FlagReferenceCounted,
+                                 KMemoryState_FlagCanAlignedDeviceMap | KMemoryState_FlagReferenceCounted |
+                                 KMemoryState_FlagLinearMapped,

         KMemoryState_FlagsMisc = KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted |
-                                 KMemoryState_FlagCanQueryPhysical | KMemoryState_FlagCanDeviceMap,
+                                 KMemoryState_FlagCanQueryPhysical | KMemoryState_FlagCanDeviceMap |
+                                 KMemoryState_FlagLinearMapped,

         KMemoryState_Free = ams::svc::MemoryState_Free,
@@ -68,7 +71,7 @@
         KMemoryState_Code = ams::svc::MemoryState_Code | KMemoryState_FlagsCode | KMemoryState_FlagCanMapProcess,
         KMemoryState_CodeData = ams::svc::MemoryState_CodeData | KMemoryState_FlagsData | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeMemory,
         KMemoryState_Normal = ams::svc::MemoryState_Normal | KMemoryState_FlagsData | KMemoryState_FlagCanCodeMemory,
-        KMemoryState_Shared = ams::svc::MemoryState_Shared | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted,
+        KMemoryState_Shared = ams::svc::MemoryState_Shared | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagLinearMapped,

         /* KMemoryState_Alias was removed after 1.0.0. */
@@ -82,7 +85,7 @@
         KMemoryState_Stack = ams::svc::MemoryState_Stack | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap
                            | KMemoryState_FlagCanUseIpc | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,

-        KMemoryState_ThreadLocal = ams::svc::MemoryState_ThreadLocal | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted,
+        KMemoryState_ThreadLocal = ams::svc::MemoryState_ThreadLocal | KMemoryState_FlagMapped | KMemoryState_FlagLinearMapped,

         KMemoryState_Transfered = ams::svc::MemoryState_Transfered | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap | KMemoryState_FlagCanChangeAttribute
                                 | KMemoryState_FlagCanUseIpc | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,
@@ -90,7 +93,7 @@
         KMemoryState_SharedTransfered = ams::svc::MemoryState_SharedTransfered | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap
                                       | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,

-        KMemoryState_SharedCode = ams::svc::MemoryState_SharedCode | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted
+        KMemoryState_SharedCode = ams::svc::MemoryState_SharedCode | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagLinearMapped
                                 | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,

         KMemoryState_Inaccessible = ams::svc::MemoryState_Inaccessible,
@@ -103,8 +106,8 @@
         KMemoryState_Kernel = ams::svc::MemoryState_Kernel | KMemoryState_FlagMapped,

-        KMemoryState_GeneratedCode = ams::svc::MemoryState_GeneratedCode | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagCanDebug,
-        KMemoryState_CodeOut = ams::svc::MemoryState_CodeOut | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted,
+        KMemoryState_GeneratedCode = ams::svc::MemoryState_GeneratedCode | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagCanDebug | KMemoryState_FlagLinearMapped,
+        KMemoryState_CodeOut = ams::svc::MemoryState_CodeOut | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagLinearMapped,

         KMemoryState_Coverage = ams::svc::MemoryState_Coverage | KMemoryState_FlagMapped,
     };
@@ -113,25 +116,25 @@
     static_assert(KMemoryState_Free == 0x00000000);
     static_assert(KMemoryState_Io == 0x00182001);
     static_assert(KMemoryState_Static == 0x00042002);
-    static_assert(KMemoryState_Code == 0x00DC7E03);
-    static_assert(KMemoryState_CodeData == 0x03FEBD04);
-    static_assert(KMemoryState_Normal == 0x037EBD05);
-    static_assert(KMemoryState_Shared == 0x00402006);
+    static_assert(KMemoryState_Code == 0x04DC7E03);
+    static_assert(KMemoryState_CodeData == 0x07FEBD04);
+    static_assert(KMemoryState_Normal == 0x077EBD05);
+    static_assert(KMemoryState_Shared == 0x04402006);

-    static_assert(KMemoryState_AliasCode == 0x00DD7E08);
-    static_assert(KMemoryState_AliasCodeData == 0x03FFBD09);
-    static_assert(KMemoryState_Ipc == 0x005C3C0A);
-    static_assert(KMemoryState_Stack == 0x005C3C0B);
-    static_assert(KMemoryState_ThreadLocal == 0x0040200C);
-    static_assert(KMemoryState_Transfered == 0x015C3C0D);
-    static_assert(KMemoryState_SharedTransfered == 0x005C380E);
-    static_assert(KMemoryState_SharedCode == 0x0040380F);
+    static_assert(KMemoryState_AliasCode == 0x04DD7E08);
+    static_assert(KMemoryState_AliasCodeData == 0x07FFBD09);
+    static_assert(KMemoryState_Ipc == 0x045C3C0A);
+    static_assert(KMemoryState_Stack == 0x045C3C0B);
+    static_assert(KMemoryState_ThreadLocal == 0x0400200C);
+    static_assert(KMemoryState_Transfered == 0x055C3C0D);
+    static_assert(KMemoryState_SharedTransfered == 0x045C380E);
+    static_assert(KMemoryState_SharedCode == 0x0440380F);
     static_assert(KMemoryState_Inaccessible == 0x00000010);
-    static_assert(KMemoryState_NonSecureIpc == 0x005C3811);
-    static_assert(KMemoryState_NonDeviceIpc == 0x004C2812);
+    static_assert(KMemoryState_NonSecureIpc == 0x045C3811);
+    static_assert(KMemoryState_NonDeviceIpc == 0x044C2812);
     static_assert(KMemoryState_Kernel == 0x00002013);
-    static_assert(KMemoryState_GeneratedCode == 0x00402214);
-    static_assert(KMemoryState_CodeOut == 0x00402015);
+    static_assert(KMemoryState_GeneratedCode == 0x04402214);
+    static_assert(KMemoryState_CodeOut == 0x04402015);
     static_assert(KMemoryState_Coverage == 0x00002016);
 #endif
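
Note: every constant updated in the static_asserts above is the old value OR'd with the new bit-26 flag (KMemoryState_FlagLinearMapped == 0x04000000); the one state whose flags change beyond that is KMemoryState_ThreadLocal, which also drops KMemoryState_FlagReferenceCounted (bit 22, inferred from the assert values). A quick consistency check:

    constexpr unsigned FlagLinearMapped     = 1u << 26; /* 0x04000000 */
    constexpr unsigned FlagReferenceCounted = 1u << 22; /* 0x00400000 */

    static_assert((0x00DC7E03 | FlagLinearMapped) == 0x04DC7E03); /* Code */
    static_assert((0x03FEBD04 | FlagLinearMapped) == 0x07FEBD04); /* CodeData */
    static_assert((0x00402006 | FlagLinearMapped) == 0x04402006); /* Shared */
    /* ThreadLocal gains LinearMapped and loses ReferenceCounted. */
    static_assert(((0x0040200C & ~FlagReferenceCounted) | FlagLinearMapped) == 0x0400200C);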

libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp

@@ -50,9 +50,11 @@ namespace ams::kern {
     constexpr size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSizeMax;

     /* NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x800. */
+    constexpr size_t KernelPageBufferHeapSize = 0x3E0000;
+    constexpr size_t KernelSlabHeapAdditionalSize = 0x148000;
+    constexpr size_t KernelPageBufferAdditionalSize = 0x33C000;

-    constexpr size_t KernelResourceSize = KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize;
+    constexpr size_t KernelResourceSize = KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize + KernelPageBufferHeapSize;
@@ -150,6 +152,8 @@
             static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion &GetKernelTraceBufferRegion() { return Dereference(GetVirtualLinearMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramKernelTraceBuffer)); }

+            static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion &GetSecureAppletMemoryRegion() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramKernelSecureAppletMemory)); }
+
             static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion &GetVirtualLinearRegion(KVirtualAddress address) { return Dereference(FindLinear(address)); }
             static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion &GetPhysicalLinearRegion(KPhysicalAddress address) { return Dereference(FindLinear(address)); }
@@ -209,6 +213,7 @@
             static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelBase); }
             static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelCodeRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelCode); }
             static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelSlabRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelSlab); }
+            static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelSecureAppletMemoryRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelSecureAppletMemory); }
             static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelPageTableHeapRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelPtHeap); }
             static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelInitPageTableRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelInitPt); }
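
Note: for scale, the new page-buffer constants work out to whole pages (0x3E0000 bytes is 992 page-sized buffers, the additional 0x33C000 is 828 more), and folding KernelPageBufferHeapSize into KernelResourceSize is what actually moves the heap inside the kernel's resource region. The arithmetic, assuming mesosphere's usual 4 KB PageSize:

    #include <cstddef>

    constexpr std::size_t PageSize                       = 0x1000; /* assumed */
    constexpr std::size_t KernelPageBufferHeapSize       = 0x3E0000;
    constexpr std::size_t KernelPageBufferAdditionalSize = 0x33C000;

    static_assert(KernelPageBufferHeapSize / PageSize == 992);       /* pages in the base heap */
    static_assert(KernelPageBufferAdditionalSize / PageSize == 828); /* optional extra pages */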

libraries/libmesosphere/include/mesosphere/kern_k_memory_region_type.hpp

@@ -211,6 +211,8 @@ namespace ams::kern {
     static_assert(KMemoryRegionType_DramKernelPtHeap.GetValue() == (0x24E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped));
     static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() == (0x44E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped));

+    constexpr inline const auto KMemoryRegionType_DramKernelSecureAppletMemory = KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 0).SetAttribute(KMemoryRegionAttr_LinearMapped);
+    static_assert(KMemoryRegionType_DramKernelSecureAppletMemory.GetValue() == (0x18E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped));
+
     constexpr inline const auto KMemoryRegionType_DramReservedEarly = KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
     static_assert(KMemoryRegionType_DramReservedEarly.GetValue() == (0x16 | KMemoryRegionAttr_NoUserMap));
@@ -251,6 +253,9 @@
     constexpr inline const auto KMemoryRegionType_VirtualDramUnknownDebug = KMemoryRegionType_Dram.DeriveSparse(2, 2, 1);
    static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52));

+    constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory = KMemoryRegionType_Dram.DeriveSparse(3, 1, 0);
+    static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x62));
+
     constexpr inline const auto KMemoryRegionType_VirtualDramKernelInitPt = KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0);
     constexpr inline const auto KMemoryRegionType_VirtualDramPoolManagement = KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1);
     constexpr inline const auto KMemoryRegionType_VirtualDramUserPool = KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2);
@@ -327,6 +332,8 @@
                 return KMemoryRegionType_VirtualDramKernelTraceBuffer;
             } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
                 return KMemoryRegionType_VirtualDramKernelPtHeap;
+            } else if (KMemoryRegionType_DramKernelSecureAppletMemory.IsAncestorOf(type_id)) {
+                return KMemoryRegionType_VirtualDramKernelSecureAppletMemory;
             } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) {
                 return KMemoryRegionType_VirtualDramUnknownDebug;
             } else {

libraries/libmesosphere/include/mesosphere/kern_k_page_bitmap.hpp

@@ -113,11 +113,12 @@ namespace ams::kern {
             static constexpr size_t MaxDepth = 4;
         private:
             u64 *m_bit_storages[MaxDepth];
+            u64 *m_end_storages[MaxDepth];
             RandomBitGenerator m_rng;
             size_t m_num_bits;
             size_t m_used_depths;
         public:
-            KPageBitmap() : m_bit_storages(), m_rng(), m_num_bits(), m_used_depths() { /* ... */ }
+            KPageBitmap() : m_bit_storages(), m_end_storages(), m_rng(), m_num_bits(), m_used_depths() { /* ... */ }

             constexpr size_t GetNumBits() const { return m_num_bits; }
             constexpr s32 GetHighestDepthIndex() const { return static_cast<s32>(m_used_depths) - 1; }
@@ -135,6 +136,7 @@
                     m_bit_storages[depth] = storage;
                     size = util::AlignUp(size, BITSIZEOF(u64)) / BITSIZEOF(u64);
                     storage += size;
+                    m_end_storages[depth] = storage;
                 }

                 return storage;
@@ -171,6 +173,45 @@
                 return static_cast<ssize_t>(offset);
             }

+            ssize_t FindFreeRange(size_t count) {
+                /* Check that it is possible to find a range. */
+                const u64 * const storage_start = m_bit_storages[m_used_depths - 1];
+                const u64 * const storage_end   = m_end_storages[m_used_depths - 1];
+
+                /* If we don't have a storage to iterate (or want more blocks than fit in a single storage), we can't find a free range. */
+                if (!(storage_start < storage_end && count <= BITSIZEOF(u64))) {
+                    return -1;
+                }
+
+                /* Walk the storages to select a random free range. */
+                const size_t options_per_storage = std::max<size_t>(BITSIZEOF(u64) / count, 1);
+                const size_t num_entries         = std::max<size_t>(storage_end - storage_start, 1);
+
+                const u64 free_mask = (static_cast<u64>(1) << count) - 1;
+
+                size_t num_valid_options = 0;
+                ssize_t chosen_offset = -1;
+                for (size_t storage_index = 0; storage_index < num_entries; ++storage_index) {
+                    u64 storage = storage_start[storage_index];
+                    for (size_t option = 0; option < options_per_storage; ++option) {
+                        if ((storage & free_mask) == free_mask) {
+                            /* We've found a new valid option. */
+                            ++num_valid_options;
+
+                            /* Select the Kth valid option with probability 1/K. This leads to an overall uniform distribution. */
+                            if (num_valid_options == 1 || m_rng.GenerateRandom(num_valid_options) == 0) {
+                                /* This is our first option, so select it. */
+                                chosen_offset = storage_index * BITSIZEOF(u64) + option * count;
+                            }
+                        }
+                        storage >>= count;
+                    }
+                }
+
+                /* Return the random offset we chose. */
+                return chosen_offset;
+            }
+
             void SetBit(size_t offset) {
                 this->SetBit(this->GetHighestDepthIndex(), offset);
                 m_num_bits++;
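
Note: the selection loop in FindFreeRange is single-pass reservoir sampling with a reservoir of one: the Kth valid option replaces the current choice with probability 1/K, so after N options each survives with probability (1/K) * (K/(K+1)) * ... * ((N-1)/N) = 1/N. A standalone sketch of the same technique, with std::mt19937 standing in for m_rng.GenerateRandom:

    #include <cstddef>
    #include <random>
    #include <vector>

    /* Pick one element uniformly from candidates in a single pass. */
    long ChooseUniform(const std::vector<long> &candidates, std::mt19937 &rng) {
        std::size_t num_seen = 0;
        long chosen = -1;
        for (long c : candidates) {
            ++num_seen;
            /* Replace the current choice with probability 1/num_seen. */
            if (std::uniform_int_distribution<std::size_t>(0, num_seen - 1)(rng) == 0) {
                chosen = c;
            }
        }
        return chosen;
    }

The kernel version short-circuits the num_valid_options == 1 case so the first option never costs a random draw; the distribution is unchanged.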

libraries/libmesosphere/include/mesosphere/kern_k_page_buffer.hpp

@@ -16,12 +16,33 @@
 #pragma once
 #include <mesosphere/kern_slab_helpers.hpp>
 #include <mesosphere/kern_k_memory_layout.hpp>
+#include <mesosphere/kern_k_dynamic_page_manager.hpp>

 namespace ams::kern {

-    class KPageBuffer : public KSlabAllocated<KPageBuffer> {
+    class KDynamicPageManager;
+    class KPageBuffer;
+
+    class KPageBufferSlabHeap : protected impl::KSlabHeapImpl {
+        public:
+            static constexpr size_t BufferSize = PageSize;
+            static constinit inline size_t s_buffer_count = 0;
         private:
-            alignas(PageSize) u8 m_buffer[PageSize];
+            size_t m_obj_size{};
+        public:
+            constexpr KPageBufferSlabHeap() = default;
+
+            /* See kern_init_slab_setup.cpp for definition. */
+            void Initialize(KDynamicPageManager &allocator);
+
+            KPageBuffer *Allocate();
+            void Free(KPageBuffer *pb);
+    };
+
+    class KPageBuffer {
+        private:
+            u8 m_buffer[KPageBufferSlabHeap::BufferSize];
         public:
             KPageBuffer() {
                 std::memset(m_buffer, 0, sizeof(m_buffer));
@@ -39,8 +60,49 @@ namespace ams::kern {
                 return GetPointer<KPageBuffer>(virt_addr);
             }
+        private:
+            static constinit inline KPageBufferSlabHeap s_slab_heap;
+        public:
+            static void InitializeSlabHeap(KDynamicPageManager &allocator) {
+                s_slab_heap.Initialize(allocator);
+            }
+
+            static KPageBuffer *Allocate() {
+                return s_slab_heap.Allocate();
+            }
+
+            static void Free(KPageBuffer *obj) {
+                s_slab_heap.Free(obj);
+            }
+
+            template<size_t ExpectedSize>
+            static ALWAYS_INLINE KPageBuffer *AllocateChecked() {
+                /* Check that the allocation is valid. */
+                MESOSPHERE_ABORT_UNLESS(sizeof(KPageBuffer) == ExpectedSize);
+
+                return Allocate();
+            }
+
+            template<size_t ExpectedSize>
+            static ALWAYS_INLINE void FreeChecked(KPageBuffer *obj) {
+                /* Check that the free is valid. */
+                MESOSPHERE_ABORT_UNLESS(sizeof(KPageBuffer) == ExpectedSize);
+
+                return Free(obj);
+            }
     };
-    static_assert(sizeof(KPageBuffer) == PageSize);
-    static_assert(alignof(KPageBuffer) == PageSize);
+    static_assert(sizeof(KPageBuffer) == KPageBufferSlabHeap::BufferSize);
+
+    ALWAYS_INLINE KPageBuffer *KPageBufferSlabHeap::Allocate() {
+        KPageBuffer *pb = static_cast<KPageBuffer *>(KSlabHeapImpl::Allocate());
+        if (AMS_LIKELY(pb != nullptr)) {
+            std::construct_at(pb);
+        }
+        return pb;
+    }
+
+    ALWAYS_INLINE void KPageBufferSlabHeap::Free(KPageBuffer *pb) {
+        KSlabHeapImpl::Free(pb);
+    }

 }
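
Note: the Checked variants pin the caller's view of sizeof(KPageBuffer) at compile time and verify it at runtime, so a translation unit compiled against a stale definition aborts instead of corrupting the slab. A hypothetical call site (assumes the surrounding mesosphere context; not code from this commit):

    /* AllocateChecked aborts if this TU's sizeof(KPageBuffer) != ExpectedSize. */
    KPageBuffer *page = KPageBuffer::AllocateChecked<PageSize>();
    if (page != nullptr) {
        /* ... the buffer is zero-filled by KPageBuffer's constructor, invoked via std::construct_at ... */
        KPageBuffer::FreeChecked<PageSize>(page);
    }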

libraries/libmesosphere/include/mesosphere/kern_k_system_control_base.hpp

@@ -26,6 +26,9 @@ namespace ams::kern {
 namespace ams::kern {

     class KSystemControlBase {
+        public:
+            /* This can be overridden as needed. */
+            static constexpr size_t SecureAppletMemorySize = 0;
         protected:
             /* Nintendo uses std::mt19937_t for randomness. */
             /* To save space (and because mt19937_t isn't secure anyway), */
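
Note: the base class defaults SecureAppletMemorySize to 0, and the nx KSystemControl at the top of this commit shadows it with 4_MB; code that names the concrete system-control type picks up the board's constant. A reduced sketch of the shadowing pattern, with stand-in class names rather than the mesosphere ones:

    #include <cstddef>

    struct SystemControlBase {
        /* This can be overridden as needed. */
        static constexpr std::size_t SecureAppletMemorySize = 0;
    };

    struct NxSystemControl : SystemControlBase {
        static constexpr std::size_t SecureAppletMemorySize = 4 * 1024 * 1024; /* 4_MB */
    };

    static_assert(SystemControlBase::SecureAppletMemorySize == 0);
    static_assert(NxSystemControl::SecureAppletMemorySize == 0x400000); /* the derived name shadows the base one */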

libraries/libmesosphere/include/mesosphere/kern_kernel.hpp

@@ -63,7 +63,7 @@ namespace ams::kern {
             static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000;
             static constexpr size_t SystemMemoryBlockSlabHeapSize = 10000;
             static constexpr size_t BlockInfoSlabHeapSize = 4000;
-            static constexpr size_t ReservedDynamicPageCount = 70;
+            static constexpr size_t ReservedDynamicPageCount = 64;
         private:
             static State s_state;
             static KResourceLimit s_system_resource_limit;