Mirror of https://github.com/Atmosphere-NX/Atmosphere.git
kern: support dynamic resource expansion for system heaps/events/sessions.
parent 2b91956051
commit 2c4bd44d7e
37 changed files with 856 additions and 328 deletions
@@ -166,7 +166,7 @@ namespace ams::kern::arch::arm64 {
     Result KPageTable::InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end) {
         /* Initialize basic fields. */
         m_asid = 0;
-        m_manager = std::addressof(Kernel::GetPageTableManager());
+        m_manager = std::addressof(Kernel::GetSystemPageTableManager());

         /* Allocate a page for ttbr. */
         const u64 asid_tag = (static_cast<u64>(m_asid) << 48ul);
@@ -650,11 +650,11 @@ namespace ams::kern::board::nintendo::nx {
         g_memory_controller_address = KMemoryLayout::GetDevicePhysicalAddress(KMemoryRegionType_MemoryController);

         /* Allocate a page to use as a reserved/no device table. */
-        const KVirtualAddress table_virt_addr = Kernel::GetPageTableManager().Allocate();
+        const KVirtualAddress table_virt_addr = Kernel::GetSystemPageTableManager().Allocate();
         MESOSPHERE_ABORT_UNLESS(table_virt_addr != Null<KVirtualAddress>);
         const KPhysicalAddress table_phys_addr = GetPageTablePhysicalAddress(table_virt_addr);
         MESOSPHERE_ASSERT(IsValidPhysicalAddress(table_phys_addr));
-        Kernel::GetPageTableManager().Open(table_virt_addr, 1);
+        Kernel::GetSystemPageTableManager().Open(table_virt_addr, 1);

         /* Clear the page and save it. */
         /* NOTE: Nintendo does not check the result of StoreDataCache. */
@@ -779,7 +779,7 @@ namespace ams::kern::board::nintendo::nx {
         const size_t end_index = (space_address + space_size - 1) / DeviceRegionSize;

         /* Get the page table manager. */
-        auto &ptm = Kernel::GetPageTableManager();
+        auto &ptm = Kernel::GetSystemPageTableManager();

         /* Clear the tables. */
         static_assert(TableCount == (1ul << DeviceVirtualAddressBits) / DeviceRegionSize);
@@ -840,7 +840,7 @@ namespace ams::kern::board::nintendo::nx {

     void KDevicePageTable::Finalize() {
         /* Get the page table manager. */
-        auto &ptm = Kernel::GetPageTableManager();
+        auto &ptm = Kernel::GetSystemPageTableManager();

         /* Detach from all devices. */
         {
@@ -1017,7 +1017,7 @@ namespace ams::kern::board::nintendo::nx {

         /* Get the memory manager and page table manager. */
         KMemoryManager &mm = Kernel::GetMemoryManager();
-        KPageTableManager &ptm = Kernel::GetPageTableManager();
+        KPageTableManager &ptm = Kernel::GetSystemPageTableManager();

         /* Cache permissions. */
         const bool read = (device_perm & ams::svc::MemoryPermission_Read) != 0;
@@ -1181,10 +1181,10 @@ namespace ams::kern::board::nintendo::nx {

         /* Get the memory manager and page table manager. */
         KMemoryManager &mm = Kernel::GetMemoryManager();
-        KPageTableManager &ptm = Kernel::GetPageTableManager();
+        KPageTableManager &ptm = Kernel::GetSystemPageTableManager();

         /* Make a page group for the pages we're closing. */
-        KPageGroup pg(std::addressof(Kernel::GetBlockInfoManager()));
+        KPageGroup pg(std::addressof(Kernel::GetSystemBlockInfoManager()));

         /* Walk the directory. */
         u64 remaining = size;
@@ -459,6 +459,7 @@ namespace ams::kern::board::nintendo::nx {

         KTargetSystem::EnableDebugMemoryFill(kernel_config.Get<smc::KernelConfiguration::DebugFillMemory>());
         KTargetSystem::EnableUserExceptionHandlers(kernel_config.Get<smc::KernelConfiguration::EnableUserExceptionHandlers>());
+        KTargetSystem::EnableDynamicResourceLimits(!kernel_config.Get<smc::KernelConfiguration::DisableDynamicResourceLimits>());
         KTargetSystem::EnableUserPmuAccess(kernel_config.Get<smc::KernelConfiguration::EnableUserPmuAccess>());

         g_call_smc_on_panic = kernel_config.Get<smc::KernelConfiguration::UseSecureMonitorPanicCall>();
@@ -80,14 +80,15 @@ namespace ams::kern::board::nintendo::nx::smc {
     };

     struct KernelConfiguration {
-        using DebugFillMemory = util::BitPack32::Field<0, 1, bool>;
-        using EnableUserExceptionHandlers = util::BitPack32::Field<DebugFillMemory::Next, 1, bool>;
-        using EnableUserPmuAccess = util::BitPack32::Field<EnableUserExceptionHandlers::Next, 1, bool>;
-        using IncreaseThreadResourceLimit = util::BitPack32::Field<EnableUserPmuAccess::Next, 1, bool>;
-        using Reserved4 = util::BitPack32::Field<IncreaseThreadResourceLimit::Next, 4, u32>;
-        using UseSecureMonitorPanicCall = util::BitPack32::Field<Reserved4::Next, 1, bool>;
-        using Reserved9 = util::BitPack32::Field<UseSecureMonitorPanicCall::Next, 7, u32>;
-        using MemorySize = util::BitPack32::Field<Reserved9::Next, 2, smc::MemorySize>;
+        using DebugFillMemory = util::BitPack32::Field<0, 1, bool>;
+        using EnableUserExceptionHandlers = util::BitPack32::Field<DebugFillMemory::Next, 1, bool>;
+        using EnableUserPmuAccess = util::BitPack32::Field<EnableUserExceptionHandlers::Next, 1, bool>;
+        using IncreaseThreadResourceLimit = util::BitPack32::Field<EnableUserPmuAccess::Next, 1, bool>;
+        using DisableDynamicResourceLimits = util::BitPack32::Field<IncreaseThreadResourceLimit::Next, 1, bool>;
+        using Reserved5 = util::BitPack32::Field<DisableDynamicResourceLimits::Next, 3, u32>;
+        using UseSecureMonitorPanicCall = util::BitPack32::Field<Reserved5::Next, 1, bool>;
+        using Reserved9 = util::BitPack32::Field<UseSecureMonitorPanicCall::Next, 7, u32>;
+        using MemorySize = util::BitPack32::Field<Reserved9::Next, 2, smc::MemorySize>;
     };

     enum UserRebootType {
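The net effect of the layout change: bit 4, previously the first bit of Reserved4, now carries DisableDynamicResourceLimits, and the reserved run shrinks from four bits to three, so every later field keeps its old position. A minimal standalone sketch of the same decoding (hypothetical helper, not the actual util::BitPack32 implementation):

#include <cstdint>
#include <cstdio>

// Hypothetical helper mirroring util::BitPack32's field extraction: each field
// is a bit offset and width within one u32, matching the new layout above.
constexpr uint32_t ExtractField(uint32_t packed, int offset, int width) {
    return (packed >> offset) & ((1u << width) - 1u);
}

constexpr bool DisableDynamicResourceLimits(uint32_t kernel_config) {
    /* Bits 0..3: DebugFillMemory, EnableUserExceptionHandlers,
       EnableUserPmuAccess, IncreaseThreadResourceLimit.
       Bit 4: DisableDynamicResourceLimits (then 3 reserved bits). */
    return ExtractField(kernel_config, 4, 1) != 0;
}

static_assert(!DisableDynamicResourceLimits(0b0000'1111));
static_assert( DisableDynamicResourceLimits(0b0001'0000));

int main() {
    std::printf("bit 4 set => dynamic limits disabled: %d\n", DisableDynamicResourceLimits(1u << 4));
}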
@@ -173,8 +173,9 @@ namespace ams::kern::init {
     }

     void InitializeSlabHeaps() {
-        /* Get the start of the slab region, since that's where we'll be working. */
-        KVirtualAddress address = KMemoryLayout::GetSlabRegionAddress();
+        /* Get the slab region, since that's where we'll be working. */
+        const KMemoryRegion &slab_region = KMemoryLayout::GetSlabRegion();
+        KVirtualAddress address = slab_region.GetAddress();

         /* Initialize slab type array to be in sorted order. */
         KSlabType slab_types[KSlabType_Count];
@@ -202,13 +203,21 @@ namespace ams::kern::init {
             }
         }

+        /* Track the gaps, so that we can free them to the unused slab tree. */
+        KVirtualAddress gap_start = address;
+        size_t gap_size = 0;
+
         for (size_t i = 0; i < util::size(slab_types); i++) {
             /* Add the random gap to the address. */
-            address += (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1];
+            const auto cur_gap = (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1];
+            address += cur_gap;
+            gap_size += cur_gap;

             #define INITIALIZE_SLAB_HEAP(NAME, COUNT, ...) \
                 case KSlabType_##NAME: \
-                    address = InitializeSlabHeap<NAME>(address, COUNT); \
+                    if (COUNT > 0) { \
+                        address = InitializeSlabHeap<NAME>(address, COUNT); \
+                    } \
                     break;

             /* Initialize the slabheap. */
@@ -218,7 +227,17 @@ namespace ams::kern::init {
                 /* If we somehow get an invalid type, abort. */
                 MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
             }
+
+            /* If we've hit the end of a gap, free it. */
+            if (gap_start + gap_size != address) {
+                FreeUnusedSlabMemory(gap_start, gap_size);
+                gap_start = address;
+                gap_size = 0;
+            }
         }
+
+        /* Free the end of the slab region. */
+        FreeUnusedSlabMemory(gap_start, gap_size + (slab_region.GetEndAddress() - GetInteger(address)));
     }

 }
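The bookkeeping added here is subtle: gaps accumulate across slabs whose COUNT is zero (which now allocate nothing, per the guarded macro), and a run is only flushed to FreeUnusedSlabMemory once some heap actually advances the cursor past it. A toy model of that accounting, with hypothetical numbers in place of the randomized gaps:

#include <cstddef>
#include <cstdio>

// Toy model (hypothetical numbers, plain C++) of the gap accounting above:
// gaps accumulate across slabs that allocate nothing, and a run is flushed to
// the recycler only once some heap actually advances the cursor past it.
int main() {
    const size_t gaps[]   = {16, 8, 32};   /* per-slab random gaps */
    const size_t counts[] = {0, 64, 128};  /* bytes carved per slab; 0 allocates nothing */

    size_t address = 0, gap_start = 0, gap_size = 0;
    for (size_t i = 0; i < 3; ++i) {
        address  += gaps[i];   /* add the random gap */
        gap_size += gaps[i];
        address  += counts[i]; /* InitializeSlabHeap advances the cursor iff COUNT > 0 */
        if (gap_start + gap_size != address) {  /* a heap ended the gap run: free it */
            std::printf("free gap [%zu, %zu)\n", gap_start, gap_start + gap_size);
            gap_start = address;
            gap_size  = 0;
        }
    }
    /* Whatever trails the last heap is freed as one final run. */
    std::printf("trailing gap of %zu bytes at %zu\n", gap_size, gap_start);
}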
@@ -128,13 +128,13 @@ namespace ams::kern {
             KProcess *new_process = nullptr;
             {
                 /* Make page groups to represent the data. */
-                KPageGroup pg(std::addressof(Kernel::GetBlockInfoManager()));
-                KPageGroup workaround_pg(std::addressof(Kernel::GetBlockInfoManager()));
+                KPageGroup pg(std::addressof(Kernel::GetSystemBlockInfoManager()));
+                KPageGroup workaround_pg(std::addressof(Kernel::GetSystemBlockInfoManager()));

                 /* Populate the page group to represent the data. */
                 {
                     /* Allocate the previously unreserved pages. */
-                    KPageGroup unreserve_pg(std::addressof(Kernel::GetBlockInfoManager()));
+                    KPageGroup unreserve_pg(std::addressof(Kernel::GetSystemBlockInfoManager()));
                     MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(unreserve_pg), unreserved_size / PageSize, KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));

                     /* Add the previously reserved pages. */
@@ -62,11 +62,46 @@ namespace ams::kern {
     Result KClientPort::CreateSession(KClientSession **out) {
         MESOSPHERE_ASSERT_THIS();

+        /* Declare the session we're going to allocate. */
+        KSession *session;
+
         /* Reserve a new session from the resource limit. */
         KScopedResourceReservation session_reservation(GetCurrentProcessPointer(), ams::svc::LimitableResource_SessionCountMax);
-        R_UNLESS(session_reservation.Succeeded(), svc::ResultLimitReached());
+        if (session_reservation.Succeeded()) {
+            /* Allocate a session normally. */
+            session = KSession::Create();
+        } else {
+            /* We couldn't reserve a session. Check that we support dynamically expanding the resource limit. */
+            R_UNLESS(GetCurrentProcess().GetResourceLimit() == std::addressof(Kernel::GetSystemResourceLimit()), svc::ResultLimitReached());
+            R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), svc::ResultLimitReached());
+
+            /* Try to allocate a session from unused slab memory. */
+            session = KSession::CreateFromUnusedSlabMemory();
+            R_UNLESS(session != nullptr, svc::ResultLimitReached());
+
+            /* Ensure that if we fail to allocate our session requests, we close the session we created. */
+            auto session_guard = SCOPE_GUARD { session->Close(); };
+            {
+                /* We want to add two KSessionRequests to the heap, to prevent request exhaustion. */
+                for (size_t i = 0; i < 2; ++i) {
+                    KSessionRequest *request = KSessionRequest::CreateFromUnusedSlabMemory();
+                    R_UNLESS(request != nullptr, svc::ResultLimitReached());
+
+                    request->Close();
+                }
+            }
+            session_guard.Cancel();
+
+            /* We successfully allocated a session, so add the object we allocated to the resource limit. */
+            Kernel::GetSystemResourceLimit().Add(ams::svc::LimitableResource_SessionCountMax, 1);
+        }
+
+        /* Check that we successfully created a session. */
+        R_UNLESS(session != nullptr, svc::ResultOutOfResource());
+
         /* Update the session counts. */
+        auto count_guard = SCOPE_GUARD { session->Close(); };
         {
             /* Atomically increment the number of sessions. */
             s32 new_sessions;
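This hunk establishes the pattern the rest of the commit repeats for light sessions, events, and the generic svc session path: reserve against the resource limit first; only if that fails, and only for processes charged against the system resource limit with dynamic limits enabled, allocate from unused slab memory and permanently grow the limit. A self-contained model of that policy (hypothetical types and names; the kernel inlines this logic at each call site rather than sharing a helper):

#include <cstdio>

// Hypothetical model of the fallback policy introduced by this commit.
struct Limit {
    long current = 0, max = 0;
    bool Reserve() { return current < max ? (++current, true) : false; }
    void Add(long v) { max += v; current += v; }  /* mirrors KResourceLimit::Add */
};

enum class Source { Normal, UnusedSlab, Failed };

Source AllocateWithDynamicExpansion(Limit &limit, bool is_system_limit,
                                    bool dynamic_limits_enabled, bool slab_has_room) {
    if (limit.Reserve()) {
        return Source::Normal;  /* fast path: the reservation succeeded */
    }
    if (!is_system_limit || !dynamic_limits_enabled || !slab_has_room) {
        return Source::Failed;  /* svc::ResultLimitReached() in the kernel */
    }
    limit.Add(1);               /* the new object grows the limit permanently */
    return Source::UnusedSlab;
}

int main() {
    Limit sessions{0, 1};
    std::printf("%d\n", static_cast<int>(AllocateWithDynamicExpansion(sessions, true, true, true)));   /* Normal */
    std::printf("%d\n", static_cast<int>(AllocateWithDynamicExpansion(sessions, true, true, true)));   /* UnusedSlab */
    std::printf("%d\n", static_cast<int>(AllocateWithDynamicExpansion(sessions, false, true, true)));  /* Failed */
}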
@@ -90,18 +125,7 @@ namespace ams::kern {
             } while (!m_peak_sessions.compare_exchange_weak(peak, new_sessions, std::memory_order_relaxed));
             }
         }
-
-        /* Create a new session. */
-        KSession *session = KSession::Create();
-        if (session == nullptr) {
-            /* Decrement the session count. */
-            const auto prev = m_num_sessions--;
-            if (prev == m_max_sessions) {
-                this->NotifyAvailable();
-            }
-
-            return svc::ResultOutOfResource();
-        }
+        count_guard.Cancel();

         /* Initialize the session. */
         session->Initialize(this, m_parent->GetName());
@@ -128,11 +152,32 @@ namespace ams::kern {
     Result KClientPort::CreateLightSession(KLightClientSession **out) {
         MESOSPHERE_ASSERT_THIS();

+        /* Declare the session we're going to allocate. */
+        KLightSession *session;
+
         /* Reserve a new session from the resource limit. */
         KScopedResourceReservation session_reservation(GetCurrentProcessPointer(), ams::svc::LimitableResource_SessionCountMax);
-        R_UNLESS(session_reservation.Succeeded(), svc::ResultLimitReached());
+        if (session_reservation.Succeeded()) {
+            /* Allocate a session normally. */
+            session = KLightSession::Create();
+        } else {
+            /* We couldn't reserve a session. Check that we support dynamically expanding the resource limit. */
+            R_UNLESS(GetCurrentProcess().GetResourceLimit() == std::addressof(Kernel::GetSystemResourceLimit()), svc::ResultLimitReached());
+            R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), svc::ResultLimitReached());
+
+            /* Try to allocate a session from unused slab memory. */
+            session = KLightSession::CreateFromUnusedSlabMemory();
+            R_UNLESS(session != nullptr, svc::ResultLimitReached());
+
+            /* We successfully allocated a session, so add the object we allocated to the resource limit. */
+            Kernel::GetSystemResourceLimit().Add(ams::svc::LimitableResource_SessionCountMax, 1);
+        }
+
+        /* Check that we successfully created a session. */
+        R_UNLESS(session != nullptr, svc::ResultOutOfResource());

         /* Update the session counts. */
+        auto count_guard = SCOPE_GUARD { session->Close(); };
         {
             /* Atomically increment the number of sessions. */
             s32 new_sessions;
@@ -156,18 +201,7 @@ namespace ams::kern {
             } while (!m_peak_sessions.compare_exchange_weak(peak, new_sessions, std::memory_order_relaxed));
             }
         }
-
-        /* Create a new session. */
-        KLightSession *session = KLightSession::Create();
-        if (session == nullptr) {
-            /* Decrement the session count. */
-            const auto prev = m_num_sessions--;
-            if (prev == m_max_sessions) {
-                this->NotifyAvailable();
-            }
-
-            return svc::ResultOutOfResource();
-        }
+        count_guard.Cancel();

         /* Initialize the session. */
         session->Initialize(this, m_parent->GetName());
@@ -369,14 +369,14 @@ namespace ams::kern::KDumpObject {
         /* KBlockInfo slab. */
         {
             MESOSPHERE_RELEASE_LOG("KBlockInfo\n");
-            auto &manager = Kernel::GetBlockInfoManager();
+            auto &manager = Kernel::GetSystemBlockInfoManager();
             MESOSPHERE_RELEASE_LOG(" Cur=%6zu Peak=%6zu Max=%6zu\n", manager.GetUsed(), manager.GetPeak(), manager.GetCount());
         }

         /* Page Table slab. */
         {
             MESOSPHERE_RELEASE_LOG("Page Table\n");
-            auto &manager = Kernel::GetPageTableManager();
+            auto &manager = Kernel::GetSystemPageTableManager();
             MESOSPHERE_RELEASE_LOG(" Cur=%6zu Peak=%6zu Max=%6zu\n", manager.GetUsed(), manager.GetPeak(), manager.GetCount());
         }
     }
@@ -37,7 +37,7 @@ namespace ams::kern {
     void KEvent::Finalize() {
         MESOSPHERE_ASSERT_THIS();

-        KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList>::Finalize();
+        KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList, true>::Finalize();
     }

     Result KEvent::Signal() {
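The added third template argument changes which base specialization KEvent finalizes through. I read it as a flag telling the slab-container base that instances may also have been allocated from unused slab memory (via CreateFromUnusedSlabMemory elsewhere in this commit), so destruction must route accordingly; that is an inference from context, not something this diff states. A hypothetical shape of such a base:

#include <cstdio>

// Hypothetical sketch (not mesosphere code): a slab-container base whose bool
// parameter selects, at compile time, whether instances may also have come
// from dynamically expanded (unused-slab) memory and need a different free path.
struct BaseObject {
    void Finalize() { std::printf("base finalize\n"); }
};

template<typename Derived, typename Base, bool SupportDynamicExpansion = false>
struct AutoObjectWithSlabHeap : Base {
    void Finalize() {
        if constexpr (SupportDynamicExpansion) {
            std::printf("check for unused-slab allocation before freeing\n");
        }
        Base::Finalize();
    }
};

struct Event : AutoObjectWithSlabHeap<Event, BaseObject, true> {};

int main() {
    Event e;
    e.Finalize();  /* takes the dynamic-expansion-aware path */
}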
@@ -37,6 +37,7 @@ namespace ams::kern {

     void KMemoryManager::Initialize(KVirtualAddress management_region, size_t management_region_size) {
         /* Clear the management region to zero. */
+        const KVirtualAddress management_region_end = management_region + management_region_size;
         std::memset(GetVoidPointer(management_region), 0, management_region_size);
@@ -106,7 +106,7 @@ namespace ams::kern {
         m_mapped_ipc_server_memory = 0;

         m_memory_block_slab_manager = std::addressof(Kernel::GetSystemMemoryBlockManager());
-        m_block_info_manager = std::addressof(Kernel::GetBlockInfoManager());
+        m_block_info_manager = std::addressof(Kernel::GetSystemBlockInfoManager());
         m_resource_limit = std::addressof(Kernel::GetSystemResourceLimit());

         m_allocate_option = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront);
@@ -260,8 +260,8 @@ namespace ams::kern {
             const bool enable_das_merge = (params.flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
             const bool is_app = (params.flags & ams::svc::CreateProcessFlag_IsApplication) != 0;
             auto *mem_block_manager = std::addressof(is_app ? Kernel::GetApplicationMemoryBlockManager() : Kernel::GetSystemMemoryBlockManager());
-            auto *block_info_manager = std::addressof(Kernel::GetBlockInfoManager());
-            auto *pt_manager = std::addressof(Kernel::GetPageTableManager());
+            auto *block_info_manager = std::addressof(is_app ? Kernel::GetApplicationBlockInfoManager() : Kernel::GetSystemBlockInfoManager());
+            auto *pt_manager = std::addressof(is_app ? Kernel::GetApplicationPageTableManager() : Kernel::GetSystemPageTableManager());
             R_TRY(m_page_table.Initialize(m_process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, params.code_num_pages * PageSize, mem_block_manager, block_info_manager, pt_manager, res_limit));
         }
         auto pt_guard = SCOPE_GUARD { m_page_table.Finalize(); };
@@ -326,12 +326,17 @@ namespace ams::kern {
             MESOSPHERE_ASSERT(m_system_resource_address != Null<KVirtualAddress>);
             m_system_resource_num_pages = system_resource_num_pages;

-            /* Initialize managers. */
-            const size_t rc_size = util::AlignUp(KPageTableManager::CalculateReferenceCountSize(system_resource_size), PageSize);
+            /* Initialize slab heaps. */
+            const size_t rc_size = util::AlignUp(KPageTableSlabHeap::CalculateReferenceCountSize(system_resource_size), PageSize);
             m_dynamic_page_manager.Initialize(m_system_resource_address + rc_size, system_resource_size - rc_size);
-            m_page_table_manager.Initialize(std::addressof(m_dynamic_page_manager), GetPointer<KPageTableManager::RefCount>(m_system_resource_address));
-            m_memory_block_slab_manager.Initialize(std::addressof(m_dynamic_page_manager));
-            m_block_info_manager.Initialize(std::addressof(m_dynamic_page_manager));
+            m_page_table_heap.Initialize(std::addressof(m_dynamic_page_manager), 0, GetPointer<KPageTableManager::RefCount>(m_system_resource_address));
+            m_memory_block_heap.Initialize(std::addressof(m_dynamic_page_manager), 0);
+            m_block_info_heap.Initialize(std::addressof(m_dynamic_page_manager), 0);
+
+            /* Initialize managers. */
+            m_page_table_manager.Initialize(std::addressof(m_dynamic_page_manager), std::addressof(m_page_table_heap));
+            m_memory_block_slab_manager.Initialize(std::addressof(m_dynamic_page_manager), std::addressof(m_memory_block_heap));
+            m_block_info_manager.Initialize(std::addressof(m_dynamic_page_manager), std::addressof(m_block_info_heap));

             mem_block_manager = std::addressof(m_memory_block_slab_manager);
             block_info_manager = std::addressof(m_block_info_manager);
@@ -339,8 +344,8 @@ namespace ams::kern {
         } else {
             const bool is_app = (params.flags & ams::svc::CreateProcessFlag_IsApplication);
             mem_block_manager = std::addressof(is_app ? Kernel::GetApplicationMemoryBlockManager() : Kernel::GetSystemMemoryBlockManager());
-            block_info_manager = std::addressof(Kernel::GetBlockInfoManager());
-            pt_manager = std::addressof(Kernel::GetPageTableManager());
+            block_info_manager = std::addressof(is_app ? Kernel::GetApplicationBlockInfoManager() : Kernel::GetSystemBlockInfoManager());
+            pt_manager = std::addressof(is_app ? Kernel::GetApplicationPageTableManager() : Kernel::GetSystemPageTableManager());
         }

         /* Ensure we don't leak any secure memory we allocated. */
@@ -49,7 +49,8 @@ namespace ams::kern {
             KScopedLightLock lk(m_lock);
             value = m_limit_values[which];
             MESOSPHERE_ASSERT(value >= 0);
             MESOSPHERE_ASSERT(m_current_values[which] <= m_limit_values[which]);
             MESOSPHERE_ASSERT(m_current_values[which] <= m_peak_values[which]);
+            MESOSPHERE_ASSERT(m_peak_values[which] <= m_limit_values[which]);
             MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]);
         }
@@ -64,7 +65,8 @@ namespace ams::kern {
             KScopedLightLock lk(m_lock);
             value = m_current_values[which];
             MESOSPHERE_ASSERT(value >= 0);
             MESOSPHERE_ASSERT(m_current_values[which] <= m_limit_values[which]);
             MESOSPHERE_ASSERT(m_current_values[which] <= m_peak_values[which]);
+            MESOSPHERE_ASSERT(m_peak_values[which] <= m_limit_values[which]);
             MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]);
         }
@@ -79,7 +81,8 @@ namespace ams::kern {
             KScopedLightLock lk(m_lock);
             value = m_peak_values[which];
             MESOSPHERE_ASSERT(value >= 0);
             MESOSPHERE_ASSERT(m_current_values[which] <= m_limit_values[which]);
             MESOSPHERE_ASSERT(m_current_values[which] <= m_peak_values[which]);
+            MESOSPHERE_ASSERT(m_peak_values[which] <= m_limit_values[which]);
             MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]);
         }
@@ -93,7 +96,8 @@ namespace ams::kern {
         {
             KScopedLightLock lk(m_lock);
             MESOSPHERE_ASSERT(m_current_values[which] >= 0);
             MESOSPHERE_ASSERT(m_current_values[which] <= m_limit_values[which]);
             MESOSPHERE_ASSERT(m_current_values[which] <= m_peak_values[which]);
+            MESOSPHERE_ASSERT(m_peak_values[which] <= m_limit_values[which]);
             MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]);
             value = m_limit_values[which] - m_current_values[which];
         }
@@ -113,6 +117,37 @@ namespace ams::kern {
         return ResultSuccess();
     }

+    void KResourceLimit::Add(ams::svc::LimitableResource which, s64 value) {
+        MESOSPHERE_ASSERT_THIS();
+        MESOSPHERE_ASSERT(KTargetSystem::IsDynamicResourceLimitsEnabled());
+
+        KScopedLightLock lk(m_lock);
+
+        /* Check that this is a true increase. */
+        MESOSPHERE_ABORT_UNLESS(value > 0);
+
+        /* Check that we can perform an increase. */
+        MESOSPHERE_ABORT_UNLESS(m_current_values[which] <= m_peak_values[which]);
+        MESOSPHERE_ABORT_UNLESS(m_peak_values[which] <= m_limit_values[which]);
+        MESOSPHERE_ABORT_UNLESS(m_current_hints[which] <= m_current_values[which]);
+
+        /* Check that the increase doesn't cause an overflow. */
+        const auto increased_limit = m_limit_values[which] + value;
+        const auto increased_current = m_current_values[which] + value;
+        const auto increased_hint = m_current_hints[which] + value;
+        MESOSPHERE_ABORT_UNLESS(m_limit_values[which] < increased_limit);
+        MESOSPHERE_ABORT_UNLESS(m_current_values[which] < increased_current);
+        MESOSPHERE_ABORT_UNLESS(m_current_hints[which] < increased_hint);
+
+        /* Add the value. */
+        m_limit_values[which] = increased_limit;
+        m_current_values[which] = increased_current;
+        m_current_hints[which] = increased_hint;
+
+        /* Update our peak. */
+        m_peak_values[which] = std::max(m_peak_values[which], increased_current);
+    }
+
     bool KResourceLimit::Reserve(ams::svc::LimitableResource which, s64 value) {
         return this->Reserve(which, value, KHardwareTimer::GetTick() + DefaultTimeout);
     }
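KResourceLimit::Add raises the limit, current, and hint values in lockstep and aborts rather than wrapping: for a positive addend, a wrapped sum compares less-than-or-equal to the original value. A standalone illustration of that idiom (hypothetical helper; unsigned arithmetic is used here so the wraparound the check detects is well-defined in portable C++, whereas the kernel performs the same comparisons on s64):

#include <cstdint>
#include <cstdio>
#include <limits>

// Hypothetical helper illustrating the wraparound check in Add().
constexpr bool TryIncrease(uint64_t &counter, uint64_t delta) {
    const uint64_t increased = counter + delta;  /* may wrap; defined for unsigned */
    if (increased <= counter) {
        return false;  /* wrapped (or delta == 0): not a true increase */
    }
    counter = increased;
    return true;
}

static_assert([] { uint64_t c = 10; return TryIncrease(c, 5) && c == 15; }());
static_assert([] { uint64_t c = std::numeric_limits<uint64_t>::max() - 1; return !TryIncrease(c, 2); }());

int main() {
    uint64_t limit = 100;
    std::printf("increase ok: %d, new limit: %llu\n", TryIncrease(limit, 1),
                static_cast<unsigned long long>(limit));
}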
libraries/libmesosphere/source/kern_k_unused_slab_memory.cpp (new file, 159 lines)
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    namespace {
+
+        class KUnusedSlabMemory : public util::IntrusiveRedBlackTreeBaseNode<KUnusedSlabMemory> {
+            NON_COPYABLE(KUnusedSlabMemory);
+            NON_MOVEABLE(KUnusedSlabMemory);
+            private:
+                size_t m_size;
+            public:
+                struct RedBlackKeyType {
+                    size_t m_size;
+
+                    constexpr ALWAYS_INLINE size_t GetSize() const {
+                        return m_size;
+                    }
+                };
+
+                template<typename T> requires (std::same_as<T, KUnusedSlabMemory> || std::same_as<T, RedBlackKeyType>)
+                static constexpr ALWAYS_INLINE int Compare(const T &lhs, const KUnusedSlabMemory &rhs) {
+                    if (lhs.GetSize() < rhs.GetSize()) {
+                        return -1;
+                    } else {
+                        return 1;
+                    }
+                }
+            public:
+                constexpr KUnusedSlabMemory(size_t size) : m_size(size) { /* ... */ }
+
+                constexpr ALWAYS_INLINE KVirtualAddress GetAddress() const { return reinterpret_cast<uintptr_t>(this); }
+                constexpr ALWAYS_INLINE size_t GetSize() const { return m_size; }
+        };
+        static_assert(std::is_trivially_destructible<KUnusedSlabMemory>::value);
+
+        using KUnusedSlabMemoryTree = util::IntrusiveRedBlackTreeBaseTraits<KUnusedSlabMemory>::TreeType<KUnusedSlabMemory>;
+
+        constinit KLightLock g_unused_slab_memory_lock;
+        constinit KUnusedSlabMemoryTree g_unused_slab_memory_tree;
+
+    }
+
+    KVirtualAddress AllocateUnusedSlabMemory(size_t size, size_t alignment) {
+        /* Acquire exclusive access to the memory tree. */
+        KScopedLightLock lk(g_unused_slab_memory_lock);
+
+        /* Adjust size and alignment. */
+        size = std::max(size, sizeof(KUnusedSlabMemory));
+        alignment = std::max(alignment, alignof(KUnusedSlabMemory));
+
+        /* Find the smallest block which fits our allocation. */
+        KUnusedSlabMemory *best_fit = std::addressof(*g_unused_slab_memory_tree.nfind_key({ size - 1 }));
+
+        /* Ensure that the chunk is valid. */
+        size_t prefix_waste;
+        KVirtualAddress alloc_start;
+        KVirtualAddress alloc_last;
+        KVirtualAddress alloc_end;
+        KVirtualAddress chunk_last;
+        KVirtualAddress chunk_end;
+        while (true) {
+            /* Check that we still have a chunk satisfying our size requirement. */
+            if (AMS_UNLIKELY(best_fit == nullptr)) {
+                return Null<KVirtualAddress>;
+            }
+
+            /* Determine where the actual allocation would start. */
+            alloc_start = util::AlignUp(GetInteger(best_fit->GetAddress()), alignment);
+            if (AMS_LIKELY(alloc_start >= best_fit->GetAddress())) {
+                prefix_waste = alloc_start - best_fit->GetAddress();
+                alloc_end = alloc_start + size;
+                alloc_last = alloc_end - 1;
+
+                /* Check that the allocation remains in bounds. */
+                if (alloc_start <= alloc_last) {
+                    chunk_end = best_fit->GetAddress() + best_fit->GetSize();
+                    chunk_last = chunk_end - 1;
+                    if (AMS_LIKELY(alloc_last <= chunk_last)) {
+                        break;
+                    }
+                }
+            }
+
+            /* Check the next smallest block. */
+            best_fit = best_fit->GetNext();
+        }
+
+        /* Remove the chunk we selected from the tree. */
+        g_unused_slab_memory_tree.erase(g_unused_slab_memory_tree.iterator_to(*best_fit));
+        std::destroy_at(best_fit);
+
+        /* If there's enough prefix waste due to alignment for a new chunk, insert it into the tree. */
+        if (prefix_waste >= sizeof(KUnusedSlabMemory)) {
+            std::construct_at(best_fit, prefix_waste);
+            g_unused_slab_memory_tree.insert(*best_fit);
+        }
+
+        /* If there's enough suffix waste after the allocation for a new chunk, insert it into the tree. */
+        if (alloc_last < alloc_end + sizeof(KUnusedSlabMemory) - 1 && alloc_end + sizeof(KUnusedSlabMemory) - 1 <= chunk_last) {
+            KUnusedSlabMemory *suffix_chunk = GetPointer<KUnusedSlabMemory>(alloc_end);
+            std::construct_at(suffix_chunk, chunk_end - alloc_end);
+            g_unused_slab_memory_tree.insert(*suffix_chunk);
+        }
+
+        /* Return the allocated memory. */
+        return alloc_start;
+    }
+
+    void FreeUnusedSlabMemory(KVirtualAddress address, size_t size) {
+        /* NOTE: This is called only during initialization, so we don't need exclusive access. */
+        /* Nintendo doesn't acquire the lock here, either. */
+
+        /* Check that there's anything at all for us to free. */
+        if (AMS_UNLIKELY(size == 0)) {
+            return;
+        }
+
+        /* Determine the start of the block. */
+        const KVirtualAddress block_start = util::AlignUp(GetInteger(address), alignof(KUnusedSlabMemory));
+
+        /* Check that there's space for a KUnusedSlabMemory to exist. */
+        if (AMS_UNLIKELY(std::numeric_limits<uintptr_t>::max() - sizeof(KUnusedSlabMemory) < GetInteger(block_start))) {
+            return;
+        }
+
+        /* Determine the end of the block region. */
+        const KVirtualAddress block_end = util::AlignDown(GetInteger(address) + size, alignof(KUnusedSlabMemory));
+
+        /* Check that the block remains within bounds. */
+        if (AMS_UNLIKELY(block_start + sizeof(KUnusedSlabMemory) - 1 > block_end - 1)) {
+            return;
+        }
+
+        /* Create the block. */
+        KUnusedSlabMemory *block = GetPointer<KUnusedSlabMemory>(block_start);
+        std::construct_at(block, GetInteger(block_end) - GetInteger(block_start));
+
+        /* Insert the block into the tree. */
+        g_unused_slab_memory_tree.insert(*block);
+    }
+
+}
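For orientation: AllocateUnusedSlabMemory is a best-fit search over chunks keyed by size in an intrusive red-black tree. nfind_key({ size - 1 }) yields the smallest chunk of at least the requested size, and the loop advances to larger chunks until alignment also fits, splitting prefix/suffix waste back into the tree. A simplified non-intrusive model of the same search, using std::multiset in place of the intrusive tree (assumptions: power-of-two alignment, and none of the kernel's minimum-splinter-size checks):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <set>

// Chunk is a hypothetical stand-in for KUnusedSlabMemory.
struct Chunk { uintptr_t address; size_t size; };
struct BySize { bool operator()(const Chunk &a, const Chunk &b) const { return a.size < b.size; } };

uintptr_t Allocate(std::multiset<Chunk, BySize> &tree, size_t size, size_t alignment) {
    /* lower_bound: the smallest chunk with chunk.size >= size, like nfind_key({ size - 1 }). */
    for (auto it = tree.lower_bound(Chunk{0, size}); it != tree.end(); ++it) {
        const uintptr_t alloc_start = (it->address + alignment - 1) & ~(alignment - 1);
        const uintptr_t alloc_end   = alloc_start + size;
        const uintptr_t chunk_end   = it->address + it->size;
        if (alloc_end <= chunk_end) {  /* fits even after alignment padding */
            const Chunk prefix{it->address, alloc_start - it->address};
            const Chunk suffix{alloc_end, chunk_end - alloc_end};
            tree.erase(it);
            if (prefix.size > 0) tree.insert(prefix);  /* return waste to the tree */
            if (suffix.size > 0) tree.insert(suffix);
            return alloc_start;
        }
    }
    return 0;  /* no chunk satisfies size + alignment (Null<KVirtualAddress> above) */
}

int main() {
    std::multiset<Chunk, BySize> tree{{0x1004, 0x100}, {0x3000, 0x40}};
    std::printf("allocated at %#zx\n", static_cast<size_t>(Allocate(tree, 0x40, 0x10)));
}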
@@ -66,15 +66,11 @@ namespace ams::kern {

     void Kernel::InitializeResourceManagers(KVirtualAddress address, size_t size) {
         /* Ensure that the buffer is suitable for our use. */
-        //const size_t app_size = ApplicationMemoryBlockSlabHeapSize * sizeof(KMemoryBlock);
-        //const size_t sys_size = SystemMemoryBlockSlabHeapSize * sizeof(KMemoryBlock);
-        //const size_t info_size = BlockInfoSlabHeapSize * sizeof(KBlockInfo);
-        //const size_t fixed_size = util::AlignUp(app_size + sys_size + info_size, PageSize);
         MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(address), PageSize));
         MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));

         /* Ensure that we have space for our reference counts. */
-        const size_t rc_size = util::AlignUp(KPageTableManager::CalculateReferenceCountSize(size), PageSize);
+        const size_t rc_size = util::AlignUp(KPageTableSlabHeap::CalculateReferenceCountSize(size), PageSize);
         MESOSPHERE_ABORT_UNLESS(rc_size < size);
         size -= rc_size;
@@ -82,13 +78,28 @@ namespace ams::kern {
         g_resource_manager_page_manager.Initialize(address, size);

         /* Initialize the fixed-size slabheaps. */
-        s_app_memory_block_manager.Initialize(std::addressof(g_resource_manager_page_manager), ApplicationMemoryBlockSlabHeapSize);
-        s_sys_memory_block_manager.Initialize(std::addressof(g_resource_manager_page_manager), SystemMemoryBlockSlabHeapSize);
-        s_block_info_manager.Initialize(std::addressof(g_resource_manager_page_manager), BlockInfoSlabHeapSize);
+        s_app_memory_block_heap.Initialize(std::addressof(g_resource_manager_page_manager), ApplicationMemoryBlockSlabHeapSize);
+        s_sys_memory_block_heap.Initialize(std::addressof(g_resource_manager_page_manager), SystemMemoryBlockSlabHeapSize);
+        s_block_info_heap.Initialize(std::addressof(g_resource_manager_page_manager), BlockInfoSlabHeapSize);

-        /* Reserve all remaining pages for the page table manager. */
-        const size_t num_pt_pages = g_resource_manager_page_manager.GetCount() - g_resource_manager_page_manager.GetUsed();
-        s_page_table_manager.Initialize(std::addressof(g_resource_manager_page_manager), num_pt_pages, GetPointer<KPageTableManager::RefCount>(address + size));
+        /* Reserve all but a fixed number of remaining pages for the page table heap. */
+        const size_t num_pt_pages = g_resource_manager_page_manager.GetCount() - g_resource_manager_page_manager.GetUsed() - ReservedDynamicPageCount;
+        s_page_table_heap.Initialize(std::addressof(g_resource_manager_page_manager), num_pt_pages, GetPointer<KPageTableManager::RefCount>(address + size));
+
+        /* Setup the slab managers. */
+        KDynamicPageManager * const app_dynamic_page_manager = nullptr;
+        KDynamicPageManager * const sys_dynamic_page_manager = KTargetSystem::IsDynamicResourceLimitsEnabled() ? std::addressof(g_resource_manager_page_manager) : nullptr;
+        s_app_memory_block_manager.Initialize(app_dynamic_page_manager, std::addressof(s_app_memory_block_heap));
+        s_sys_memory_block_manager.Initialize(sys_dynamic_page_manager, std::addressof(s_sys_memory_block_heap));
+
+        s_app_block_info_manager.Initialize(app_dynamic_page_manager, std::addressof(s_block_info_heap));
+        s_sys_block_info_manager.Initialize(sys_dynamic_page_manager, std::addressof(s_block_info_heap));
+
+        s_app_page_table_manager.Initialize(app_dynamic_page_manager, std::addressof(s_page_table_heap));
+        s_sys_page_table_manager.Initialize(sys_dynamic_page_manager, std::addressof(s_page_table_heap));
+
+        /* Check that we have the correct number of dynamic pages available. */
+        MESOSPHERE_ABORT_UNLESS(g_resource_manager_page_manager.GetCount() - g_resource_manager_page_manager.GetUsed() == ReservedDynamicPageCount);
     }

     void Kernel::PrintLayout() {
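This refactor separates two concerns the old code fused: the slab heap (fixed, preallocated backing storage, shared where the heaps coincide) and the per-pool manager (a heap plus an optional dynamic page source). Passing nullptr as the application pool's page source means only the system pool can ever expand dynamically. A hypothetical sketch of that split (stand-in names, not the mesosphere types):

#include <cstdio>

struct DynamicPageSource {};  /* stand-in for KDynamicPageManager */

template<typename T>
struct SlabHeap { /* fixed storage, possibly shared between managers */ };

template<typename T>
struct SlabManager {
    void Initialize(DynamicPageSource *expansion_source, SlabHeap<T> *heap) {
        m_heap = heap;
        m_source = expansion_source;  /* nullptr => this pool can never expand */
    }
    bool CanExpand() const { return m_source != nullptr; }
    SlabHeap<T> *m_heap = nullptr;
    DynamicPageSource *m_source = nullptr;
};

int main() {
    DynamicPageSource page_source;
    SlabHeap<int> shared_heap;  /* one heap... */
    SlabManager<int> app, sys;  /* ...two managers, mirroring s_app_/s_sys_ above */
    app.Initialize(nullptr, &shared_heap);
    sys.Initialize(&page_source, &shared_heap);
    std::printf("app can expand: %d, sys can expand: %d\n", app.CanExpand(), sys.CanExpand());
}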
@@ -60,12 +60,28 @@ namespace ams::kern::svc {
         auto &process = GetCurrentProcess();
         auto &handle_table = process.GetHandleTable();

+        /* Declare the event we're going to allocate. */
+        KEvent *event;
+
         /* Reserve a new event from the process resource limit. */
         KScopedResourceReservation event_reservation(std::addressof(process), ams::svc::LimitableResource_EventCountMax);
-        R_UNLESS(event_reservation.Succeeded(), svc::ResultLimitReached());
-
-        /* Create a new event. */
-        KEvent *event = KEvent::Create();
-        R_UNLESS(event != nullptr, svc::ResultOutOfResource());
+        if (event_reservation.Succeeded()) {
+            /* Allocate an event normally. */
+            event = KEvent::Create();
+        } else {
+            /* We couldn't reserve an event. Check that we support dynamically expanding the resource limit. */
+            R_UNLESS(process.GetResourceLimit() == std::addressof(Kernel::GetSystemResourceLimit()), svc::ResultLimitReached());
+            R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), svc::ResultLimitReached());
+
+            /* Try to allocate an event from unused slab memory. */
+            event = KEvent::CreateFromUnusedSlabMemory();
+            R_UNLESS(event != nullptr, svc::ResultLimitReached());
+
+            /* We successfully allocated an event, so add the object we allocated to the resource limit. */
+            Kernel::GetSystemResourceLimit().Add(ams::svc::LimitableResource_EventCountMax, 1);
+        }
+
+        /* Check that we successfully created an event. */
+        R_UNLESS(event != nullptr, svc::ResultOutOfResource());

         /* Initialize the event. */
@@ -27,12 +27,44 @@ namespace ams::kern::svc {
         auto &process = GetCurrentProcess();
         auto &handle_table = process.GetHandleTable();

+        /* Declare the session we're going to allocate. */
+        T *session;
+
         /* Reserve a new session from the process resource limit. */
         KScopedResourceReservation session_reservation(std::addressof(process), ams::svc::LimitableResource_SessionCountMax);
-        R_UNLESS(session_reservation.Succeeded(), svc::ResultLimitReached());
-
-        /* Create a new session. */
-        T *session = T::Create();
-        R_UNLESS(session != nullptr, svc::ResultOutOfResource());
+        if (session_reservation.Succeeded()) {
+            /* Allocate a session normally. */
+            session = T::Create();
+        } else {
+            /* We couldn't reserve a session. Check that we support dynamically expanding the resource limit. */
+            R_UNLESS(process.GetResourceLimit() == std::addressof(Kernel::GetSystemResourceLimit()), svc::ResultLimitReached());
+            R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), svc::ResultLimitReached());
+
+            /* Try to allocate a session from unused slab memory. */
+            session = T::CreateFromUnusedSlabMemory();
+            R_UNLESS(session != nullptr, svc::ResultLimitReached());
+
+            /* If we're creating a KSession, we want to add two KSessionRequests to the heap, to prevent request exhaustion. */
+            /* NOTE: Nintendo checks if session->DynamicCast<KSession *>() != nullptr, but there's no reason to not do this statically. */
+            if constexpr (std::same_as<T, KSession>) {
+                /* Ensure that if we fail to allocate our session requests, we close the session we created. */
+                auto session_guard = SCOPE_GUARD { session->Close(); };
+                {
+                    for (size_t i = 0; i < 2; ++i) {
+                        KSessionRequest *request = KSessionRequest::CreateFromUnusedSlabMemory();
+                        R_UNLESS(request != nullptr, svc::ResultLimitReached());
+
+                        request->Close();
+                    }
+                }
+                session_guard.Cancel();
+            }
+
+            /* We successfully allocated a session, so add the object we allocated to the resource limit. */
+            Kernel::GetSystemResourceLimit().Add(ams::svc::LimitableResource_SessionCountMax, 1);
+        }
+
+        /* Check that we successfully created a session. */
+        R_UNLESS(session != nullptr, svc::ResultOutOfResource());

         /* Initialize the session. */
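The NOTE in the hunk above records the one deliberate divergence from Nintendo's kernel: the KSession-only preallocation branch is resolved at compile time with if constexpr instead of a runtime DynamicCast. A small standalone illustration (hypothetical types) of why the static form is strictly cheaper:

#include <type_traits>

struct KSessionLike { static void Preallocate() { /* ... */ } };
struct KLightSessionLike {};

// A runtime-cast branch would compile into every instantiation and cost a
// dynamic type check; the if-constexpr branch is discarded entirely for
// types where it cannot apply.
template<typename T>
void CreateCommon() {
    if constexpr (std::is_same_v<T, KSessionLike>) {
        T::Preallocate();  /* only even compiled for KSessionLike */
    }
}

int main() {
    CreateCommon<KSessionLike>();
    CreateCommon<KLightSessionLike>();  /* no dead branch, no runtime check */
}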