kern: implement SvcUnmapMemory, more of SvcGetInfo

Michael Scire 2020-05-29 00:57:25 -07:00
parent faad5609b9
commit 1a0696f8a3
14 changed files with 231 additions and 37 deletions

View file

@@ -30,7 +30,7 @@ namespace ams::kern::svc {
private: \
using Impl = ::ams::svc::codegen::KernelSvcWrapper<::ams::kern::svc::NAME##64, ::ams::kern::svc::NAME##64From32>; \
public: \
static NOINLINE void Call64() { return Impl::Call64(); } \
static NOINLINE void Call64From32() { return Impl::Call64From32(); } \
};
#else
@@ -51,28 +51,45 @@ namespace ams::kern::svc {
#pragma GCC pop_options
constexpr const std::array<SvcTableEntry, NumSupervisorCalls> SvcTable64From32Impl = [] {
std::array<SvcTableEntry, NumSupervisorCalls> table = {};
#define AMS_KERN_SVC_SET_TABLE_ENTRY(ID, RETURN_TYPE, NAME, ...) \
table[ID] = NAME::Call64From32;
AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_SET_TABLE_ENTRY, _)
#undef AMS_KERN_SVC_SET_TABLE_ENTRY
return table;
}();
constexpr const std::array<SvcTableEntry, NumSupervisorCalls> SvcTable64Impl = [] {
std::array<SvcTableEntry, NumSupervisorCalls> table = {};
#define AMS_KERN_SVC_SET_TABLE_ENTRY(ID, RETURN_TYPE, NAME, ...) \
table[ID] = NAME::Call64;
AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_SET_TABLE_ENTRY, _)
#undef AMS_KERN_SVC_SET_TABLE_ENTRY
return table;
}();
constexpr bool IsValidSvcTable(const std::array<SvcTableEntry, NumSupervisorCalls> &table) {
for (size_t i = 0; i < NumSupervisorCalls; i++) {
if (table[i] != nullptr) {
return true;
}
}
return false;
}
static_assert(IsValidSvcTable(SvcTable64Impl));
static_assert(IsValidSvcTable(SvcTable64From32Impl));
}
const std::array<SvcTableEntry, NumSupervisorCalls> SvcTable64From32 = [] {
std::array<SvcTableEntry, NumSupervisorCalls> table = {};
#define AMS_KERN_SVC_SET_TABLE_ENTRY(ID, RETURN_TYPE, NAME, ...) \
table[ID] = NAME::Call64From32;
AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_SET_TABLE_ENTRY, _)
#undef AMS_KERN_SVC_SET_TABLE_ENTRY
return table;
}();
const std::array<SvcTableEntry, NumSupervisorCalls> SvcTable64 = [] {
std::array<SvcTableEntry, NumSupervisorCalls> table = {};
#define AMS_KERN_SVC_SET_TABLE_ENTRY(ID, RETURN_TYPE, NAME, ...) \
table[ID] = NAME::Call64;
AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_SET_TABLE_ENTRY, _)
#undef AMS_KERN_SVC_SET_TABLE_ENTRY
return table;
}();
constinit const std::array<SvcTableEntry, NumSupervisorCalls> SvcTable64 = SvcTable64Impl;
constinit const std::array<SvcTableEntry, NumSupervisorCalls> SvcTable64From32 = SvcTable64From32Impl;
}
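Taken together, this hunk moves the table construction into constexpr SvcTable64Impl/SvcTable64From32Impl arrays inside the anonymous namespace, sanity-checks them at compile time via IsValidSvcTable and static_assert, and re-exports SvcTable64 and SvcTable64From32 as constinit copies, guaranteeing the public tables are constant-initialized. A minimal standalone sketch of the same pattern, with illustrative names rather than the Atmosphere ones:

    #include <array>
    #include <cstddef>

    using HandlerEntry = void (*)();

    constexpr std::size_t NumHandlers = 4;

    namespace {

        void HandlerA() { /* ... */ }
        void HandlerB() { /* ... */ }

        /* Build the table in a constexpr immediately-invoked lambda. */
        constexpr std::array<HandlerEntry, NumHandlers> HandlerTableImpl = [] {
            std::array<HandlerEntry, NumHandlers> table = {};
            table[0] = HandlerA;
            table[1] = HandlerB;
            return table;
        }();

        /* Same shape as IsValidSvcTable above: reject an all-null table at compile time. */
        constexpr bool IsValidHandlerTable(const std::array<HandlerEntry, NumHandlers> &table) {
            for (std::size_t i = 0; i < NumHandlers; i++) {
                if (table[i] != nullptr) {
                    return true;
                }
            }
            return false;
        }

        static_assert(IsValidHandlerTable(HandlerTableImpl));

    }

    /* constinit (C++20) guarantees constant initialization, so the exported */
    /* table needs no runtime static initializer.                            */
    constinit const std::array<HandlerEntry, NumHandlers> HandlerTable = HandlerTableImpl;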

View file

@@ -389,4 +389,12 @@ namespace ams::kern::board::nintendo::nx {
}
}
/* Constant calculations. */
size_t KSystemControl::CalculateRequiredSecureMemorySize(size_t size, u32 pool) {
if (pool == KMemoryManager::Pool_Applet) {
return 0;
}
return size;
}
}
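CalculateRequiredSecureMemorySize() charges a process's secure (system resource) memory in full for every pool except the applet pool, which is charged nothing here. A self-contained sketch of the two branches, using a stand-in enum and made-up sizes:

    #include <cstddef>

    /* Stand-in pool enum so the sketch compiles on its own; the real */
    /* KMemoryManager::Pool enumeration lives in the kernel headers.  */
    enum Pool { Pool_Application, Pool_Applet, Pool_System };

    constexpr std::size_t CalculateRequiredSecureMemorySize(std::size_t size, Pool pool) {
        /* Applet-pool processes are not charged for secure memory by this helper. */
        return (pool == Pool_Applet) ? 0 : size;
    }

    /* Made-up sizes, illustrating both branches. */
    static_assert(CalculateRequiredSecureMemorySize(0x200000, Pool_Applet)      == 0);
    static_assert(CalculateRequiredSecureMemorySize(0x200000, Pool_Application) == 0x200000);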

View file

@@ -40,7 +40,7 @@ namespace ams::kern {
this->code_region_start = 0;
this->code_region_end = 0;
this->max_heap_size = 0;
this->max_physical_memory_size = 0;
this->mapped_physical_memory_size = 0;
this->mapped_unsafe_physical_memory = 0;
this->memory_block_slab_manager = std::addressof(Kernel::GetSystemMemoryBlockManager());
@@ -225,7 +225,7 @@ namespace ams::kern {
/* Set heap and fill members. */
this->current_heap_end = this->heap_region_start;
this->max_heap_size = 0;
this->max_physical_memory_size = 0;
this->mapped_physical_memory_size = 0;
this->mapped_unsafe_physical_memory = 0;
const bool fill_memory = KTargetSystem::IsDebugMemoryFillEnabled();
@@ -452,6 +452,68 @@ namespace ams::kern {
return ResultSuccess();
}
Result KPageTableBase::UnmapMemory(uintptr_t dst_address, uintptr_t src_address, size_t size) {
/* Lock the table. */
KScopedLightLock lk(this->general_lock);
/* Validate that the source address's state is valid. */
KMemoryState src_state;
R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr, src_address, size, KMemoryState_FlagCanAlias, KMemoryState_FlagCanAlias, KMemoryPermission_All, KMemoryPermission_NotMapped | KMemoryPermission_KernelRead, KMemoryAttribute_All, KMemoryAttribute_AnyLocked | KMemoryAttribute_Locked));
/* Validate that the dst address's state is valid. */
KMemoryPermission dst_perm;
R_TRY(this->CheckMemoryState(nullptr, std::addressof(dst_perm), nullptr, dst_address, size, KMemoryState_All, KMemoryState_Stack, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));
/* Create an update allocator for the source. */
KMemoryBlockManagerUpdateAllocator src_allocator(this->memory_block_slab_manager);
R_TRY(src_allocator.GetResult());
/* Create an update allocator for the destination. */
KMemoryBlockManagerUpdateAllocator dst_allocator(this->memory_block_slab_manager);
R_TRY(dst_allocator.GetResult());
/* Unmap the memory. */
{
/* Determine the number of pages being operated on. */
const size_t num_pages = size / PageSize;
/* Create page groups for the memory being unmapped. */
KPageGroup pg(this->block_info_manager);
/* Create the page group representing the destination. */
R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
/* Ensure the page group is valid for the source. */
R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), svc::ResultInvalidMemoryRegion());
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Unmap the aliased copy of the pages. */
const KPageProperties dst_unmap_properties = { KMemoryPermission_None, false, false, false };
R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, Null<KPhysicalAddress>, false, dst_unmap_properties, OperationType_Unmap, false));
/* Ensure that we re-map the aliased pages on failure. */
auto remap_guard = SCOPE_GUARD {
const KPageProperties dst_remap_properties = { dst_perm, false, false, false };
MESOSPHERE_R_ABORT_UNLESS(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_remap_properties, true));
};
/* Try to set the permissions for the source pages back to what they should be. */
const KPageProperties src_properties = { KMemoryPermission_UserReadWrite, false, false, false };
R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, src_properties, OperationType_ChangePermissions, false));
/* We successfully changed the permissions for the source pages, so we don't need to re-map the dst pages on failure. */
remap_guard.Cancel();
/* Apply the memory block updates. */
this->memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, KMemoryPermission_UserReadWrite, KMemoryAttribute_None);
this->memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_None, KMemoryPermission_None, KMemoryAttribute_None);
}
return ResultSuccess();
}
KProcessAddress KPageTableBase::FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const {
KProcessAddress address = Null<KProcessAddress>;
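The error handling in UnmapMemory() above follows an "undo on failure, cancel on success" shape: the destination alias is unmapped first, a scope guard is armed to re-map it if restoring the source permissions fails, and the guard is cancelled once nothing further can fail, leaving only the memory block bookkeeping. A generic, self-contained sketch of that idiom; this is not Atmosphere's SCOPE_GUARD implementation, just an equivalent RAII helper with hypothetical step functions:

    #include <utility>

    /* Minimal RAII rollback guard: runs the stored callback on scope exit unless */
    /* Cancel() was called first.                                                 */
    template<typename F>
    class RollbackGuard {
        private:
            F m_callback;
            bool m_active;
        public:
            explicit RollbackGuard(F f) : m_callback(std::move(f)), m_active(true) { /* ... */ }
            ~RollbackGuard() { if (m_active) { m_callback(); } }
            void Cancel() { m_active = false; }

            RollbackGuard(const RollbackGuard &) = delete;
            RollbackGuard &operator=(const RollbackGuard &) = delete;
    };

    /* Hypothetical steps mirroring the structure of UnmapMemory(): step one is */
    /* reversible, step two is the point of no return.                          */
    bool UnmapAlias()         { return true; }
    void RemapAlias()         { }
    bool RestoreSourcePerms() { return true; }

    bool DoUnmapLikeOperation() {
        if (!UnmapAlias()) {
            return false;
        }

        /* Arm the rollback: if anything fails past this point, re-map the alias. */
        RollbackGuard remap_guard([] { RemapAlias(); });

        if (!RestoreSourcePerms()) {
            return false; /* remap_guard re-maps the alias as we unwind */
        }

        /* Nothing can fail anymore, so the rollback is no longer needed. */
        remap_guard.Cancel();
        return true;
    }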

View file

@@ -280,6 +280,28 @@ namespace ams::kern {
this->thread_list.erase(this->thread_list.iterator_to(*thread));
}
size_t KProcess::GetUsedNonSystemUserPhysicalMemorySize() const {
const size_t norm_size = this->page_table.GetNormalMemorySize();
const size_t other_size = this->code_size + this->main_thread_stack_size;
const size_t sec_size = KSystemControl::CalculateRequiredSecureMemorySize(this->system_resource_num_pages * PageSize, this->memory_pool);
return norm_size + other_size + sec_size;
}
size_t KProcess::GetTotalNonSystemUserPhysicalMemorySize() const {
/* Get the amount of free and used size. */
const size_t free_size = this->resource_limit->GetFreeValue(ams::svc::LimitableResource_PhysicalMemoryMax);
const size_t used_size = this->GetUsedNonSystemUserPhysicalMemorySize();
const size_t sec_size = KSystemControl::CalculateRequiredSecureMemorySize(this->system_resource_num_pages * PageSize, this->memory_pool);
const size_t max_size = this->max_process_memory;
if (used_size + free_size > max_size) {
return max_size - sec_size;
} else {
return free_size + used_size - sec_size;
}
}
Result KProcess::Run(s32 priority, size_t stack_size) {
MESOSPHERE_ASSERT_THIS();
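GetTotalNonSystemUserPhysicalMemorySize() reports how much non-system memory the process can ultimately own: the secure-memory charge is subtracted either from the process maximum (when the resource limit's used + free headroom exceeds that maximum) or from the limit's headroom itself. A small worked example of the two branches, with made-up values:

    #include <cstddef>

    /* Mirrors the branch above; all numbers below are hypothetical. */
    constexpr std::size_t TotalNonSystemSize(std::size_t used, std::size_t free, std::size_t max, std::size_t secure) {
        return (used + free > max) ? (max - secure) : (free + used - secure);
    }

    /* Case 1: the limit leaves more headroom than the process may ever map, so */
    /* the process maximum (minus the secure charge) is reported.               */
    static_assert(TotalNonSystemSize(0x1000000, 0x8000000, 0x6000000, 0x200000) == 0x5E00000);

    /* Case 2: the resource limit is the binding constraint, so used + free governs. */
    static_assert(TotalNonSystemSize(0x1000000, 0x2000000, 0x6000000, 0x200000) == 0x2E00000);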

View file

@@ -34,6 +34,10 @@ namespace ams::kern::svc {
case ams::svc::InfoType_AslrRegionSize:
case ams::svc::InfoType_StackRegionAddress:
case ams::svc::InfoType_StackRegionSize:
case ams::svc::InfoType_ProgramId:
case ams::svc::InfoType_InitialProcessIdRange:
case ams::svc::InfoType_UserExceptionContextAddress:
case ams::svc::InfoType_TotalNonSystemMemorySize:
{
/* These info types don't support non-zero subtypes. */
R_UNLESS(info_subtype == 0, svc::ResultInvalidCombination());
@@ -67,6 +71,18 @@ namespace ams::kern::svc {
case ams::svc::InfoType_StackRegionSize:
*out = process->GetPageTable().GetStackRegionSize();
break;
case ams::svc::InfoType_ProgramId:
*out = process->GetProgramId();
break;
case ams::svc::InfoType_InitialProcessIdRange:
/* TODO: Detect exactly 4.0.0 target firmware, do the right thing. */
return svc::ResultInvalidEnumValue();
case ams::svc::InfoType_UserExceptionContextAddress:
*out = GetInteger(process->GetProcessLocalRegionAddress());
break;
case ams::svc::InfoType_TotalNonSystemMemorySize:
*out = process->GetTotalNonSystemUserPhysicalMemorySize();
break;
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}
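From userland, the newly supported info types are queried through the ordinary GetInfo SVC with an info subtype of zero, as enforced by the validation added above. A hedged sketch of such a query; the wrapper declaration, the pseudo-handle, and the numeric info-type values are assumptions in this sketch (they mirror the ams::svc/libnx headers), not something introduced by this commit:

    #include <cstdint>

    /* Hypothetical userland wrapper with the same shape as libnx's svcGetInfo(); */
    /* declared here only so the sketch is self-contained.                        */
    using Handle = std::uint32_t;
    using Result = std::uint32_t;
    Result svcGetInfo(std::uint64_t *out, std::uint32_t info_type, Handle handle, std::uint64_t info_subtype);

    /* Assumed constants: current-process pseudo-handle and info-type values. */
    constexpr Handle        CurProcessHandle                     = 0xFFFF8001;
    constexpr std::uint32_t InfoType_ProgramId                   = 18;
    constexpr std::uint32_t InfoType_UserExceptionContextAddress = 20;
    constexpr std::uint32_t InfoType_TotalNonSystemMemorySize    = 21;

    Result QueryNewInfoTypes(std::uint64_t *program_id, std::uint64_t *plr_address, std::uint64_t *total_size) {
        Result rc;
        /* The subtype must be 0 for all of these, or the kernel rejects the combination. */
        if ((rc = svcGetInfo(program_id,  InfoType_ProgramId,                   CurProcessHandle, 0)) != 0) { return rc; }
        if ((rc = svcGetInfo(plr_address, InfoType_UserExceptionContextAddress, CurProcessHandle, 0)) != 0) { return rc; }
        if ((rc = svcGetInfo(total_size,  InfoType_TotalNonSystemMemorySize,    CurProcessHandle, 0)) != 0) { return rc; }
        return 0;
    }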

View file

@@ -21,7 +21,32 @@ namespace ams::kern::svc {
namespace {
Result UnmapMemory(uintptr_t dst_address, uintptr_t src_address, size_t size) {
/* Log the call parameters for debugging. */
MESOSPHERE_LOG("UnmapMemory(%zx, %zx, %zx)\n", dst_address, src_address, size);
/* Validate that addresses are page aligned. */
R_UNLESS(util::IsAligned(dst_address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(src_address, PageSize), svc::ResultInvalidAddress());
/* Validate that size is positive and page aligned. */
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
/* Ensure that neither mapping overflows. */
R_UNLESS(src_address < src_address + size, svc::ResultInvalidCurrentMemory());
R_UNLESS(dst_address < dst_address + size, svc::ResultInvalidCurrentMemory());
/* Get the page table we're operating on. */
auto &page_table = GetCurrentProcess().GetPageTable();
/* Ensure that the memory we're unmapping is in range. */
R_UNLESS(page_table.Contains(src_address, size), svc::ResultInvalidCurrentMemory());
R_UNLESS(page_table.CanContain(dst_address, size, KMemoryState_Stack), svc::ResultInvalidMemoryRegion());
/* Unmap the memory. */
return page_table.UnmapMemory(dst_address, src_address, size);
}
}
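The pair of overflow checks relies on well-defined unsigned wrap-around: if src_address + size (or dst_address + size) would overflow, the sum wraps below the start address and the R_UNLESS condition fails; the size == 0 case has already been rejected by the preceding size check. A tiny self-contained demonstration of the idiom:

    #include <cstdint>

    /* Returns true when [address, address + size) cannot be represented without wrapping. */
    constexpr bool RangeOverflows(std::uint64_t address, std::uint64_t size) {
        return !(address < address + size);
    }

    static_assert(!RangeOverflows(0x0000000080000000ull, 0x10000ull)); /* fits         */
    static_assert( RangeOverflows(0xFFFFFFFFFFFFF000ull, 0x10000ull)); /* wraps around */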
@@ -40,7 +65,7 @@ namespace ams::kern::svc {
}
Result UnmapMemory64(ams::svc::Address dst_address, ams::svc::Address src_address, ams::svc::Size size) {
MESOSPHERE_PANIC("Stubbed SvcUnmapMemory64 was called.");
return UnmapMemory(dst_address, src_address, size);
}
/* ============================= 64From32 ABI ============================= */
@@ -58,7 +83,7 @@ namespace ams::kern::svc {
}
Result UnmapMemory64From32(ams::svc::Address dst_address, ams::svc::Address src_address, ams::svc::Size size) {
MESOSPHERE_PANIC("Stubbed SvcUnmapMemory64From32 was called.");
return UnmapMemory(dst_address, src_address, size);
}
}
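With both stubs replaced, SvcUnmapMemory tears down an alias previously established with SvcMapMemory, taking the same (dst_address, src_address, size) triple and subject to the alignment, size, overflow, and region checks above. A hedged userland sketch of the pairing; the wrapper declarations merely mirror the usual svcMapMemory/svcUnmapMemory shape and are assumptions, not part of this commit:

    #include <cstdint>

    /* Hypothetical wrappers, declared only so the sketch is self-contained. */
    using Result = std::uint32_t;
    Result svcMapMemory(void *dst_addr, void *src_addr, std::uint64_t size);
    Result svcUnmapMemory(void *dst_addr, void *src_addr, std::uint64_t size);

    /* Alias a page-aligned buffer into the stack region, use it, then tear the  */
    /* alias down again. While the alias exists, the source pages are locked and */
    /* inaccessible; unmapping restores them to user read/write.                 */
    Result WithAlias(void *alias_address, void *buffer, std::uint64_t size) {
        Result rc = svcMapMemory(alias_address, buffer, size);
        if (rc != 0) {
            return rc;
        }

        /* ... work through alias_address ... */

        return svcUnmapMemory(alias_address, buffer, size);
    }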