Integrate new result macros. (#1780)

* result: try out some experimental shenanigans

* result: sketch out some more shenanigans

* result: see what it looks like to convert kernel to use result conds instead of guards

* make rest of kernel use experimental new macro-ing
SciresM 2022-02-14 14:45:32 -08:00 committed by GitHub
parent 375ba615be
commit 96f95b9f95
109 changed files with 1355 additions and 1380 deletions
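
For orientation, the conversions below follow a few fixed patterns: return ResultSuccess() becomes R_SUCCEED(), returning a failure result directly becomes R_THROW(result), a tail call whose Result is propagated becomes R_RETURN(call), and SCOPE_GUARDs that had to be cancelled on success become ON_RESULT_FAILURE handlers. The sketch below shows plausible surface semantics for the return-style macros, inferred from their usage in this diff alone; the real definitions in Atmosphere's result headers are more involved (notably, they cooperate with the ON_RESULT_FAILURE machinery), so treat this as an illustration, not the actual implementation.

/* Simplified stand-ins, inferred from usage in this diff; NOT the real definitions. */
#include <cstdio>

struct Result {
    int value;
    constexpr bool IsFailure() const { return value != 0; }
};
constexpr Result ResultSuccess() { return Result{0}; }

/* Replaces 'return ResultSuccess();'. */
#define R_SUCCEED() return ResultSuccess()

/* Replaces 'return SomeFailureResult();'. */
#define R_THROW(res_expr) return (res_expr)

/* Replaces 'return Subcall(...);' when propagating a Result. */
#define R_RETURN(res_expr) return (res_expr)

/* Pre-existing helpers, shown for context: bail out early on failure. */
#define R_UNLESS(cond, res_expr) do { if (!(cond)) { R_THROW(res_expr); } } while (false)
#define R_TRY(res_expr) do { if (const Result r_ = (res_expr); r_.IsFailure()) { R_THROW(r_); } } while (false)

/* The shape most converted functions take. */
Result Check(bool ok) {
    R_UNLESS(ok, Result{1});
    R_SUCCEED();
}

Result Caller(bool ok) {
    R_TRY(Check(ok));
    R_RETURN(Check(true));
}

int main() {
    std::printf("%d %d\n", Caller(true).value, Caller(false).value); /* prints: 0 1 */
    return 0;
}

The point of funneling every exit through a macro is that the result machinery can then observe how a function terminates, which is what enables the guard-to-handler conversion shown in the page table changes further down.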


@@ -304,7 +304,7 @@ namespace ams::kern::arch::arm64::cpu {
MESOSPHERE_ASSERT(util::IsAligned(end, DataCacheLineSize));
R_UNLESS(UserspaceAccess::InvalidateDataCache(start, end), svc::ResultInvalidCurrentMemory());
DataSynchronizationBarrier();
- return ResultSuccess();
+ R_SUCCEED();
}
ALWAYS_INLINE Result StoreDataCacheRange(uintptr_t start, uintptr_t end) {
@@ -312,7 +312,7 @@ namespace ams::kern::arch::arm64::cpu {
MESOSPHERE_ASSERT(util::IsAligned(end, DataCacheLineSize));
R_UNLESS(UserspaceAccess::StoreDataCache(start, end), svc::ResultInvalidCurrentMemory());
DataSynchronizationBarrier();
- return ResultSuccess();
+ R_SUCCEED();
}
ALWAYS_INLINE Result FlushDataCacheRange(uintptr_t start, uintptr_t end) {
@@ -320,7 +320,7 @@ namespace ams::kern::arch::arm64::cpu {
MESOSPHERE_ASSERT(util::IsAligned(end, DataCacheLineSize));
R_UNLESS(UserspaceAccess::FlushDataCache(start, end), svc::ResultInvalidCurrentMemory());
DataSynchronizationBarrier();
- return ResultSuccess();
+ R_SUCCEED();
}
ALWAYS_INLINE Result InvalidateInstructionCacheRange(uintptr_t start, uintptr_t end) {
@@ -328,7 +328,7 @@ namespace ams::kern::arch::arm64::cpu {
MESOSPHERE_ASSERT(util::IsAligned(end, InstructionCacheLineSize));
R_UNLESS(UserspaceAccess::InvalidateInstructionCache(start, end), svc::ResultInvalidCurrentMemory());
EnsureInstructionConsistency();
- return ResultSuccess();
+ R_SUCCEED();
}
ALWAYS_INLINE void InvalidateEntireInstructionCacheLocalImpl() {
@@ -440,7 +440,7 @@ namespace ams::kern::arch::arm64::cpu {
R_TRY(InvalidateDataCacheRange(aligned_start, aligned_end));
}
- return ResultSuccess();
+ R_SUCCEED();
}
Result StoreDataCache(const void *addr, size_t size) {
@@ -448,7 +448,7 @@ namespace ams::kern::arch::arm64::cpu {
const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), DataCacheLineSize);
const uintptr_t end = util::AlignUp( reinterpret_cast<uintptr_t>(addr) + size, DataCacheLineSize);
- return StoreDataCacheRange(start, end);
+ R_RETURN(StoreDataCacheRange(start, end));
}
Result FlushDataCache(const void *addr, size_t size) {
@@ -456,7 +456,7 @@ namespace ams::kern::arch::arm64::cpu {
const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), DataCacheLineSize);
const uintptr_t end = util::AlignUp( reinterpret_cast<uintptr_t>(addr) + size, DataCacheLineSize);
- return FlushDataCacheRange(start, end);
+ R_RETURN(FlushDataCacheRange(start, end));
}
Result InvalidateInstructionCache(void *addr, size_t size) {
@@ -469,7 +469,7 @@ namespace ams::kern::arch::arm64::cpu {
/* Request the interrupt helper to perform an instruction memory barrier. */
g_cache_operation_handler.RequestOperation(KCacheHelperInterruptHandler::Operation::InstructionMemoryBarrier);
- return ResultSuccess();
+ R_SUCCEED();
}
void InvalidateEntireInstructionCache() {


@@ -131,7 +131,7 @@ namespace ams::kern::arch::arm64 {
}
/* Get the FPU context. */
- return this->GetFpuContext(out, thread, context_flags);
+ R_RETURN(this->GetFpuContext(out, thread, context_flags));
}
Result KDebug::SetThreadContextImpl(const ams::svc::ThreadContext &ctx, KThread *thread, u32 context_flags) {
@@ -180,7 +180,7 @@ namespace ams::kern::arch::arm64 {
}
/* Set the FPU context. */
- return this->SetFpuContext(ctx, thread, context_flags);
+ R_RETURN(this->SetFpuContext(ctx, thread, context_flags));
}
Result KDebug::GetFpuContext(ams::svc::ThreadContext *out, KThread *thread, u32 context_flags) {
@@ -218,7 +218,7 @@ namespace ams::kern::arch::arm64 {
}
}
- return ResultSuccess();
+ R_SUCCEED();
}
Result KDebug::SetFpuContext(const ams::svc::ThreadContext &ctx, KThread *thread, u32 context_flags) {
@@ -243,11 +243,11 @@ namespace ams::kern::arch::arm64 {
t_ctx->SetFpuRegisters(ctx.v, this->Is64Bit());
}
- return ResultSuccess();
+ R_SUCCEED();
}
Result KDebug::BreakIfAttached(ams::svc::BreakReason break_reason, uintptr_t address, size_t size) {
- return KDebugBase::OnDebugEvent(ams::svc::DebugEvent_Exception, ams::svc::DebugException_UserBreak, GetProgramCounter(GetCurrentThread()), break_reason, address, size);
+ R_RETURN(KDebugBase::OnDebugEvent(ams::svc::DebugEvent_Exception, ams::svc::DebugException_UserBreak, GetProgramCounter(GetCurrentThread()), break_reason, address, size));
}
#define MESOSPHERE_SET_HW_BREAK_POINT(ID, FLAGS, VALUE) \
@@ -384,10 +384,10 @@ namespace ams::kern::arch::arm64 {
}
} else {
/* Invalid name. */
- return svc::ResultInvalidEnumValue();
+ R_THROW(svc::ResultInvalidEnumValue());
}
- return ResultSuccess();
+ R_SUCCEED();
}
#undef MESOSPHERE_SET_HW_WATCH_POINT


@@ -215,12 +215,12 @@ namespace ams::kern::arch::arm64 {
if (KInterruptController::IsGlobal(irq)) {
KScopedInterruptDisable di;
KScopedSpinLock lk(this->GetGlobalInterruptLock());
- return this->BindGlobal(handler, irq, core_id, priority, manual_clear, level);
+ R_RETURN(this->BindGlobal(handler, irq, core_id, priority, manual_clear, level));
} else {
MESOSPHERE_ASSERT(core_id == GetCurrentCoreId());
KScopedInterruptDisable di;
- return this->BindLocal(handler, irq, priority, manual_clear);
+ R_RETURN(this->BindLocal(handler, irq, priority, manual_clear));
}
}
@@ -234,12 +234,12 @@ namespace ams::kern::arch::arm64 {
KScopedInterruptDisable di;
KScopedSpinLock lk(this->GetGlobalInterruptLock());
- return this->UnbindGlobal(irq);
+ R_RETURN(this->UnbindGlobal(irq));
} else {
MESOSPHERE_ASSERT(core_id == GetCurrentCoreId());
KScopedInterruptDisable di;
- return this->UnbindLocal(irq);
+ R_RETURN(this->UnbindLocal(irq));
}
}
@@ -252,12 +252,12 @@ namespace ams::kern::arch::arm64 {
if (KInterruptController::IsGlobal(irq)) {
KScopedInterruptDisable di;
KScopedSpinLock lk(this->GetGlobalInterruptLock());
- return this->ClearGlobal(irq);
+ R_RETURN(this->ClearGlobal(irq));
} else {
MESOSPHERE_ASSERT(core_id == GetCurrentCoreId());
KScopedInterruptDisable di;
- return this->ClearLocal(irq);
+ R_RETURN(this->ClearLocal(irq));
}
}
@@ -288,7 +288,7 @@ namespace ams::kern::arch::arm64 {
m_interrupt_controller.SetPriorityLevel(irq, priority);
m_interrupt_controller.Enable(irq);
- return ResultSuccess();
+ R_SUCCEED();
}
Result KInterruptManager::BindLocal(KInterruptHandler *handler, s32 irq, s32 priority, bool manual_clear) {
@@ -311,7 +311,7 @@ namespace ams::kern::arch::arm64 {
m_interrupt_controller.SetPriorityLevel(irq, priority);
m_interrupt_controller.Enable(irq);
- return ResultSuccess();
+ R_SUCCEED();
}
Result KInterruptManager::UnbindGlobal(s32 irq) {
@@ -323,7 +323,7 @@ namespace ams::kern::arch::arm64 {
GetGlobalInterruptEntry(irq).handler = nullptr;
- return ResultSuccess();
+ R_SUCCEED();
}
Result KInterruptManager::UnbindLocal(s32 irq) {
@@ -335,7 +335,7 @@ namespace ams::kern::arch::arm64 {
entry.handler = nullptr;
- return ResultSuccess();
+ R_SUCCEED();
}
Result KInterruptManager::ClearGlobal(s32 irq) {
@@ -350,7 +350,7 @@ namespace ams::kern::arch::arm64 {
/* Clear and enable. */
entry.needs_clear = false;
m_interrupt_controller.Enable(irq);
- return ResultSuccess();
+ R_SUCCEED();
}
Result KInterruptManager::ClearLocal(s32 irq) {
@@ -365,7 +365,7 @@ namespace ams::kern::arch::arm64 {
/* Clear and set priority. */
entry.needs_clear = false;
m_interrupt_controller.SetPriorityLevel(irq, entry.priority);
- return ResultSuccess();
+ R_SUCCEED();
}
}


@@ -178,7 +178,7 @@ namespace ams::kern::arch::arm64 {
/* Initialize the base page table. */
MESOSPHERE_R_ABORT_UNLESS(KPageTableBase::InitializeForKernel(true, table, start, end));
- return ResultSuccess();
+ R_SUCCEED();
}
Result KPageTable::InitializeForProcess(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager, KResourceLimit *resource_limit) {
@@ -187,7 +187,7 @@ namespace ams::kern::arch::arm64 {
/* Get an ASID */
m_asid = g_asid_manager.Reserve();
- auto asid_guard = SCOPE_GUARD { g_asid_manager.Release(m_asid); };
+ ON_RESULT_FAILURE { g_asid_manager.Release(m_asid); };
/* Set our manager. */
m_manager = pt_manager;
@@ -196,7 +196,7 @@ namespace ams::kern::arch::arm64 {
const KVirtualAddress new_table = m_manager->Allocate();
R_UNLESS(new_table != Null<KVirtualAddress>, svc::ResultOutOfResource());
m_ttbr = EncodeTtbr(GetPageTablePhysicalAddress(new_table), m_asid);
- auto table_guard = SCOPE_GUARD { m_manager->Free(new_table); };
+ ON_RESULT_FAILURE_2 { m_manager->Free(new_table); };
/* Initialize our base table. */
const size_t as_width = GetAddressSpaceWidth(as_type);
@@ -204,13 +204,9 @@ namespace ams::kern::arch::arm64 {
const KProcessAddress as_end = (1ul << as_width);
R_TRY(KPageTableBase::InitializeForProcess(as_type, enable_aslr, enable_das_merge, from_back, pool, GetVoidPointer(new_table), as_start, as_end, code_address, code_size, mem_block_slab_manager, block_info_manager, resource_limit));
- /* We succeeded! */
- table_guard.Cancel();
- asid_guard.Cancel();
/* Note that we've updated the table (since we created it). */
this->NoteUpdated();
- return ResultSuccess();
+ R_SUCCEED();
}
Result KPageTable::Finalize() {
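
The InitializeForProcess hunk above shows the second half of the change: a SCOPE_GUARD that had to be explicitly Cancel()'d on the success path becomes an ON_RESULT_FAILURE handler that runs only if the function ultimately returns a failure. (ON_RESULT_FAILURE_2 is presumably the form required for an additional handler in the same scope, since each expansion must declare a distinctly named local.) The toy reconstruction below assumes the R_* macros publish the pending result to a local that RAII handlers inspect at scope exit; RESULT_SCOPE, ResultFailureGuard, and the handlers-take-a-callable syntax are all inventions of this sketch, not the real plumbing (the real macros take a trailing brace-body).

/* Toy reconstruction of the failure-handler mechanism; hypothetical names throughout. */
#include <cstdio>
#include <utility>

struct Result {
    int value;
    constexpr bool IsFailure() const { return value != 0; }
};
constexpr Result ResultSuccess() { return Result{0}; }

/* Runs f at scope exit iff the tracked result is a failure. */
template<typename F>
class ResultFailureGuard {
    public:
        ResultFailureGuard(const Result &r, F f) : m_result(r), m_f(std::move(f)) {}
        ~ResultFailureGuard() { if (m_result.IsFailure()) { m_f(); } }
    private:
        const Result &m_result;
        F m_f;
};

/* Each function tracks its pending result in a local that the R_* macros
   update before returning; the real macros hide this plumbing. */
#define RESULT_SCOPE() Result pending_result_ = ResultSuccess()
#define R_TRY(expr) do { if (const Result r_ = (expr); r_.IsFailure()) { pending_result_ = r_; return r_; } } while (false)
#define R_SUCCEED() do { pending_result_ = ResultSuccess(); return ResultSuccess(); } while (false)
#define ON_RESULT_FAILURE(f)   ResultFailureGuard on_fail_1_(pending_result_, f)
#define ON_RESULT_FAILURE_2(f) ResultFailureGuard on_fail_2_(pending_result_, f)

Result Allocate(bool ok) { return ok ? ResultSuccess() : Result{1}; }

Result Initialize(bool first_ok, bool second_ok) {
    RESULT_SCOPE();

    R_TRY(Allocate(first_ok));
    ON_RESULT_FAILURE([] { std::puts("releasing first resource"); });

    R_TRY(Allocate(second_ok));
    ON_RESULT_FAILURE_2([] { std::puts("releasing second resource"); });

    R_SUCCEED();
}

int main() {
    Initialize(true, false); /* prints: releasing first resource */
    return 0;
}

Initialize(true, false) prints only "releasing first resource": the second handler was never installed, and the first fires because the scope ended in failure. On success neither fires, with no explicit Cancel() anywhere, which is exactly the boilerplate this commit deletes.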
@@ -316,7 +312,7 @@ namespace ams::kern::arch::arm64 {
/* Release our asid. */
g_asid_manager.Release(m_asid);
- return ResultSuccess();
+ R_SUCCEED();
}
Result KPageTable::OperateImpl(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) {
@@ -334,17 +330,17 @@ namespace ams::kern::arch::arm64 {
}
if (operation == OperationType_Unmap) {
- return this->Unmap(virt_addr, num_pages, page_list, false, reuse_ll);
+ R_RETURN(this->Unmap(virt_addr, num_pages, page_list, false, reuse_ll));
} else {
auto entry_template = this->GetEntryTemplate(properties);
switch (operation) {
case OperationType_Map:
- return this->MapContiguous(virt_addr, phys_addr, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll);
+ R_RETURN(this->MapContiguous(virt_addr, phys_addr, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll));
case OperationType_ChangePermissions:
- return this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, false, page_list, reuse_ll);
+ R_RETURN(this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, false, page_list, reuse_ll));
case OperationType_ChangePermissionsAndRefresh:
- return this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, true, page_list, reuse_ll);
+ R_RETURN(this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, true, page_list, reuse_ll));
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}
@@ -361,7 +357,7 @@ namespace ams::kern::arch::arm64 {
auto entry_template = this->GetEntryTemplate(properties);
switch (operation) {
case OperationType_MapGroup:
- return this->MapGroup(virt_addr, page_group, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll);
+ R_RETURN(this->MapGroup(virt_addr, page_group, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll));
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}
@@ -388,7 +384,7 @@ namespace ams::kern::arch::arm64 {
phys_addr += L1BlockSize;
}
- return ResultSuccess();
+ R_SUCCEED();
}
Result KPageTable::MapL2Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
@@ -447,7 +443,7 @@ namespace ams::kern::arch::arm64 {
this->GetPageTableManager().Open(l2_virt, l2_open_count);
}
- return ResultSuccess();
+ R_SUCCEED();
}
Result KPageTable::MapL3Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
@@ -503,7 +499,8 @@ namespace ams::kern::arch::arm64 {
} else if (this->GetPageTableManager().IsInPageTableHeap(l2_virt) && l2_open_count > 0) {
this->GetPageTableManager().Open(l2_virt, l2_open_count);
}
- return svc::ResultOutOfResource();
+ R_THROW(svc::ResultOutOfResource());
}
/* Set the entry. */
@@ -551,7 +548,7 @@ namespace ams::kern::arch::arm64 {
this->GetPageTableManager().Open(l3_virt, l3_open_count);
}
- return ResultSuccess();
+ R_SUCCEED();
}
Result KPageTable::Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll) {
@@ -563,13 +560,13 @@ namespace ams::kern::arch::arm64 {
if (!force) {
const size_t size = num_pages * PageSize;
R_TRY(this->SeparatePages(virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll));
+ ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); };
if (num_pages > 1) {
const auto end_page = virt_addr + size;
const auto last_page = end_page - PageSize;
- auto merge_guard = SCOPE_GUARD { this->MergePages(virt_addr, page_list); };
R_TRY(this->SeparatePages(last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll));
- merge_guard.Cancel();
}
}
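
Worth noting in the Unmap hunk above: the ON_RESULT_FAILURE handler is installed only after the first SeparatePages call has already succeeded through R_TRY, so a failure separating the first page returns without re-merging anything, while a failure separating the last page re-merges the work already done. The deleted merge_guard expressed the same ordering but required an explicit Cancel() on the success path; with the scope handler, the success path needs no extra code.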
@@ -717,7 +714,7 @@ namespace ams::kern::arch::arm64 {
this->NoteUpdated();
}
- return ResultSuccess();
+ R_SUCCEED();
}
Result KPageTable::MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
@@ -731,7 +728,7 @@ namespace ams::kern::arch::arm64 {
/* Map the pages, using a guard to ensure we don't leak. */
{
- auto map_guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); };
+ ON_RESULT_FAILURE { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); };
if (num_pages < ContiguousPageSize / PageSize) {
R_TRY(this->Map(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge && virt_addr == orig_virt_addr, L3BlockSize, page_list, reuse_ll));
@@ -778,9 +775,6 @@ namespace ams::kern::arch::arm64 {
}
}
}
- /* We successfully mapped, so cancel our guard. */
- map_guard.Cancel();
}
/* Perform what coalescing we can. */
@@ -794,7 +788,7 @@ namespace ams::kern::arch::arm64 {
Kernel::GetMemoryManager().Open(orig_phys_addr, num_pages);
}
- return ResultSuccess();
+ R_SUCCEED();
}
Result KPageTable::MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
@@ -810,7 +804,7 @@ namespace ams::kern::arch::arm64 {
/* Map the pages, using a guard to ensure we don't leak. */
{
- auto map_guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); };
+ ON_RESULT_FAILURE { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); };
if (num_pages < ContiguousPageSize / PageSize) {
for (const auto &block : pg) {
@@ -875,9 +869,6 @@ namespace ams::kern::arch::arm64 {
}
}
}
- /* We successfully mapped, so cancel our guard. */
- map_guard.Cancel();
}
MESOSPHERE_ASSERT(mapped_pages == num_pages);
@@ -889,7 +880,7 @@ namespace ams::kern::arch::arm64 {
/* We succeeded! We want to persist the reference to the pages. */
spg.CancelClose();
- return ResultSuccess();
+ R_SUCCEED();
}
bool KPageTable::MergePages(KProcessAddress virt_addr, PageLinkedList *page_list) {
@@ -1184,18 +1175,17 @@ namespace ams::kern::arch::arm64 {
}
/* We're done! */
- return ResultSuccess();
+ R_SUCCEED();
}
Result KPageTable::SeparatePages(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
- /* Try to separate pages, re-merging if we fail. */
- auto guard = SCOPE_GUARD { this->MergePages(virt_addr, page_list); };
- R_TRY(this->SeparatePagesImpl(virt_addr, block_size, page_list, reuse_ll));
- guard.Cancel();
+ /* If we fail while separating, re-merge. */
+ ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); };
- return ResultSuccess();
+ /* Try to separate pages. */
+ R_RETURN(this->SeparatePagesImpl(virt_addr, block_size, page_list, reuse_ll));
}
Result KPageTable::ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, PageLinkedList *page_list, bool reuse_ll) {
@@ -1208,9 +1198,9 @@ namespace ams::kern::arch::arm64 {
const auto end_page = virt_addr + size;
const auto last_page = end_page - PageSize;
- auto merge_guard = SCOPE_GUARD { this->MergePages(virt_addr, page_list); };
+ ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); };
R_TRY(this->SeparatePages(last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll));
- merge_guard.Cancel();
}
/* ===================================================== */
@@ -1426,7 +1416,7 @@ namespace ams::kern::arch::arm64 {
this->MergePages(virt_addr + (num_pages - 1) * PageSize, page_list);
}
- return ResultSuccess();
+ R_SUCCEED();
}
void KPageTable::FinalizeUpdateImpl(PageLinkedList *page_list) {


@@ -150,12 +150,12 @@ namespace ams::kern::arch::arm64 {
/* Lock the context, if we're a main thread. */
m_locked = is_main;
- return ResultSuccess();
+ R_SUCCEED();
}
Result KThreadContext::Finalize() {
/* This doesn't actually do anything. */
- return ResultSuccess();
+ R_SUCCEED();
}
void KThreadContext::SetArguments(uintptr_t arg0, uintptr_t arg1) {