Mirror of https://github.com/Atmosphere-NX/Atmosphere.git (synced 2025-05-29 14:05:17 -04:00)

kern: update for new hw maintenance semantics

Commit 9d89835ff8 · parent 6e17317d5d · 19 changed files with 386 additions and 244 deletions
@@ -176,7 +176,7 @@ namespace ams::kern::arch::arm64::cpu {
const u64 target_mask = m_target_cores.Load();

DataSynchronizationBarrier();
DataSynchronizationBarrierInnerShareable();
Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_CacheOperation, target_mask);

this->ProcessOperation();
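This hunk (and the matching KThread change at the end of the commit) narrows the pre-IPI barrier from a full-system DSB to an inner-shareable one. A minimal sketch of the pattern, assuming "dsb ish" is what the inner-shareable helper expands to and using a hypothetical SendIpiToCores() sender rather than the kernel's API:

    #include <cstdint>

    extern void SendIpiToCores(uint64_t core_mask); /* hypothetical IPI sender */

    /* Publish the request, make the store visible to every core in the        */
    /* inner-shareable domain, then raise the IPI so the receivers observe it. */
    inline void SignalOtherCores(volatile uint32_t *request_flag, uint64_t core_mask) {
        *request_flag = 1;
        __asm__ __volatile__("dsb ish" ::: "memory"); /* inner-shareable data synchronization barrier */
        SendIpiToCores(core_mask);
    }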
@@ -213,32 +213,37 @@ namespace ams::kern::arch::arm64::cpu {
};

/* Instances of the interrupt handlers. */
KThreadTerminationInterruptHandler g_thread_termination_handler;
KCacheHelperInterruptHandler g_cache_operation_handler;
KPerformanceCounterInterruptHandler g_performance_counter_handler[cpu::NumCores];
constinit KThreadTerminationInterruptHandler g_thread_termination_handler;
constinit KCacheHelperInterruptHandler g_cache_operation_handler;
constinit KPerformanceCounterInterruptHandler g_performance_counter_handler[cpu::NumCores];

/* Expose this as a global, for asm to use. */
s32 g_all_core_sync_count;
constinit s32 g_all_core_sync_count;

template<bool Init, typename F>
template<typename F>
ALWAYS_INLINE void PerformCacheOperationBySetWayImpl(int level, F f) {
/* Used in multiple locations. */
const u64 level_sel_value = static_cast<u64>(level << 1);

/* Get the cache size id register value with interrupts disabled. */
u64 ccsidr_value;
if constexpr (Init) {
/* During init, we can just set the selection register directly. */
cpu::SetCsselrEl1(level_sel_value);
cpu::InstructionMemoryBarrier();
ccsidr_value = cpu::GetCcsidrEl1();
} else {
/* After init, we need to care about interrupts. */
{
/* Disable interrupts. */
KScopedInterruptDisable di;

/* Configure the cache select register for our level. */
cpu::SetCsselrEl1(level_sel_value);

/* Ensure our configuration takes before reading the cache size id register. */
cpu::InstructionMemoryBarrier();

/* Get the cache size id register. */
ccsidr_value = cpu::GetCcsidrEl1();
}

/* Ensure that no memory inconsistencies occur between cache management invocations. */
cpu::DataSynchronizationBarrier();

/* Get cache size id info. */
CacheSizeIdRegisterAccessor ccsidr_el1(ccsidr_value);
const int num_sets = ccsidr_el1.GetNumberOfSets();
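For readers unfamiliar with set/way maintenance: the CCSIDR_EL1 value read above supplies the set count, way count, and line size that drive the per-line loop. A self-contained sketch of how a DC-by-set/way operand is composed (illustrative only; the kernel's real accessors and helpers differ):

    #include <bit>
    #include <cstdint>

    /* Clean and invalidate every line of one cache level by set/way.           */
    /* level is 0-based; num_sets/num_ways/line_size_log2 come from CCSIDR_EL1. */
    inline void CleanInvalidateLevelBySetWay(int level, int num_sets, int num_ways, int line_size_log2) {
        const int way_shift = 32 - static_cast<int>(std::bit_width(static_cast<unsigned>(num_ways - 1)));
        const int set_shift = line_size_log2;

        for (int way = 0; way < num_ways; ++way) {
            for (int set = 0; set < num_sets; ++set) {
                const uint64_t sw = (static_cast<uint64_t>(way)   << way_shift)
                                  | (static_cast<uint64_t>(set)   << set_shift)
                                  | (static_cast<uint64_t>(level) << 1);
                __asm__ __volatile__("dc cisw, %0" :: "r"(sw) : "memory");
            }
        }
    }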
@@ -266,13 +271,11 @@ namespace ams::kern::arch::arm64::cpu {
}

void StoreDataCacheBySetWay(int level) {
PerformCacheOperationBySetWayImpl<false>(level, StoreDataCacheLineBySetWayImpl);
cpu::DataSynchronizationBarrier();
PerformCacheOperationBySetWayImpl(level, StoreDataCacheLineBySetWayImpl);
}

void FlushDataCacheBySetWay(int level) {
PerformCacheOperationBySetWayImpl<false>(level, FlushDataCacheLineBySetWayImpl);
cpu::DataSynchronizationBarrier();
PerformCacheOperationBySetWayImpl(level, FlushDataCacheLineBySetWayImpl);
}

void KCacheHelperInterruptHandler::ProcessOperation() {
@@ -284,9 +287,11 @@ namespace ams::kern::arch::arm64::cpu {
break;
case Operation::StoreDataCache:
StoreDataCacheBySetWay(0);
cpu::DataSynchronizationBarrier();
break;
case Operation::FlushDataCache:
FlushDataCacheBySetWay(0);
cpu::DataSynchronizationBarrier();
break;
}
}
@@ -323,14 +328,6 @@ namespace ams::kern::arch::arm64::cpu {
R_SUCCEED();
}

ALWAYS_INLINE Result InvalidateInstructionCacheRange(uintptr_t start, uintptr_t end) {
MESOSPHERE_ASSERT(util::IsAligned(start, InstructionCacheLineSize));
MESOSPHERE_ASSERT(util::IsAligned(end, InstructionCacheLineSize));
R_UNLESS(UserspaceAccess::InvalidateInstructionCache(start, end), svc::ResultInvalidCurrentMemory());
EnsureInstructionConsistency();
R_SUCCEED();
}

ALWAYS_INLINE void InvalidateEntireInstructionCacheLocalImpl() {
__asm__ __volatile__("ic iallu" ::: "memory");
}
@@ -341,26 +338,12 @@ namespace ams::kern::arch::arm64::cpu {

}

void StoreEntireCacheForInit() {
/* Store local. */
{
CacheLineIdRegisterAccessor clidr_el1;
const int levels_of_unification = clidr_el1.GetLevelsOfUnification();

for (int level = 0; level != levels_of_unification; ++level) {
PerformCacheOperationBySetWayImpl<true>(level, StoreDataCacheLineBySetWayImpl);
}
}

/* Store shared. */
{
CacheLineIdRegisterAccessor clidr_el1;
const int levels_of_coherency = clidr_el1.GetLevelsOfCoherency();
const int levels_of_unification = clidr_el1.GetLevelsOfUnification();

for (int level = levels_of_unification; level <= levels_of_coherency; ++level) {
PerformCacheOperationBySetWayImpl<true>(level, StoreDataCacheLineBySetWayImpl);
}
void StoreCacheForInit(void *addr, size_t size) {
/* Store the data cache for the specified range. */
const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), DataCacheLineSize);
const uintptr_t end = start + size;
for (uintptr_t cur = start; cur < end; cur += DataCacheLineSize) {
__asm__ __volatile__("dc cvac, %[cur]" :: [cur]"r"(cur) : "memory");
}

/* Data synchronization barrier. */
@@ -370,36 +353,7 @@ namespace ams::kern::arch::arm64::cpu {
InvalidateEntireInstructionCacheLocalImpl();

/* Ensure local instruction consistency. */
DataSynchronizationBarrierInnerShareable();
InstructionMemoryBarrier();
}

void FlushEntireCacheForInit() {
/* Flush data cache. */
{
/* Get levels of coherence/unification. */
CacheLineIdRegisterAccessor clidr_el1;
const int levels_of_coherency = clidr_el1.GetLevelsOfCoherency();

/* Store cache from L1 up to (level of coherence - 1). */
for (int level = 0; level < levels_of_coherency - 1; ++level) {
PerformCacheOperationBySetWayImpl<true>(level, StoreDataCacheLineBySetWayImpl);
DataSynchronizationBarrier();
}

/* Flush cache from (level of coherence - 1) down to L0. */
for (int level = levels_of_coherency; level > 0; --level) {
PerformCacheOperationBySetWayImpl<true>(level - 1, FlushDataCacheLineBySetWayImpl);
DataSynchronizationBarrier();
}
}

/* Invalidate instruction cache. */
InvalidateEntireInstructionCacheLocalImpl();
EnsureInstructionConsistency();

/* Invalidate entire TLB. */
InvalidateEntireTlb();
}

void FlushEntireDataCache() {
@@ -417,10 +371,17 @@ namespace ams::kern::arch::arm64::cpu {
for (int level = levels_of_coherency; level > 1; --level) {
FlushDataCacheBySetWay(level - 1);
}

/* Data synchronization barrier for full system. */
DataSynchronizationBarrier();
}

Result InvalidateDataCache(void *addr, size_t size) {
KScopedCoreMigrationDisable dm;
/* Mark ourselves as in a cache maintenance operation, and prevent re-ordering. */
__asm__ __volatile__("" ::: "memory");
GetCurrentThread().SetInCacheMaintenanceOperation();
ON_SCOPE_EXIT { GetCurrentThread().ClearInCacheMaintenanceOperation(); __asm__ __volatile__("" ::: "memory"); };

const uintptr_t start = reinterpret_cast<uintptr_t>(addr);
const uintptr_t end = start + size;
uintptr_t aligned_start = util::AlignDown(start, DataCacheLineSize);
@@ -444,7 +405,11 @@ namespace ams::kern::arch::arm64::cpu {
}

Result StoreDataCache(const void *addr, size_t size) {
KScopedCoreMigrationDisable dm;
/* Mark ourselves as in a cache maintenance operation, and prevent re-ordering. */
__asm__ __volatile__("" ::: "memory");
GetCurrentThread().SetInCacheMaintenanceOperation();
ON_SCOPE_EXIT { GetCurrentThread().ClearInCacheMaintenanceOperation(); __asm__ __volatile__("" ::: "memory"); };

const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), DataCacheLineSize);
const uintptr_t end = util::AlignUp( reinterpret_cast<uintptr_t>(addr) + size, DataCacheLineSize);
@@ -452,26 +417,17 @@ namespace ams::kern::arch::arm64::cpu {
}

Result FlushDataCache(const void *addr, size_t size) {
KScopedCoreMigrationDisable dm;
/* Mark ourselves as in a cache maintenance operation, and prevent re-ordering. */
__asm__ __volatile__("" ::: "memory");
GetCurrentThread().SetInCacheMaintenanceOperation();
ON_SCOPE_EXIT { GetCurrentThread().ClearInCacheMaintenanceOperation(); __asm__ __volatile__("" ::: "memory"); };

const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), DataCacheLineSize);
const uintptr_t end = util::AlignUp( reinterpret_cast<uintptr_t>(addr) + size, DataCacheLineSize);

R_RETURN(FlushDataCacheRange(start, end));
}

Result InvalidateInstructionCache(void *addr, size_t size) {
KScopedCoreMigrationDisable dm;
const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), InstructionCacheLineSize);
const uintptr_t end = util::AlignUp( reinterpret_cast<uintptr_t>(addr) + size, InstructionCacheLineSize);

R_TRY(InvalidateInstructionCacheRange(start, end));

/* Request the interrupt helper to perform an instruction memory barrier. */
g_cache_operation_handler.RequestOperation(KCacheHelperInterruptHandler::Operation::InstructionMemoryBarrier);

R_SUCCEED();
}

void InvalidateEntireInstructionCache() {
KScopedCoreMigrationDisable dm;
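InvalidateDataCache, StoreDataCache, and FlushDataCache above all gain the same prologue: a compiler barrier, a per-thread "in cache maintenance" flag, and a scope-exit that clears the flag behind another compiler barrier. A minimal sketch of that guard pattern with hypothetical names (the real code uses GetCurrentThread() and ON_SCOPE_EXIT rather than a standalone class):

    /* RAII guard: the compiler barriers keep the flag updates from being reordered   */
    /* around the cache maintenance instructions performed while the guard is alive.  */
    class ScopedCacheMaintenanceMark {
        public:
            ScopedCacheMaintenanceMark() {
                __asm__ __volatile__("" ::: "memory");
                s_in_cache_maintenance = true;
            }
            ~ScopedCacheMaintenanceMark() {
                s_in_cache_maintenance = false;
                __asm__ __volatile__("" ::: "memory");
            }
        private:
            static inline thread_local bool s_in_cache_maintenance = false; /* stand-in for the thread flag */
    };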
@@ -61,3 +61,138 @@ _ZN3ams4kern4arch5arm643cpu23SynchronizeAllCoresImplEPii:
5:
stlr wzr, [x0]
ret

/* ams::kern::arch::arm64::cpu::ClearPageToZeroImpl(void *) */
.section .text._ZN3ams4kern4arch5arm643cpu19ClearPageToZeroImplEPv, "ax", %progbits
.global _ZN3ams4kern4arch5arm643cpu19ClearPageToZeroImplEPv
.type _ZN3ams4kern4arch5arm643cpu19ClearPageToZeroImplEPv, %function
_ZN3ams4kern4arch5arm643cpu19ClearPageToZeroImplEPv:
/* Efficiently clear the page using dc zva. */
dc zva, x0
add x8, x0, #0x040
dc zva, x8
add x8, x0, #0x080
dc zva, x8
add x8, x0, #0x0c0
dc zva, x8
add x8, x0, #0x100
dc zva, x8
add x8, x0, #0x140
dc zva, x8
add x8, x0, #0x180
dc zva, x8
add x8, x0, #0x1c0
dc zva, x8
add x8, x0, #0x200
dc zva, x8
add x8, x0, #0x240
dc zva, x8
add x8, x0, #0x280
dc zva, x8
add x8, x0, #0x2c0
dc zva, x8
add x8, x0, #0x300
dc zva, x8
add x8, x0, #0x340
dc zva, x8
add x8, x0, #0x380
dc zva, x8
add x8, x0, #0x3c0
dc zva, x8
add x8, x0, #0x400
dc zva, x8
add x8, x0, #0x440
dc zva, x8
add x8, x0, #0x480
dc zva, x8
add x8, x0, #0x4c0
dc zva, x8
add x8, x0, #0x500
dc zva, x8
add x8, x0, #0x540
dc zva, x8
add x8, x0, #0x580
dc zva, x8
add x8, x0, #0x5c0
dc zva, x8
add x8, x0, #0x600
dc zva, x8
add x8, x0, #0x640
dc zva, x8
add x8, x0, #0x680
dc zva, x8
add x8, x0, #0x6c0
dc zva, x8
add x8, x0, #0x700
dc zva, x8
add x8, x0, #0x740
dc zva, x8
add x8, x0, #0x780
dc zva, x8
add x8, x0, #0x7c0
dc zva, x8
add x8, x0, #0x800
dc zva, x8
add x8, x0, #0x840
dc zva, x8
add x8, x0, #0x880
dc zva, x8
add x8, x0, #0x8c0
dc zva, x8
add x8, x0, #0x900
dc zva, x8
add x8, x0, #0x940
dc zva, x8
add x8, x0, #0x980
dc zva, x8
add x8, x0, #0x9c0
dc zva, x8
add x8, x0, #0xa00
dc zva, x8
add x8, x0, #0xa40
dc zva, x8
add x8, x0, #0xa80
dc zva, x8
add x8, x0, #0xac0
dc zva, x8
add x8, x0, #0xb00
dc zva, x8
add x8, x0, #0xb40
dc zva, x8
add x8, x0, #0xb80
dc zva, x8
add x8, x0, #0xbc0
dc zva, x8
add x8, x0, #0xc00
dc zva, x8
add x8, x0, #0xc40
dc zva, x8
add x8, x0, #0xc80
dc zva, x8
add x8, x0, #0xcc0
dc zva, x8
add x8, x0, #0xd00
dc zva, x8
add x8, x0, #0xd40
dc zva, x8
add x8, x0, #0xd80
dc zva, x8
add x8, x0, #0xdc0
dc zva, x8
add x8, x0, #0xe00
dc zva, x8
add x8, x0, #0xe40
dc zva, x8
add x8, x0, #0xe80
dc zva, x8
add x8, x0, #0xec0
dc zva, x8
add x8, x0, #0xf00
dc zva, x8
add x8, x0, #0xf40
dc zva, x8
add x8, x0, #0xf80
dc zva, x8
add x8, x0, #0xfc0
dc zva, x8
ret
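The new ClearPageToZeroImpl above is a fully unrolled page clear. A hedged C-level equivalent of what it does, assuming a 4 KiB page and the 64-byte DC ZVA block size the 0x40 stride implies:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t PageSize     = 0x1000; /* assumed 4 KiB page      */
    constexpr size_t ZvaBlockSize = 0x40;   /* assumed 64-byte granule */

    /* Zero a page one ZVA block at a time; "dc zva" zeroes an entire block of */
    /* memory without having to read the existing data first.                  */
    inline void ClearPageToZeroSketch(void *page) {
        const uintptr_t base = reinterpret_cast<uintptr_t>(page);
        for (size_t offset = 0; offset < PageSize; offset += ZvaBlockSize) {
            __asm__ __volatile__("dc zva, %0" :: "r"(base + offset) : "memory");
        }
    }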
@@ -257,21 +257,21 @@ namespace ams::kern::arch::arm64 {
#define MESOSPHERE_SET_HW_BREAK_POINT(ID, FLAGS, VALUE) \
({ \
cpu::SetDbgBcr##ID##El1(0); \
cpu::EnsureInstructionConsistency(); \
cpu::EnsureInstructionConsistencyFullSystem(); \
cpu::SetDbgBvr##ID##El1(VALUE); \
cpu::EnsureInstructionConsistency(); \
cpu::EnsureInstructionConsistencyFullSystem(); \
cpu::SetDbgBcr##ID##El1(FLAGS); \
cpu::EnsureInstructionConsistency(); \
cpu::EnsureInstructionConsistencyFullSystem(); \
})

#define MESOSPHERE_SET_HW_WATCH_POINT(ID, FLAGS, VALUE) \
({ \
cpu::SetDbgWcr##ID##El1(0); \
cpu::EnsureInstructionConsistency(); \
cpu::EnsureInstructionConsistencyFullSystem(); \
cpu::SetDbgWvr##ID##El1(VALUE); \
cpu::EnsureInstructionConsistency(); \
cpu::EnsureInstructionConsistencyFullSystem(); \
cpu::SetDbgWcr##ID##El1(FLAGS); \
cpu::EnsureInstructionConsistency(); \
cpu::EnsureInstructionConsistencyFullSystem(); \
})

Result KDebug::SetHardwareBreakPoint(ams::svc::HardwareBreakPointRegisterName name, u64 flags, u64 value) {
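The macros now use cpu::EnsureInstructionConsistencyFullSystem() between each debug-register write. As a hedged illustration of the kind of sequence such a helper typically wraps (an assumption about its shape, not a quote of the kernel's definition): a full-system data synchronization barrier followed by an instruction synchronization barrier, so each write completes and takes effect before the next one is issued.

    /* Assumed shape of the helper; the kernel's actual definition may differ. */
    inline void EnsureInstructionConsistencyFullSystemSketch() {
        __asm__ __volatile__("dsb sy" ::: "memory"); /* wait for the register write to complete */
        __asm__ __volatile__("isb"    ::: "memory"); /* resynchronize the instruction stream    */
    }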
@@ -158,6 +158,32 @@ namespace ams::kern::arch::arm64 {

}

ALWAYS_INLINE void KPageTable::NoteUpdated() const {
cpu::DataSynchronizationBarrierInnerShareableStore();

/* Mark ourselves as in a tlb maintenance operation. */
GetCurrentThread().SetInTlbMaintenanceOperation();
ON_SCOPE_EXIT { GetCurrentThread().ClearInTlbMaintenanceOperation(); __asm__ __volatile__("" ::: "memory"); };

if (this->IsKernel()) {
this->OnKernelTableUpdated();
} else {
this->OnTableUpdated();
}
}

ALWAYS_INLINE void KPageTable::NoteSingleKernelPageUpdated(KProcessAddress virt_addr) const {
MESOSPHERE_ASSERT(this->IsKernel());

cpu::DataSynchronizationBarrierInnerShareableStore();

/* Mark ourselves as in a tlb maintenance operation. */
GetCurrentThread().SetInTlbMaintenanceOperation();
ON_SCOPE_EXIT { GetCurrentThread().ClearInTlbMaintenanceOperation(); __asm__ __volatile__("" ::: "memory"); };

this->OnKernelTableSinglePageUpdated(virt_addr);
}

void KPageTable::Initialize(s32 core_id) {
/* Nothing actually needed here. */
MESOSPHERE_UNUSED(core_id);
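NoteUpdated() now issues cpu::DataSynchronizationBarrierInnerShareableStore() before performing the TLB maintenance. A hedged sketch of the ordering this enforces for a single page, written with raw instructions rather than the kernel's helpers (the dsb ishst / tlbi vaae1is mapping is an assumption):

    #include <cstdint>

    /* Make the preceding page-table-entry stores visible, broadcast the invalidate   */
    /* to the inner-shareable domain, then wait for it and resynchronize the pipeline. */
    inline void InvalidatePageSketch(uint64_t page_va) {
        __asm__ __volatile__("dsb ishst" ::: "memory");                            /* prior PTE stores visible */
        __asm__ __volatile__("tlbi vaae1is, %0" :: "r"(page_va >> 12) : "memory"); /* invalidate VA, all ASIDs */
        __asm__ __volatile__("dsb ish" ::: "memory");                              /* wait for TLBI completion */
        __asm__ __volatile__("isb" ::: "memory");
    }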
@@ -412,9 +438,8 @@ namespace ams::kern::arch::arm64 {

/* Set the entry. */
l2_phys = GetPageTablePhysicalAddress(l2_virt);
PteDataSynchronizationBarrier();
PteDataMemoryBarrier();
*l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, l2_phys, this->IsKernel(), true);
PteDataSynchronizationBarrier();
} else {
l2_virt = GetPageTableVirtualAddress(l2_phys);
}
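The swap from PteDataSynchronizationBarrier() to PteDataMemoryBarrier() before publishing a new table entry (and the dropped trailing barrier) recurs throughout the hunks below. A hedged sketch of what the two helpers plausibly expand to (assumed mappings, not quotes of the kernel's definitions):

    /* A DSB additionally waits for the prior stores to complete; a DMB only enforces   */
    /* ordering against later accesses, which this commit treats as sufficient when     */
    /* storing the entry that makes a newly written table reachable.                    */
    inline void PteDataSynchronizationBarrierSketch() { __asm__ __volatile__("dsb ishst" ::: "memory"); }
    inline void PteDataMemoryBarrierSketch()          { __asm__ __volatile__("dmb ishst" ::: "memory"); }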
@@ -477,9 +502,8 @@ namespace ams::kern::arch::arm64 {

/* Set the entry. */
l2_phys = GetPageTablePhysicalAddress(l2_virt);
PteDataSynchronizationBarrier();
PteDataMemoryBarrier();
*l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, l2_phys, this->IsKernel(), true);
PteDataSynchronizationBarrier();
l2_allocated = true;
} else {
l2_virt = GetPageTableVirtualAddress(l2_phys);
@@ -505,9 +529,8 @@ namespace ams::kern::arch::arm64 {

/* Set the entry. */
l3_phys = GetPageTablePhysicalAddress(l3_virt);
PteDataSynchronizationBarrier();
PteDataMemoryBarrier();
*l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, l3_phys, this->IsKernel(), true);
PteDataSynchronizationBarrier();
l2_open_count++;
} else {
l3_virt = GetPageTableVirtualAddress(l3_phys);
@@ -631,7 +654,7 @@ namespace ams::kern::arch::arm64 {
for (size_t i = 0; i < num_l2_blocks; i++) {
*impl.GetL2EntryFromTable(l2_virt, virt_addr + L2BlockSize * i) = InvalidL2PageTableEntry;
}
PteDataSynchronizationBarrier();
PteDataMemoryBarrier();

/* Close references to the L2 table. */
if (this->GetPageTableManager().IsInPageTableHeap(l2_virt)) {
@@ -665,7 +688,7 @@ namespace ams::kern::arch::arm64 {
for (size_t i = 0; i < num_l3_blocks; i++) {
*impl.GetL3EntryFromTable(l3_virt, virt_addr + L3BlockSize * i) = InvalidL3PageTableEntry;
}
PteDataSynchronizationBarrier();
PteDataMemoryBarrier();

/* Close references to the L3 table. */
if (this->GetPageTableManager().IsInPageTableHeap(l3_virt)) {
@@ -783,6 +806,9 @@ namespace ams::kern::arch::arm64 {
this->MergePages(orig_virt_addr + (num_pages - 1) * PageSize, page_list);
}

/* Wait for pending stores to complete. */
cpu::DataSynchronizationBarrierInnerShareableStore();

/* Open references to the pages, if we should. */
if (IsHeapPhysicalAddress(orig_phys_addr)) {
Kernel::GetMemoryManager().Open(orig_phys_addr, num_pages);
@@ -878,6 +904,9 @@ namespace ams::kern::arch::arm64 {
this->MergePages(orig_virt_addr + (num_pages - 1) * PageSize, page_list);
}

/* Wait for pending stores to complete. */
cpu::DataSynchronizationBarrierInnerShareableStore();

/* We succeeded! We want to persist the reference to the pages. */
spg.CancelClose();
R_SUCCEED();
@@ -967,7 +996,6 @@ namespace ams::kern::arch::arm64 {
auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_entry->IsHeadMergeDisabled(), head_entry->IsHeadAndBodyMergeDisabled(), tail_entry->IsTailMergeDisabled());

/* Merge! */
PteDataSynchronizationBarrier();
*l2_entry = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false);

/* Note that we updated. */
@@ -1049,7 +1077,6 @@ namespace ams::kern::arch::arm64 {
auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_entry->IsHeadMergeDisabled(), head_entry->IsHeadAndBodyMergeDisabled(), tail_entry->IsTailMergeDisabled());

/* Merge! */
/* NOTE: As of 13.1.0, Nintendo does not do: PteDataSynchronizationBarrier(); */
*l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false);

/* Note that we updated. */
@@ -1097,7 +1124,7 @@ namespace ams::kern::arch::arm64 {
this->GetPageTableManager().Open(l2_table, L1BlockSize / L2BlockSize);

/* Replace the L1 entry with one to the new table. */
PteDataSynchronizationBarrier();
PteDataMemoryBarrier();
*l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, l2_phys, this->IsKernel(), true);
this->NoteUpdated();
}
@@ -1147,7 +1174,7 @@ namespace ams::kern::arch::arm64 {
this->GetPageTableManager().Open(l3_table, L2BlockSize / L3BlockSize);

/* Replace the L2 entry with one to the new table. */
PteDataSynchronizationBarrier();
PteDataMemoryBarrier();
*l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, l3_phys, this->IsKernel(), true);
this->NoteUpdated();
}
@@ -577,26 +577,6 @@ _ZN3ams4kern4arch5arm6415UserspaceAccess19InvalidateDataCacheEmm:
mov x0, #1
ret

/* ams::kern::arch::arm64::UserspaceAccess::InvalidateInstructionCache(uintptr_t start, uintptr_t end) */
.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess26InvalidateInstructionCacheEmm, "ax", %progbits
.global _ZN3ams4kern4arch5arm6415UserspaceAccess26InvalidateInstructionCacheEmm
.type _ZN3ams4kern4arch5arm6415UserspaceAccess26InvalidateInstructionCacheEmm, %function
.balign 0x10
_ZN3ams4kern4arch5arm6415UserspaceAccess26InvalidateInstructionCacheEmm:
/* Check if we have any work to do. */
cmp x1, x0
b.eq 2f

1: /* Loop, invalidating each cache line. */
ic ivau, x0
add x0, x0, #0x40
cmp x1, x0
b.ne 1b

2: /* We're done! */
mov x0, #1
ret

/* ams::kern::arch::arm64::UserspaceAccess::ReadIoMemory32Bit(void *dst, const void *src, size_t size) */
.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess17ReadIoMemory32BitEPvPKvm, "ax", %progbits
.global _ZN3ams4kern4arch5arm6415UserspaceAccess17ReadIoMemory32BitEPvPKvm
@@ -278,6 +278,9 @@ _ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm:
.global _ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm
.type _ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm, %function
_ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm:
/* cpu::DataSynchronizationBarrier(); */
dsb sy

/* const u64 level_sel_value = level << 1; */
lsl x8, x0, #1
@@ -179,13 +179,7 @@ namespace ams::kern {
}
}

/* Flush caches. */
/* NOTE: This seems incorrect according to arm spec, which says not to flush via set/way after boot. */
/* However, Nintendo flushes the entire cache here and not doing so has caused reports of abort with ESR_EL1 */
/* as 0x02000000 (unknown abort) to occur. */
MESOSPHERE_UNUSED(params);
cpu::FlushEntireDataCache();
cpu::InvalidateEntireInstructionCache();

R_SUCCEED();
}
@@ -1315,7 +1315,7 @@ namespace ams::kern {
/* If the thread is runnable, send a termination interrupt to other cores. */
if (this->GetState() == ThreadState_Runnable) {
if (const u64 core_mask = m_physical_affinity_mask.GetAffinityMask() & ~(1ul << GetCurrentCoreId()); core_mask != 0) {
cpu::DataSynchronizationBarrier();
cpu::DataSynchronizationBarrierInnerShareable();
Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_ThreadTerminate, core_mask);
}
}