kern: implement 10.x perm change, fix many page table bugs

This commit is contained in:
Michael Scire 2020-04-19 17:16:19 -07:00
parent dcfb3bc9b5
commit b39b6f0d5b
8 changed files with 262 additions and 194 deletions

View file

@@ -292,7 +292,7 @@ namespace ams::kern::arch::arm64::init {
/* Can we make an L1 block? */
if (util::IsAligned(GetInteger(virt_addr), L1BlockSize) && util::IsAligned(GetInteger(phys_addr), L1BlockSize) && size >= L1BlockSize) {
*l1_entry = L1PageTableEntry(phys_addr, attr, false);
*l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, false);
cpu::DataSynchronizationBarrierInnerShareable();
virt_addr += L1BlockSize;
@@ -305,7 +305,7 @@ namespace ams::kern::arch::arm64::init {
if (!l1_entry->IsTable()) {
KPhysicalAddress new_table = allocator.Allocate();
ClearNewPageTable(new_table);
*l1_entry = L1PageTableEntry(new_table, attr.IsPrivilegedExecuteNever());
*l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
cpu::DataSynchronizationBarrierInnerShareable();
}
@@ -314,7 +314,7 @@ namespace ams::kern::arch::arm64::init {
/* Can we make a contiguous L2 block? */
if (util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize) && util::IsAligned(GetInteger(phys_addr), L2ContiguousBlockSize) && size >= L2ContiguousBlockSize) {
for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
l2_entry[i] = L2PageTableEntry(phys_addr, attr, true);
l2_entry[i] = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, true);
cpu::DataSynchronizationBarrierInnerShareable();
virt_addr += L2BlockSize;
@@ -326,7 +326,7 @@ namespace ams::kern::arch::arm64::init {
/* Can we make an L2 block? */
if (util::IsAligned(GetInteger(virt_addr), L2BlockSize) && util::IsAligned(GetInteger(phys_addr), L2BlockSize) && size >= L2BlockSize) {
*l2_entry = L2PageTableEntry(phys_addr, attr, false);
*l2_entry = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, false);
cpu::DataSynchronizationBarrierInnerShareable();
virt_addr += L2BlockSize;
@@ -339,7 +339,7 @@ namespace ams::kern::arch::arm64::init {
if (!l2_entry->IsTable()) {
KPhysicalAddress new_table = allocator.Allocate();
ClearNewPageTable(new_table);
*l2_entry = L2PageTableEntry(new_table, attr.IsPrivilegedExecuteNever());
*l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
cpu::DataSynchronizationBarrierInnerShareable();
}
@@ -348,7 +348,7 @@ namespace ams::kern::arch::arm64::init {
/* Can we make a contiguous L3 block? */
if (util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize) && util::IsAligned(GetInteger(phys_addr), L3ContiguousBlockSize) && size >= L3ContiguousBlockSize) {
for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
l3_entry[i] = L3PageTableEntry(phys_addr, attr, true);
l3_entry[i] = L3PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, true);
cpu::DataSynchronizationBarrierInnerShareable();
virt_addr += L3BlockSize;
@@ -359,7 +359,7 @@ namespace ams::kern::arch::arm64::init {
}
/* Make an L3 block. */
*l3_entry = L3PageTableEntry(phys_addr, attr, false);
*l3_entry = L3PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, false);
cpu::DataSynchronizationBarrierInnerShareable();
virt_addr += L3BlockSize;
phys_addr += L3BlockSize;
@@ -537,7 +537,7 @@ namespace ams::kern::arch::arm64::init {
cpu::InvalidateEntireTlb();
/* Create new L1 block. */
*l1_entry = L1PageTableEntry(block, attr_after, false);
*l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, block, attr_after, false);
virt_addr += L1BlockSize;
size -= L1BlockSize;
@@ -568,7 +568,7 @@ namespace ams::kern::arch::arm64::init {
/* Create a new contiguous L2 block. */
for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
l2_entry[i] = L2PageTableEntry(block + L2BlockSize * i, attr_after, true);
l2_entry[i] = L2PageTableEntry(PageTableEntry::BlockTag{}, block + L2BlockSize * i, attr_after, true);
}
virt_addr += L2ContiguousBlockSize;
@@ -586,7 +586,7 @@ namespace ams::kern::arch::arm64::init {
cpu::InvalidateEntireTlb();
/* Create new L2 block. */
*l2_entry = L2PageTableEntry(block, attr_after, false);
*l2_entry = L2PageTableEntry(PageTableEntry::BlockTag{}, block, attr_after, false);
virt_addr += L2BlockSize;
size -= L2BlockSize;
@@ -620,7 +620,7 @@ namespace ams::kern::arch::arm64::init {
/* Create a new contiguous L3 block. */
for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
l3_entry[i] = L3PageTableEntry(block + L3BlockSize * i, attr_after, true);
l3_entry[i] = L3PageTableEntry(PageTableEntry::BlockTag{}, block + L3BlockSize * i, attr_after, true);
}
virt_addr += L3ContiguousBlockSize;
@@ -638,7 +638,7 @@ namespace ams::kern::arch::arm64::init {
cpu::InvalidateEntireTlb();
/* Create new L3 block. */
*l3_entry = L3PageTableEntry(block, attr_after, false);
*l3_entry = L3PageTableEntry(PageTableEntry::BlockTag{}, block, attr_after, false);
virt_addr += L3BlockSize;
size -= L3BlockSize;