kern: update for new device map semantics

This commit is contained in:
Michael Scire 2022-10-11 22:22:11 -07:00 committed by SciresM
parent 947fdcf6f6
commit 5ee7d8a5ed
14 changed files with 115 additions and 58 deletions

View file

@@ -1118,6 +1118,7 @@ namespace ams::kern::board::nintendo::nx {
}
Result KDevicePageTable::MapImpl(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool is_aligned) {
/* Ensure that the region we're mapping to is free. */
R_UNLESS(this->IsFree(device_address, size), svc::ResultInvalidCurrentMemory());
@@ -1416,11 +1417,15 @@ namespace ams::kern::board::nintendo::nx {
return true;
}
Result KDevicePageTable::Map(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool is_aligned) {
Result KDevicePageTable::Map(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool is_aligned, bool is_io) {
/* Validate address/size. */
MESOSPHERE_ASSERT((device_address & ~DeviceVirtualAddressMask) == 0);
MESOSPHERE_ASSERT(((device_address + size - 1) & ~DeviceVirtualAddressMask) == 0);
/* IO is not supported on NX board. */
MESOSPHERE_ASSERT(!is_io);
MESOSPHERE_UNUSED(is_io);
/* Map the pages. */
R_RETURN(this->MapImpl(page_table, process_address, size, device_address, device_perm, is_aligned));
}

View file

@@ -61,10 +61,21 @@ namespace ams::kern {
R_RETURN(m_table.Detach(device_name));
}
Result KDeviceAddressSpace::Map(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address, ams::svc::MemoryPermission device_perm, bool is_aligned) {
Result KDeviceAddressSpace::Map(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address, u32 option, bool is_aligned) {
/* Check that the address falls within the space. */
R_UNLESS((m_space_address <= device_address && device_address + size - 1 <= m_space_address + m_space_size - 1), svc::ResultInvalidCurrentMemory());
/* Decode the option. */
const util::BitPack32 option_pack = { option };
const auto device_perm = option_pack.Get<ams::svc::MapDeviceAddressSpaceOption::Permission>();
const auto flags = option_pack.Get<ams::svc::MapDeviceAddressSpaceOption::Flags>();
const auto reserved = option_pack.Get<ams::svc::MapDeviceAddressSpaceOption::Reserved>();
/* Validate the option. */
/* TODO: It is likely that this check for flags == none is only on NX board. */
R_UNLESS(flags == ams::svc::MapDeviceAddressSpaceFlag_None, svc::ResultInvalidEnumValue());
R_UNLESS(reserved == 0, svc::ResultInvalidEnumValue());
/* Lock the address space. */
KScopedLightLock lk(m_lock);
@@ -72,15 +83,21 @@ namespace ams::kern {
KScopedLightLock pt_lk = page_table->AcquireDeviceMapLock();
/* Lock the pages. */
R_TRY(page_table->LockForMapDeviceAddressSpace(process_address, size, ConvertToKMemoryPermission(device_perm), is_aligned));
bool is_io{};
R_TRY(page_table->LockForMapDeviceAddressSpace(std::addressof(is_io), process_address, size, ConvertToKMemoryPermission(device_perm), is_aligned, true));
/* Ensure that if we fail, we don't keep unmapped pages locked. */
ON_RESULT_FAILURE { MESOSPHERE_R_ABORT_UNLESS(page_table->UnlockForDeviceAddressSpace(process_address, size)); };
/* Check that the io status is allowable. */
if (is_io) {
R_UNLESS((flags & ams::svc::MapDeviceAddressSpaceFlag_NotIoRegister) == 0, svc::ResultInvalidCombination());
}
/* Map the pages. */
{
/* Perform the mapping. */
R_TRY(m_table.Map(page_table, process_address, size, device_address, device_perm, is_aligned));
R_TRY(m_table.Map(page_table, process_address, size, device_address, device_perm, is_aligned, is_io));
/* Ensure that we unmap the pages if we fail to update the protections. */
/* NOTE: Nintendo does not check the result of this unmap call. */
@@ -105,7 +122,7 @@ namespace ams::kern {
KScopedLightLock pt_lk = page_table->AcquireDeviceMapLock();
/* Lock the pages. */
R_TRY(page_table->LockForUnmapDeviceAddressSpace(process_address, size));
R_TRY(page_table->LockForUnmapDeviceAddressSpace(process_address, size, true));
/* Unmap the pages. */
{

View file

@@ -1384,11 +1384,8 @@ namespace ams::kern {
TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0 };
R_UNLESS(impl.BeginTraversal(std::addressof(cur_entry), std::addressof(context), address), svc::ResultInvalidCurrentMemory());
/* The region we're traversing has to be heap. */
const KPhysicalAddress phys_address = cur_entry.phys_addr;
R_UNLESS(this->IsHeapPhysicalAddress(phys_address), svc::ResultInvalidCurrentMemory());
/* Traverse until we have enough size or we aren't contiguous any more. */
const KPhysicalAddress phys_address = cur_entry.phys_addr;
size_t contig_size;
for (contig_size = cur_entry.block_size - (GetInteger(phys_address) & (cur_entry.block_size - 1)); contig_size < size; contig_size += cur_entry.block_size) {
if (!impl.ContinueTraversal(std::addressof(cur_entry), std::addressof(context))) {
@@ -1402,11 +1399,11 @@ namespace ams::kern {
/* Take the minimum size for our region. */
size = std::min(size, contig_size);
/* Check that the memory is contiguous. */
R_TRY(this->CheckMemoryStateContiguous(address, size,
state_mask | KMemoryState_FlagReferenceCounted, state | KMemoryState_FlagReferenceCounted,
perm_mask, perm,
attr_mask, attr));
/* Check that the memory is contiguous (modulo the reference count bit). */
const u32 test_state_mask = state_mask | KMemoryState_FlagReferenceCounted;
if (R_FAILED(this->CheckMemoryStateContiguous(address, size, test_state_mask, state | KMemoryState_FlagReferenceCounted, perm_mask, perm, attr_mask, attr))) {
R_TRY(this->CheckMemoryStateContiguous(address, size, test_state_mask, state, perm_mask, perm, attr_mask, attr));
}
/* The memory is contiguous, so set the output range. */
*out = {
@@ -1913,7 +1910,7 @@ namespace ams::kern {
R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, phys_addr, true, properties, OperationType_Map, false));
/* Update the blocks. */
m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, KMemoryState_Io, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, KMemoryState_Io, perm, KMemoryAttribute_Locked, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
/* We successfully mapped the pages. */
R_SUCCEED();
@@ -1927,7 +1924,7 @@ namespace ams::kern {
/* Validate the memory state. */
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), dst_address, size, KMemoryState_All, KMemoryState_Io, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), dst_address, size, KMemoryState_All, KMemoryState_Io, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_Locked));
/* Validate that the region being unmapped corresponds to the physical range described. */
{
@@ -2687,7 +2684,7 @@ namespace ams::kern {
R_SUCCEED();
}
Result KPageTableBase::LockForMapDeviceAddressSpace(KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {
Result KPageTableBase::LockForMapDeviceAddressSpace(bool *out_is_io, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned, bool check_heap) {
/* Lightly validate the range before doing anything else. */
const size_t num_pages = size / PageSize;
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
@@ -2696,9 +2693,10 @@ namespace ams::kern {
KScopedLightLock lk(m_general_lock);
/* Check the memory state. */
const u32 test_state = (is_aligned ? KMemoryState_FlagCanAlignedDeviceMap : KMemoryState_FlagCanDeviceMap);
const u32 test_state = (is_aligned ? KMemoryState_FlagCanAlignedDeviceMap : KMemoryState_FlagCanDeviceMap) | (check_heap ? KMemoryState_FlagReferenceCounted : KMemoryState_None);
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, test_state, test_state, perm, perm, KMemoryAttribute_IpcLocked | KMemoryAttribute_Locked, KMemoryAttribute_None, KMemoryAttribute_DeviceShared));
KMemoryState old_state;
R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr, std::addressof(num_allocator_blocks), address, size, test_state, test_state, perm, perm, KMemoryAttribute_IpcLocked | KMemoryAttribute_Locked, KMemoryAttribute_None, KMemoryAttribute_DeviceShared));
/* Create an update allocator. */
Result allocator_result;
@@ -2708,10 +2706,13 @@ namespace ams::kern {
/* Update the memory blocks. */
m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::ShareToDevice, KMemoryPermission_None);
/* Set whether the locked memory was io. */
*out_is_io = old_state == KMemoryState_Io;
R_SUCCEED();
}
Result KPageTableBase::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size) {
Result KPageTableBase::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap) {
/* Lightly validate the range before doing anything else. */
const size_t num_pages = size / PageSize;
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
@@ -2720,10 +2721,11 @@ namespace ams::kern {
KScopedLightLock lk(m_general_lock);
/* Check the memory state. */
const u32 test_state = KMemoryState_FlagCanDeviceMap | (check_heap ? KMemoryState_FlagReferenceCounted : KMemoryState_None);
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryStateContiguous(std::addressof(num_allocator_blocks),
address, size,
KMemoryState_FlagReferenceCounted | KMemoryState_FlagCanDeviceMap, KMemoryState_FlagReferenceCounted | KMemoryState_FlagCanDeviceMap,
test_state, test_state,
KMemoryPermission_None, KMemoryPermission_None,
KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
@@ -2798,7 +2800,7 @@ namespace ams::kern {
KScopedLightLock lk(m_general_lock);
/* Get the range. */
const u32 test_state = KMemoryState_FlagReferenceCounted | (is_aligned ? KMemoryState_FlagCanAlignedDeviceMap : KMemoryState_FlagCanDeviceMap);
const u32 test_state = (is_aligned ? KMemoryState_FlagCanAlignedDeviceMap : KMemoryState_FlagCanDeviceMap);
R_TRY(this->GetContiguousMemoryRangeWithState(out,
address, size,
test_state, test_state,

View file

@@ -80,7 +80,12 @@ namespace ams::kern::svc {
}
}
Result MapDeviceAddressSpaceByForce(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, size_t size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
Result MapDeviceAddressSpaceByForce(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, size_t size, uint64_t device_address, u32 option) {
/* Decode the option. */
const util::BitPack32 option_pack = { option };
const auto device_perm = option_pack.Get<ams::svc::MapDeviceAddressSpaceOption::Permission>();
const auto reserved = option_pack.Get<ams::svc::MapDeviceAddressSpaceOption::Reserved>();
/* Validate input. */
R_UNLESS(util::IsAligned(process_address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(device_address, PageSize), svc::ResultInvalidAddress());
@@ -90,6 +95,7 @@ namespace ams::kern::svc {
R_UNLESS((device_address < device_address + size), svc::ResultInvalidMemoryRegion());
R_UNLESS((process_address == static_cast<uintptr_t>(process_address)), svc::ResultInvalidCurrentMemory());
R_UNLESS(IsValidDeviceMemoryPermission(device_perm), svc::ResultInvalidNewMemoryPermission());
R_UNLESS(reserved == 0, svc::ResultInvalidEnumValue());
/* Get the device address space. */
KScopedAutoObject das = GetCurrentProcess().GetHandleTable().GetObject<KDeviceAddressSpace>(das_handle);
@@ -104,10 +110,15 @@ namespace ams::kern::svc {
R_UNLESS(page_table.Contains(process_address, size), svc::ResultInvalidCurrentMemory());
/* Map. */
R_RETURN(das->MapByForce(std::addressof(page_table), KProcessAddress(process_address), size, device_address, device_perm));
R_RETURN(das->MapByForce(std::addressof(page_table), KProcessAddress(process_address), size, device_address, option));
}
Result MapDeviceAddressSpaceAligned(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, size_t size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
Result MapDeviceAddressSpaceAligned(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, size_t size, uint64_t device_address, u32 option) {
/* Decode the option. */
const util::BitPack32 option_pack = { option };
const auto device_perm = option_pack.Get<ams::svc::MapDeviceAddressSpaceOption::Permission>();
const auto reserved = option_pack.Get<ams::svc::MapDeviceAddressSpaceOption::Reserved>();
/* Validate input. */
R_UNLESS(util::IsAligned(process_address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(device_address, PageSize), svc::ResultInvalidAddress());
@@ -118,6 +129,7 @@ namespace ams::kern::svc {
R_UNLESS((device_address < device_address + size), svc::ResultInvalidMemoryRegion());
R_UNLESS((process_address == static_cast<uintptr_t>(process_address)), svc::ResultInvalidCurrentMemory());
R_UNLESS(IsValidDeviceMemoryPermission(device_perm), svc::ResultInvalidNewMemoryPermission());
R_UNLESS(reserved == 0, svc::ResultInvalidEnumValue());
/* Get the device address space. */
KScopedAutoObject das = GetCurrentProcess().GetHandleTable().GetObject<KDeviceAddressSpace>(das_handle);
@@ -132,7 +144,7 @@ namespace ams::kern::svc {
R_UNLESS(page_table.Contains(process_address, size), svc::ResultInvalidCurrentMemory());
/* Map. */
R_RETURN(das->MapAligned(std::addressof(page_table), KProcessAddress(process_address), size, device_address, device_perm));
R_RETURN(das->MapAligned(std::addressof(page_table), KProcessAddress(process_address), size, device_address, option));
}
Result UnmapDeviceAddressSpace(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, size_t size, uint64_t device_address) {
@@ -176,12 +188,12 @@ namespace ams::kern::svc {
R_RETURN(DetachDeviceAddressSpace(device_name, das_handle));
}
Result MapDeviceAddressSpaceByForce64(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
R_RETURN(MapDeviceAddressSpaceByForce(das_handle, process_handle, process_address, size, device_address, device_perm));
Result MapDeviceAddressSpaceByForce64(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, u32 option) {
R_RETURN(MapDeviceAddressSpaceByForce(das_handle, process_handle, process_address, size, device_address, option));
}
Result MapDeviceAddressSpaceAligned64(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
R_RETURN(MapDeviceAddressSpaceAligned(das_handle, process_handle, process_address, size, device_address, device_perm));
Result MapDeviceAddressSpaceAligned64(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, u32 option) {
R_RETURN(MapDeviceAddressSpaceAligned(das_handle, process_handle, process_address, size, device_address, option));
}
Result UnmapDeviceAddressSpace64(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address) {
@@ -202,12 +214,12 @@ namespace ams::kern::svc {
R_RETURN(DetachDeviceAddressSpace(device_name, das_handle));
}
Result MapDeviceAddressSpaceByForce64From32(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
R_RETURN(MapDeviceAddressSpaceByForce(das_handle, process_handle, process_address, size, device_address, device_perm));
Result MapDeviceAddressSpaceByForce64From32(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, u32 option) {
R_RETURN(MapDeviceAddressSpaceByForce(das_handle, process_handle, process_address, size, device_address, option));
}
Result MapDeviceAddressSpaceAligned64From32(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
R_RETURN(MapDeviceAddressSpaceAligned(das_handle, process_handle, process_address, size, device_address, device_perm));
Result MapDeviceAddressSpaceAligned64From32(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, u32 option) {
R_RETURN(MapDeviceAddressSpaceAligned(das_handle, process_handle, process_address, size, device_address, option));
}
Result UnmapDeviceAddressSpace64From32(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address) {