kern: add infra (but not impl) for all DeviceAddressSpace svcs

This commit is contained in:
Michael Scire 2020-07-14 18:46:25 -07:00 committed by SciresM
parent 863515a3b5
commit 0d3aa13f70
11 changed files with 303 additions and 13 deletions

View file

@@ -57,15 +57,71 @@ namespace ams::kern {
}
Result KDeviceAddressSpace::Detach(ams::svc::DeviceName device_name) {
    /* Detaches the named device from this address space. */
    /* NOTE: the old MESOSPHERE_UNIMPLEMENTED() stub was removed; leaving it */
    /* in place would panic before reaching the real implementation below. */

    /* Lock the address space, serializing against concurrent map/unmap/attach. */
    KScopedLightLock lk(this->lock);

    /* Forward the detach to the underlying device page table. */
    return this->table.Detach(device_name);
}
Result KDeviceAddressSpace::Map(size_t *out_mapped_size, KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address, ams::svc::MemoryPermission device_perm, bool is_aligned, bool refresh_mappings) {
    /* Maps [process_address, process_address + size) into this device address */
    /* space at device_address, writing the number of bytes actually mapped to */
    /* *out_mapped_size (0 on failure; may be < size on partial success). */
    /* NOTE: the old MESOSPHERE_UNIMPLEMENTED() stub was removed; it would */
    /* panic before reaching the real implementation below. */

    /* Check that the requested range falls entirely within the device address */
    /* space. The overflow-safe form compares inclusive last addresses. */
    R_UNLESS((this->space_address <= device_address && device_address + size - 1 <= this->space_address + this->space_size - 1), svc::ResultInvalidCurrentMemory());

    /* Lock the address space. */
    KScopedLightLock lk(this->lock);

    /* Lock the process pages for device access, collecting them into a page group. */
    KPageGroup pg(page_table->GetBlockInfoManager());
    R_TRY(page_table->LockForDeviceAddressSpace(std::addressof(pg), process_address, size, ConvertToKMemoryPermission(device_perm), is_aligned));

    /* Ensure that on partial/failed mapping we don't keep the unmapped tail */
    /* locked. Runs after map_guard below (reverse declaration order), so on */
    /* total failure *out_mapped_size is already 0 and the full range unlocks. */
    ON_SCOPE_EXIT {
        if (*out_mapped_size != size) {
            page_table->UnlockForDeviceAddressSpace(process_address + *out_mapped_size, size - *out_mapped_size);
        }
    };

    /* Map the pages. */
    {
        /* Clear the output size to zero on failure. */
        auto map_guard = SCOPE_GUARD { *out_mapped_size = 0; };

        /* Perform the mapping. */
        R_TRY(this->table.Map(out_mapped_size, pg, device_address, device_perm, refresh_mappings));

        /* We succeeded, so cancel our guard. */
        map_guard.Cancel();
    }

    return ResultSuccess();
}
Result KDeviceAddressSpace::Unmap(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address) {
    /* Unmaps [device_address, device_address + size) from this device address */
    /* space and releases the device-lock on the corresponding process pages. */
    /* NOTE: the old MESOSPHERE_UNIMPLEMENTED() stub was removed; it would */
    /* panic before reaching the real implementation below. */

    /* Check that the address range falls entirely within the device address space. */
    R_UNLESS((this->space_address <= device_address && device_address + size - 1 <= this->space_address + this->space_size - 1), svc::ResultInvalidCurrentMemory());

    /* Lock the address space. */
    KScopedLightLock lk(this->lock);

    /* Make and open a page group for the region being unmapped; the state/ */
    /* attribute masks require the pages to currently be device-shared+locked. */
    KPageGroup pg(page_table->GetBlockInfoManager());
    R_TRY(page_table->MakeAndOpenPageGroupContiguous(std::addressof(pg), process_address, size / PageSize,
                                                     KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap,
                                                     KMemoryPermission_None, KMemoryPermission_None,
                                                     KMemoryAttribute_AnyLocked | KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));

    /* Ensure the page group is closed (references dropped) on scope exit. */
    ON_SCOPE_EXIT { pg.Close(); };

    /* Unmap from the device page table. */
    R_TRY(this->table.Unmap(pg, device_address));

    /* Unlock the pages for the process. */
    R_TRY(page_table->UnlockForDeviceAddressSpace(process_address, size));

    return ResultSuccess();
}
}