ams: globally prefer R_RETURN to return for ams::Result

This commit is contained in:
Michael Scire 2022-03-26 14:48:33 -07:00
parent dd78ede99f
commit bbf22b4c60
325 changed files with 1955 additions and 1993 deletions

View file

@@ -94,7 +94,7 @@ namespace ams::fssystem {
Result Initialize(uintptr_t address, size_t size, size_t block_size, s32 order_max);
/* Initialize the buddy heap, deriving the maximum order from the block size. */
/* Duplicate pre-image `return` removed; R_RETURN is the post-commit form. */
Result Initialize(uintptr_t address, size_t size, size_t block_size) {
    R_RETURN(this->Initialize(address, size, block_size, QueryOrderMax(size, block_size)));
}
Result Initialize(uintptr_t address, size_t size, size_t block_size, s32 order_max, void *work, size_t work_size) {
@@ -103,11 +103,11 @@ namespace ams::fssystem {
const auto aligned_work = util::AlignUp(reinterpret_cast<uintptr_t>(work), alignof(PageList));
m_external_free_lists = reinterpret_cast<PageList *>(aligned_work);
return this->Initialize(address, size, block_size, order_max);
R_RETURN(this->Initialize(address, size, block_size, order_max));
}
/* Initialize with an external work buffer, deriving the maximum order from the block size. */
/* Duplicate pre-image `return` removed; R_RETURN is the post-commit form. */
Result Initialize(uintptr_t address, size_t size, size_t block_size, void *work, size_t work_size) {
    R_RETURN(this->Initialize(address, size, block_size, QueryOrderMax(size, block_size), work, work_size));
}
void Finalize();

View file

@@ -64,7 +64,7 @@ namespace ams::fssystem {
R_TRY(this->GetSize(std::addressof(bs_size)));
R_TRY(fs::IStorage::CheckAccessRange(offset, size, bs_size));
return AlignmentMatchingStorageImpl::Read(m_base_storage, work_buf, sizeof(work_buf), DataAlign, BufferAlign, offset, static_cast<char *>(buffer), size);
R_RETURN(AlignmentMatchingStorageImpl::Read(m_base_storage, work_buf, sizeof(work_buf), DataAlign, BufferAlign, offset, static_cast<char *>(buffer), size));
}
virtual Result Write(s64 offset, const void *buffer, size_t size) override {
@@ -82,16 +82,16 @@ namespace ams::fssystem {
R_TRY(this->GetSize(std::addressof(bs_size)));
R_TRY(fs::IStorage::CheckAccessRange(offset, size, bs_size));
return AlignmentMatchingStorageImpl::Write(m_base_storage, work_buf, sizeof(work_buf), DataAlign, BufferAlign, offset, static_cast<const char *>(buffer), size);
R_RETURN(AlignmentMatchingStorageImpl::Write(m_base_storage, work_buf, sizeof(work_buf), DataAlign, BufferAlign, offset, static_cast<const char *>(buffer), size));
}
/* Flush by delegating directly to the base storage. */
virtual Result Flush() override {
    R_RETURN(m_base_storage->Flush());
}
/* Resize the base storage to the data-aligned size; mark the cached size dirty regardless of result. */
virtual Result SetSize(s64 size) override {
    ON_SCOPE_EXIT { m_is_base_storage_size_dirty = true; };
    R_RETURN(m_base_storage->SetSize(util::AlignUp(size, DataAlign)));
}
virtual Result GetSize(s64 *out) override {
@@ -111,7 +111,7 @@ namespace ams::fssystem {
virtual Result OperateRange(void *dst, size_t dst_size, fs::OperationId op_id, s64 offset, s64 size, const void *src, size_t src_size) override {
if (op_id == fs::OperationId::Invalidate) {
return m_base_storage->OperateRange(fs::OperationId::Invalidate, offset, size);
R_RETURN(m_base_storage->OperateRange(fs::OperationId::Invalidate, offset, size));
} else {
/* Succeed if zero size. */
R_SUCCEED_IF(size == 0);
@@ -127,7 +127,7 @@ namespace ams::fssystem {
const auto aligned_offset_end = util::AlignUp(offset + valid_size, DataAlign);
const auto aligned_size = aligned_offset_end - aligned_offset;
return m_base_storage->OperateRange(dst, dst_size, op_id, aligned_offset, aligned_size, src, src_size);
R_RETURN(m_base_storage->OperateRange(dst, dst_size, op_id, aligned_offset, aligned_size, src, src_size));
}
}
};
@@ -166,7 +166,7 @@ namespace ams::fssystem {
PooledBuffer pooled_buffer;
pooled_buffer.AllocateParticularlyLarge(m_data_align, m_data_align);
return AlignmentMatchingStorageImpl::Read(m_base_storage, pooled_buffer.GetBuffer(), pooled_buffer.GetSize(), m_data_align, BufferAlign, offset, static_cast<char *>(buffer), size);
R_RETURN(AlignmentMatchingStorageImpl::Read(m_base_storage, pooled_buffer.GetBuffer(), pooled_buffer.GetSize(), m_data_align, BufferAlign, offset, static_cast<char *>(buffer), size));
}
virtual Result Write(s64 offset, const void *buffer, size_t size) override {
@@ -184,16 +184,16 @@ namespace ams::fssystem {
PooledBuffer pooled_buffer;
pooled_buffer.AllocateParticularlyLarge(m_data_align, m_data_align);
return AlignmentMatchingStorageImpl::Write(m_base_storage, pooled_buffer.GetBuffer(), pooled_buffer.GetSize(), m_data_align, BufferAlign, offset, static_cast<const char *>(buffer), size);
R_RETURN(AlignmentMatchingStorageImpl::Write(m_base_storage, pooled_buffer.GetBuffer(), pooled_buffer.GetSize(), m_data_align, BufferAlign, offset, static_cast<const char *>(buffer), size));
}
/* Flush by delegating directly to the base storage. */
virtual Result Flush() override {
    R_RETURN(m_base_storage->Flush());
}
/* Resize the base storage to the (runtime) data-aligned size; mark the cached size dirty regardless of result. */
virtual Result SetSize(s64 size) override {
    ON_SCOPE_EXIT { m_is_base_storage_size_dirty = true; };
    R_RETURN(m_base_storage->SetSize(util::AlignUp(size, m_data_align)));
}
virtual Result GetSize(s64 *out) override {
@@ -213,7 +213,7 @@ namespace ams::fssystem {
virtual Result OperateRange(void *dst, size_t dst_size, fs::OperationId op_id, s64 offset, s64 size, const void *src, size_t src_size) override {
if (op_id == fs::OperationId::Invalidate) {
return m_base_storage->OperateRange(fs::OperationId::Invalidate, offset, size);
R_RETURN(m_base_storage->OperateRange(fs::OperationId::Invalidate, offset, size));
} else {
/* Succeed if zero size. */
R_SUCCEED_IF(size == 0);
@@ -229,7 +229,7 @@ namespace ams::fssystem {
const auto aligned_offset_end = util::AlignUp(offset + valid_size, m_data_align);
const auto aligned_size = aligned_offset_end - aligned_offset;
return m_base_storage->OperateRange(dst, dst_size, op_id, aligned_offset, aligned_size, src, src_size);
R_RETURN(m_base_storage->OperateRange(dst, dst_size, op_id, aligned_offset, aligned_size, src, src_size));
}
}
};
@@ -272,16 +272,16 @@ namespace ams::fssystem {
/* Allocate a pooled buffer. */
PooledBuffer pooled_buffer(m_data_align, m_data_align);
return AlignmentMatchingStorageImpl::Write(m_base_storage, pooled_buffer.GetBuffer(), pooled_buffer.GetSize(), m_data_align, BufferAlign, offset, static_cast<const char *>(buffer), size);
R_RETURN(AlignmentMatchingStorageImpl::Write(m_base_storage, pooled_buffer.GetBuffer(), pooled_buffer.GetSize(), m_data_align, BufferAlign, offset, static_cast<const char *>(buffer), size));
}
/* Flush by delegating directly to the base storage. */
virtual Result Flush() override {
    R_RETURN(m_base_storage->Flush());
}
/* Resize the base storage to the data-aligned size; invalidate the cached size regardless of result. */
virtual Result SetSize(s64 size) override {
    ON_SCOPE_EXIT { m_base_storage_size = -1; };
    R_RETURN(m_base_storage->SetSize(util::AlignUp(size, m_data_align)));
}
virtual Result GetSize(s64 *out) override {
@@ -300,7 +300,7 @@ namespace ams::fssystem {
virtual Result OperateRange(void *dst, size_t dst_size, fs::OperationId op_id, s64 offset, s64 size, const void *src, size_t src_size) override {
if (op_id == fs::OperationId::Invalidate) {
return m_base_storage->OperateRange(fs::OperationId::Invalidate, offset, size);
R_RETURN(m_base_storage->OperateRange(fs::OperationId::Invalidate, offset, size));
} else {
/* Succeed if zero size. */
R_SUCCEED_IF(size == 0);
@@ -316,7 +316,7 @@ namespace ams::fssystem {
const auto aligned_offset_end = util::AlignUp(offset + valid_size, m_data_align);
const auto aligned_size = aligned_offset_end - aligned_offset;
return m_base_storage->OperateRange(dst, dst_size, op_id, aligned_offset, aligned_size, src, src_size);
R_RETURN(m_base_storage->OperateRange(dst, dst_size, op_id, aligned_offset, aligned_size, src, src_size));
}
}
};

View file

@@ -26,11 +26,11 @@ namespace ams::fssystem {
static Result Write(fs::IStorage *base_storage, char *work_buf, size_t work_buf_size, size_t data_alignment, size_t buffer_alignment, s64 offset, const char *buffer, size_t size);
/* Convenience overload: unwrap the shared_ptr and forward to the raw-pointer Read. */
static Result Read(std::shared_ptr<fs::IStorage> &base_storage, char *work_buf, size_t work_buf_size, size_t data_alignment, size_t buffer_alignment, s64 offset, char *buffer, size_t size) {
    R_RETURN(Read(base_storage.get(), work_buf, work_buf_size, data_alignment, buffer_alignment, offset, buffer, size));
}
/* Convenience overload: unwrap the shared_ptr and forward to the raw-pointer Write. */
static Result Write(std::shared_ptr<fs::IStorage> &base_storage, char *work_buf, size_t work_buf_size, size_t data_alignment, size_t buffer_alignment, s64 offset, const char *buffer, size_t size) {
    R_RETURN(Write(base_storage.get(), work_buf, work_buf_size, data_alignment, buffer_alignment, offset, buffer, size));
}
};

View file

@@ -166,7 +166,7 @@ namespace ams::fssystem {
std::memcpy(std::addressof(param.entry), m_entry, sizeof(EntryType));
/* Scan. */
return m_tree->ScanContinuousReading<EntryType>(out_info, param);
R_RETURN(m_tree->ScanContinuousReading<EntryType>(out_info, param));
}
}

View file

@@ -113,7 +113,7 @@ namespace ams::fssystem {
bool IsInitialized() const { return m_table.IsInitialized(); }
/* Initialize the backing table with this type's fixed node size and entry layout. */
Result Initialize(IAllocator *allocator, fs::SubStorage node_storage, fs::SubStorage entry_storage, s32 entry_count) {
    R_RETURN(m_table.Initialize(allocator, node_storage, entry_storage, NodeSize, sizeof(Entry), entry_count));
}
void SetStorage(s32 idx, fs::SubStorage storage) {

View file

@@ -89,12 +89,12 @@ namespace ams::fssystem {
/* Convenience overload: supply a zero-initialized DirectoryEntry work area and forward. */
template<impl::IterateDirectoryHandler OnEnterDir, impl::IterateDirectoryHandler OnExitDir, impl::IterateDirectoryHandler OnFile>
Result IterateDirectoryRecursively(fs::fsa::IFileSystem *fs, const fs::Path &root_path, OnEnterDir on_enter_dir, OnExitDir on_exit_dir, OnFile on_file) {
    fs::DirectoryEntry dir_entry = {};
    R_RETURN(IterateDirectoryRecursively(fs, root_path, std::addressof(dir_entry), on_enter_dir, on_exit_dir, on_file));
}
/* Convenience overload: iterate the whole filesystem starting at the root path. */
template<impl::IterateDirectoryHandler OnEnterDir, impl::IterateDirectoryHandler OnExitDir, impl::IterateDirectoryHandler OnFile>
Result IterateDirectoryRecursively(fs::fsa::IFileSystem *fs, OnEnterDir on_enter_dir, OnExitDir on_exit_dir, OnFile on_file) {
    R_RETURN(IterateDirectoryRecursively(fs, fs::MakeConstantPath("/"), on_enter_dir, on_exit_dir, on_file));
}
/* TODO: Cleanup API */
@@ -103,13 +103,13 @@ namespace ams::fssystem {
Result CopyFile(fs::fsa::IFileSystem *dst_fs, fs::fsa::IFileSystem *src_fs, const fs::Path &dst_path, const fs::Path &src_path, void *work_buf, size_t work_buf_size);
/* Convenience overload: copy within a single filesystem (same fs as source and destination). */
ALWAYS_INLINE Result CopyFile(fs::fsa::IFileSystem *fs, const fs::Path &dst_path, const fs::Path &src_path, void *work_buf, size_t work_buf_size) {
    R_RETURN(CopyFile(fs, fs, dst_path, src_path, work_buf, work_buf_size));
}
Result CopyDirectoryRecursively(fs::fsa::IFileSystem *dst_fs, fs::fsa::IFileSystem *src_fs, const fs::Path &dst_path, const fs::Path &src_path, fs::DirectoryEntry *entry, void *work_buf, size_t work_buf_size);
/* Convenience overload: recursive copy within a single filesystem. */
ALWAYS_INLINE Result CopyDirectoryRecursively(fs::fsa::IFileSystem *fs, const fs::Path &dst_path, const fs::Path &src_path, fs::DirectoryEntry *entry, void *work_buf, size_t work_buf_size) {
    R_RETURN(CopyDirectoryRecursively(fs, fs, dst_path, src_path, entry, work_buf, work_buf_size));
}
/* Semaphore adapter class. */
@@ -147,7 +147,7 @@
}
/* Retry f with the fixed policy <2, 25>. Extra parentheses keep the template-argument */
/* comma from splitting the macro argument. */
ALWAYS_INLINE Result RetryToAvoidTargetLocked(auto f) {
    R_RETURN((RetryFinitelyForTargetLocked<2, 25>(f)));
}
void AddCounter(void *counter, size_t counter_size, u64 value);