strat: use m_ for member variables

Michael Scire 2021-10-10 00:14:06 -07:00
parent ce28591ab2
commit a595c232b9
425 changed files with 8531 additions and 8484 deletions

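The rename is mechanical: members previously declared as "name" and accessed as "this->name" (or bare "name") are declared as "m_name" and accessed directly. A minimal sketch of the convention, using a hypothetical Counter class that is not part of this commit:

/* Before: bare member names, disambiguated with this-> at each use site. */
class CounterBefore {
    private:
        s32 count;
    public:
        constexpr s32 Get() const { return this->count; }
        constexpr void Add(s32 value) { this->count += value; }
};

/* After: the m_ prefix marks members, so this-> is no longer needed. */
class CounterAfter {
    private:
        s32 m_count;
    public:
        constexpr s32 Get() const { return m_count; }
        constexpr void Add(s32 value) { m_count += value; }
};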

@@ -31,43 +31,43 @@ namespace ams::fssystem {
 public:
 using difference_type = s64;
 private:
-s64 offset;
-s32 stride;
+s64 m_offset;
+s32 m_stride;
 public:
-constexpr Offset(s64 offset, s32 stride) : offset(offset), stride(stride) { /* ... */ }
+constexpr Offset(s64 offset, s32 stride) : m_offset(offset), m_stride(stride) { /* ... */ }
-constexpr Offset &operator++() { this->offset += this->stride; return *this; }
-constexpr Offset operator++(int) { Offset ret(*this); this->offset += this->stride; return ret; }
+constexpr Offset &operator++() { m_offset += m_stride; return *this; }
+constexpr Offset operator++(int) { Offset ret(*this); m_offset += m_stride; return ret; }
-constexpr Offset &operator--() { this->offset -= this->stride; return *this; }
-constexpr Offset operator--(int) { Offset ret(*this); this->offset -= this->stride; return ret; }
+constexpr Offset &operator--() { m_offset -= m_stride; return *this; }
+constexpr Offset operator--(int) { Offset ret(*this); m_offset -= m_stride; return ret; }
-constexpr difference_type operator-(const Offset &rhs) const { return (this->offset - rhs.offset) / this->stride; }
+constexpr difference_type operator-(const Offset &rhs) const { return (m_offset - rhs.m_offset) / m_stride; }
-constexpr Offset operator+(difference_type ofs) const { return Offset(this->offset + ofs * this->stride, this->stride); }
-constexpr Offset operator-(difference_type ofs) const { return Offset(this->offset - ofs * this->stride, this->stride); }
+constexpr Offset operator+(difference_type ofs) const { return Offset(m_offset + ofs * m_stride, m_stride); }
+constexpr Offset operator-(difference_type ofs) const { return Offset(m_offset - ofs * m_stride, m_stride); }
-constexpr Offset &operator+=(difference_type ofs) { this->offset += ofs * this->stride; return *this; }
-constexpr Offset &operator-=(difference_type ofs) { this->offset -= ofs * this->stride; return *this; }
+constexpr Offset &operator+=(difference_type ofs) { m_offset += ofs * m_stride; return *this; }
+constexpr Offset &operator-=(difference_type ofs) { m_offset -= ofs * m_stride; return *this; }
-constexpr bool operator==(const Offset &rhs) const { return this->offset == rhs.offset; }
-constexpr bool operator!=(const Offset &rhs) const { return this->offset != rhs.offset; }
+constexpr bool operator==(const Offset &rhs) const { return m_offset == rhs.m_offset; }
+constexpr bool operator!=(const Offset &rhs) const { return m_offset != rhs.m_offset; }
-constexpr s64 Get() const { return this->offset; }
+constexpr s64 Get() const { return m_offset; }
 };
 private:
-const Offset start;
-const s32 count;
-s32 index;
+const Offset m_start;
+const s32 m_count;
+s32 m_index;
 public:
-StorageNode(size_t size, s32 count) : start(NodeHeaderSize, static_cast<s32>(size)), count(count), index(-1) { /* ... */ }
-StorageNode(s64 ofs, size_t size, s32 count) : start(NodeHeaderSize + ofs, static_cast<s32>(size)), count(count), index(-1) { /* ... */ }
+StorageNode(size_t size, s32 count) : m_start(NodeHeaderSize, static_cast<s32>(size)), m_count(count), m_index(-1) { /* ... */ }
+StorageNode(s64 ofs, size_t size, s32 count) : m_start(NodeHeaderSize + ofs, static_cast<s32>(size)), m_count(count), m_index(-1) { /* ... */ }
-s32 GetIndex() const { return this->index; }
+s32 GetIndex() const { return m_index; }
 void Find(const char *buffer, s64 virtual_address) {
-s32 end = this->count;
-auto pos = this->start;
+s32 end = m_count;
+auto pos = m_start;
 while (end > 0) {
 auto half = end / 2;
@@ -84,12 +84,12 @@ namespace ams::fssystem {
 }
 }
-this->index = static_cast<s32>(pos - this->start) - 1;
+m_index = static_cast<s32>(pos - m_start) - 1;
 }
 Result Find(fs::SubStorage &storage, s64 virtual_address) {
-s32 end = this->count;
-auto pos = this->start;
+s32 end = m_count;
+auto pos = m_start;
 while (end > 0) {
 auto half = end / 2;
@@ -106,7 +106,7 @@ namespace ams::fssystem {
 }
 }
-this->index = static_cast<s32>(pos - this->start) - 1;
+m_index = static_cast<s32>(pos - m_start) - 1;
 return ResultSuccess();
 }
 };
@@ -153,19 +153,19 @@ namespace ams::fssystem {
 R_UNLESS(entry_count > 0, fs::ResultInvalidArgument());
 /* Allocate node. */
-R_UNLESS(this->node_l1.Allocate(allocator, node_size), fs::ResultBufferAllocationFailed());
-auto node_guard = SCOPE_GUARD { this->node_l1.Free(node_size); };
+R_UNLESS(m_node_l1.Allocate(allocator, node_size), fs::ResultBufferAllocationFailed());
+auto node_guard = SCOPE_GUARD { m_node_l1.Free(node_size); };
 /* Read node. */
-R_TRY(node_storage.Read(0, this->node_l1.Get(), node_size));
+R_TRY(node_storage.Read(0, m_node_l1.Get(), node_size));
 /* Verify node. */
-R_TRY(this->node_l1->Verify(0, node_size, sizeof(s64)));
+R_TRY(m_node_l1->Verify(0, node_size, sizeof(s64)));
 /* Validate offsets. */
 const auto offset_count = GetOffsetCount(node_size);
 const auto entry_set_count = GetEntrySetCount(node_size, entry_size, entry_count);
-const auto * const node = this->node_l1.Get<Node>();
+const auto * const node = m_node_l1.Get<Node>();
 s64 start_offset;
 if (offset_count < entry_set_count && node->GetCount() < offset_count) {
@@ -179,15 +179,15 @@ namespace ams::fssystem {
 R_UNLESS(start_offset < end_offset, fs::ResultInvalidBucketTreeEntryOffset());
 /* Set member variables. */
-this->node_storage = node_storage;
-this->entry_storage = entry_storage;
-this->node_size = node_size;
-this->entry_size = entry_size;
-this->entry_count = entry_count;
-this->offset_count = offset_count;
-this->entry_set_count = entry_set_count;
-this->start_offset = start_offset;
-this->end_offset = end_offset;
+m_node_storage = node_storage;
+m_entry_storage = entry_storage;
+m_node_size = node_size;
+m_entry_size = entry_size;
+m_entry_count = entry_count;
+m_offset_count = offset_count;
+m_entry_set_count = entry_set_count;
+m_start_offset = start_offset;
+m_end_offset = end_offset;
 /* Cancel guard. */
 node_guard.Cancel();
@@ -200,22 +200,22 @@ namespace ams::fssystem {
 AMS_ASSERT(end_offset > 0);
 AMS_ASSERT(!this->IsInitialized());
-this->node_size = node_size;
-this->end_offset = end_offset;
+m_node_size = node_size;
+m_end_offset = end_offset;
 }
 void BucketTree::Finalize() {
 if (this->IsInitialized()) {
-this->node_storage = fs::SubStorage();
-this->entry_storage = fs::SubStorage();
-this->node_l1.Free(this->node_size);
-this->node_size = 0;
-this->entry_size = 0;
-this->entry_count = 0;
-this->offset_count = 0;
-this->entry_set_count = 0;
-this->start_offset = 0;
-this->end_offset = 0;
+m_node_storage = fs::SubStorage();
+m_entry_storage = fs::SubStorage();
+m_node_l1.Free(m_node_size);
+m_node_size = 0;
+m_entry_size = 0;
+m_entry_count = 0;
+m_offset_count = 0;
+m_entry_set_count = 0;
+m_start_offset = 0;
+m_end_offset = 0;
 }
 }
@@ -235,23 +235,23 @@ namespace ams::fssystem {
 /* Invalidate the node storage cache. */
 {
 s64 storage_size;
-R_TRY(this->node_storage.GetSize(std::addressof(storage_size)));
-R_TRY(this->node_storage.OperateRange(fs::OperationId::Invalidate, 0, storage_size));
+R_TRY(m_node_storage.GetSize(std::addressof(storage_size)));
+R_TRY(m_node_storage.OperateRange(fs::OperationId::Invalidate, 0, storage_size));
 }
 /* Refresh start/end offsets. */
 {
 /* Read node. */
-R_TRY(node_storage.Read(0, this->node_l1.Get(), this->node_size));
+R_TRY(m_node_storage.Read(0, m_node_l1.Get(), m_node_size));
 /* Verify node. */
-R_TRY(this->node_l1->Verify(0, this->node_size, sizeof(s64)));
+R_TRY(m_node_l1->Verify(0, m_node_size, sizeof(s64)));
 /* Validate offsets. */
-const auto * const node = this->node_l1.Get<Node>();
+const auto * const node = m_node_l1.Get<Node>();
 s64 start_offset;
-if (offset_count < this->entry_set_count && node->GetCount() < this->offset_count) {
+if (m_offset_count < m_entry_set_count && node->GetCount() < m_offset_count) {
 start_offset = *node->GetEnd();
 } else {
 start_offset = *node->GetBegin();
@@ -262,15 +262,15 @@ namespace ams::fssystem {
 R_UNLESS(start_offset < end_offset, fs::ResultInvalidBucketTreeEntryOffset());
 /* Set refreshed offsets. */
-this->start_offset = start_offset;
-this->end_offset = end_offset;
+m_start_offset = start_offset;
+m_end_offset = end_offset;
 }
 /* Invalidate the entry storage cache. */
 {
 s64 storage_size;
-R_TRY(this->entry_storage.GetSize(std::addressof(storage_size)));
-R_TRY(this->entry_storage.OperateRange(fs::OperationId::Invalidate, 0, storage_size));
+R_TRY(m_entry_storage.GetSize(std::addressof(storage_size)));
+R_TRY(m_entry_storage.OperateRange(fs::OperationId::Invalidate, 0, storage_size));
 }
 return ResultSuccess();
@@ -278,13 +278,13 @@ namespace ams::fssystem {
 Result BucketTree::Visitor::Initialize(const BucketTree *tree) {
 AMS_ASSERT(tree != nullptr);
-AMS_ASSERT(this->tree == nullptr || this->tree == tree);
+AMS_ASSERT(m_tree == nullptr || m_tree == tree);
-if (this->entry == nullptr) {
-this->entry = tree->GetAllocator()->Allocate(tree->entry_size);
-R_UNLESS(this->entry != nullptr, fs::ResultBufferAllocationFailed());
+if (m_entry == nullptr) {
+m_entry = tree->GetAllocator()->Allocate(tree->m_entry_size);
+R_UNLESS(m_entry != nullptr, fs::ResultBufferAllocationFailed());
-this->tree = tree;
+m_tree = tree;
 }
 return ResultSuccess();
@@ -294,35 +294,35 @@ namespace ams::fssystem {
 R_UNLESS(this->IsValid(), fs::ResultOutOfRange());
 /* Invalidate our index, and read the header for the next index. */
-auto entry_index = this->entry_index + 1;
-if (entry_index == this->entry_set.info.count) {
-const auto entry_set_index = this->entry_set.info.index + 1;
-R_UNLESS(entry_set_index < this->entry_set_count, fs::ResultOutOfRange());
+auto entry_index = m_entry_index + 1;
+if (entry_index == m_entry_set.info.count) {
+const auto entry_set_index = m_entry_set.info.index + 1;
+R_UNLESS(entry_set_index < m_entry_set_count, fs::ResultOutOfRange());
-this->entry_index = -1;
+m_entry_index = -1;
-const auto end = this->entry_set.info.end;
+const auto end = m_entry_set.info.end;
-const auto entry_set_size = this->tree->node_size;
+const auto entry_set_size = m_tree->m_node_size;
 const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
-R_TRY(this->tree->entry_storage.Read(entry_set_offset, std::addressof(this->entry_set), sizeof(EntrySetHeader)));
-R_TRY(this->entry_set.header.Verify(entry_set_index, entry_set_size, this->tree->entry_size));
+R_TRY(m_tree->m_entry_storage.Read(entry_set_offset, std::addressof(m_entry_set), sizeof(EntrySetHeader)));
+R_TRY(m_entry_set.header.Verify(entry_set_index, entry_set_size, m_tree->m_entry_size));
-R_UNLESS(this->entry_set.info.start == end && this->entry_set.info.start < this->entry_set.info.end, fs::ResultInvalidBucketTreeEntrySetOffset());
+R_UNLESS(m_entry_set.info.start == end && m_entry_set.info.start < m_entry_set.info.end, fs::ResultInvalidBucketTreeEntrySetOffset());
 entry_index = 0;
 } else {
-this->entry_index = 1;
+m_entry_index = 1;
 }
 /* Read the new entry. */
-const auto entry_size = this->tree->entry_size;
-const auto entry_offset = impl::GetBucketTreeEntryOffset(this->entry_set.info.index, this->tree->node_size, entry_size, entry_index);
-R_TRY(this->tree->entry_storage.Read(entry_offset, std::addressof(this->entry), entry_size));
+const auto entry_size = m_tree->m_entry_size;
+const auto entry_offset = impl::GetBucketTreeEntryOffset(m_entry_set.info.index, m_tree->m_node_size, entry_size, entry_index);
+R_TRY(m_tree->m_entry_storage.Read(entry_offset, std::addressof(m_entry), entry_size));
 /* Note that we changed index. */
-this->entry_index = entry_index;
+m_entry_index = entry_index;
 return ResultSuccess();
 }
@@ -330,52 +330,52 @@ namespace ams::fssystem {
 R_UNLESS(this->IsValid(), fs::ResultOutOfRange());
 /* Invalidate our index, and read the heasder for the previous index. */
-auto entry_index = this->entry_index;
+auto entry_index = m_entry_index;
 if (entry_index == 0) {
-R_UNLESS(this->entry_set.info.index > 0, fs::ResultOutOfRange());
+R_UNLESS(m_entry_set.info.index > 0, fs::ResultOutOfRange());
-this->entry_index = -1;
+m_entry_index = -1;
-const auto start = this->entry_set.info.start;
+const auto start = m_entry_set.info.start;
-const auto entry_set_size = this->tree->node_size;
-const auto entry_set_index = this->entry_set.info.index - 1;
+const auto entry_set_size = m_tree->m_node_size;
+const auto entry_set_index = m_entry_set.info.index - 1;
 const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
-R_TRY(this->tree->entry_storage.Read(entry_set_offset, std::addressof(this->entry_set), sizeof(EntrySetHeader)));
-R_TRY(this->entry_set.header.Verify(entry_set_index, entry_set_size, this->tree->entry_size));
+R_TRY(m_tree->m_entry_storage.Read(entry_set_offset, std::addressof(m_entry_set), sizeof(EntrySetHeader)));
+R_TRY(m_entry_set.header.Verify(entry_set_index, entry_set_size, m_tree->m_entry_size));
-R_UNLESS(this->entry_set.info.end == start && this->entry_set.info.start < this->entry_set.info.end, fs::ResultInvalidBucketTreeEntrySetOffset());
+R_UNLESS(m_entry_set.info.end == start && m_entry_set.info.start < m_entry_set.info.end, fs::ResultInvalidBucketTreeEntrySetOffset());
-entry_index = this->entry_set.info.count;
+entry_index = m_entry_set.info.count;
 } else {
-this->entry_index = -1;
+m_entry_index = -1;
 }
 --entry_index;
 /* Read the new entry. */
-const auto entry_size = this->tree->entry_size;
-const auto entry_offset = impl::GetBucketTreeEntryOffset(this->entry_set.info.index, this->tree->node_size, entry_size, entry_index);
-R_TRY(this->tree->entry_storage.Read(entry_offset, std::addressof(this->entry), entry_size));
+const auto entry_size = m_tree->m_entry_size;
+const auto entry_offset = impl::GetBucketTreeEntryOffset(m_entry_set.info.index, m_tree->m_node_size, entry_size, entry_index);
+R_TRY(m_tree->m_entry_storage.Read(entry_offset, std::addressof(m_entry), entry_size));
 /* Note that we changed index. */
-this->entry_index = entry_index;
+m_entry_index = entry_index;
 return ResultSuccess();
 }
 Result BucketTree::Visitor::Find(s64 virtual_address) {
-AMS_ASSERT(this->tree != nullptr);
+AMS_ASSERT(m_tree != nullptr);
 /* Get the node. */
-const auto * const node = this->tree->node_l1.Get<Node>();
+const auto * const node = m_tree->m_node_l1.Get<Node>();
 R_UNLESS(virtual_address < node->GetEndOffset(), fs::ResultOutOfRange());
 /* Get the entry set index. */
 s32 entry_set_index = -1;
-if (this->tree->IsExistOffsetL2OnL1() && virtual_address < node->GetBeginOffset()) {
+if (m_tree->IsExistOffsetL2OnL1() && virtual_address < node->GetBeginOffset()) {
 const auto start = node->GetEnd();
-const auto end = node->GetBegin() + tree->offset_count;
+const auto end = node->GetBegin() + m_tree->m_offset_count;
 auto pos = std::upper_bound(start, end, virtual_address);
 R_UNLESS(start < pos, fs::ResultOutOfRange());
@@ -390,9 +390,9 @@ namespace ams::fssystem {
 R_UNLESS(start < pos, fs::ResultOutOfRange());
 --pos;
-if (this->tree->IsExistL2()) {
+if (m_tree->IsExistL2()) {
 const auto node_index = static_cast<s32>(pos - start);
-R_UNLESS(0 <= node_index && node_index < this->tree->offset_count, fs::ResultInvalidBucketTreeNodeOffset());
+R_UNLESS(0 <= node_index && node_index < m_tree->m_offset_count, fs::ResultInvalidBucketTreeNodeOffset());
 R_TRY(this->FindEntrySet(std::addressof(entry_set_index), virtual_address, node_index));
 } else {
@@ -401,18 +401,18 @@ namespace ams::fssystem {
 }
 /* Validate the entry set index. */
-R_UNLESS(0 <= entry_set_index && entry_set_index < this->tree->entry_set_count, fs::ResultInvalidBucketTreeNodeOffset());
+R_UNLESS(0 <= entry_set_index && entry_set_index < m_tree->m_entry_set_count, fs::ResultInvalidBucketTreeNodeOffset());
 /* Find the entry. */
 R_TRY(this->FindEntry(virtual_address, entry_set_index));
 /* Set count. */
-this->entry_set_count = this->tree->entry_set_count;
+m_entry_set_count = m_tree->m_entry_set_count;
 return ResultSuccess();
 }
 Result BucketTree::Visitor::FindEntrySet(s32 *out_index, s64 virtual_address, s32 node_index) {
-const auto node_size = this->tree->node_size;
+const auto node_size = m_tree->m_node_size;
 PooledBuffer pool(node_size, 1);
 if (node_size <= pool.GetSize()) {
@@ -425,9 +425,9 @@ namespace ams::fssystem {
 Result BucketTree::Visitor::FindEntrySetWithBuffer(s32 *out_index, s64 virtual_address, s32 node_index, char *buffer) {
 /* Calculate node extents. */
-const auto node_size = this->tree->node_size;
-const auto node_offset = (node_index + 1) * static_cast<s64>(node_size);
-fs::SubStorage &storage = tree->node_storage;
+const auto node_size = m_tree->m_node_size;
+const auto node_offset = (node_index + 1) * static_cast<s64>(node_size);
+fs::SubStorage &storage = m_tree->m_node_storage;
 /* Read the node. */
 R_TRY(storage.Read(node_offset, buffer, node_size));
@@ -443,15 +443,15 @@ namespace ams::fssystem {
 R_UNLESS(node.GetIndex() >= 0, fs::ResultInvalidBucketTreeVirtualOffset());
 /* Return the index. */
-*out_index = this->tree->GetEntrySetIndex(header.index, node.GetIndex());
+*out_index = m_tree->GetEntrySetIndex(header.index, node.GetIndex());
 return ResultSuccess();
 }
 Result BucketTree::Visitor::FindEntrySetWithoutBuffer(s32 *out_index, s64 virtual_address, s32 node_index) {
 /* Calculate node extents. */
-const auto node_size = this->tree->node_size;
-const auto node_offset = (node_index + 1) * static_cast<s64>(node_size);
-fs::SubStorage &storage = tree->node_storage;
+const auto node_size = m_tree->m_node_size;
+const auto node_offset = (node_index + 1) * static_cast<s64>(node_size);
+fs::SubStorage &storage = m_tree->m_node_storage;
 /* Read and validate the header. */
 NodeHeader header;
@@ -464,12 +464,12 @@ namespace ams::fssystem {
 R_UNLESS(node.GetIndex() >= 0, fs::ResultOutOfRange());
 /* Return the index. */
-*out_index = this->tree->GetEntrySetIndex(header.index, node.GetIndex());
+*out_index = m_tree->GetEntrySetIndex(header.index, node.GetIndex());
 return ResultSuccess();
 }
 Result BucketTree::Visitor::FindEntry(s64 virtual_address, s32 entry_set_index) {
-const auto entry_set_size = this->tree->node_size;
+const auto entry_set_size = m_tree->m_node_size;
 PooledBuffer pool(entry_set_size, 1);
 if (entry_set_size <= pool.GetSize()) {
@@ -482,10 +482,10 @@ namespace ams::fssystem {
 Result BucketTree::Visitor::FindEntryWithBuffer(s64 virtual_address, s32 entry_set_index, char *buffer) {
 /* Calculate entry set extents. */
-const auto entry_size = this->tree->entry_size;
-const auto entry_set_size = this->tree->node_size;
+const auto entry_size = m_tree->m_entry_size;
+const auto entry_set_size = m_tree->m_node_size;
 const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
-fs::SubStorage &storage = tree->entry_storage;
+fs::SubStorage &storage = m_tree->m_entry_storage;
 /* Read the entry set. */
 R_TRY(storage.Read(entry_set_offset, buffer, entry_set_size));
@@ -503,21 +503,21 @@ namespace ams::fssystem {
 /* Copy the data into entry. */
 const auto entry_index = node.GetIndex();
 const auto entry_offset = impl::GetBucketTreeEntryOffset(0, entry_size, entry_index);
-std::memcpy(this->entry, buffer + entry_offset, entry_size);
+std::memcpy(m_entry, buffer + entry_offset, entry_size);
 /* Set our entry set/index. */
-this->entry_set = entry_set;
-this->entry_index = entry_index;
+m_entry_set = entry_set;
+m_entry_index = entry_index;
 return ResultSuccess();
 }
 Result BucketTree::Visitor::FindEntryWithoutBuffer(s64 virtual_address, s32 entry_set_index) {
 /* Calculate entry set extents. */
-const auto entry_size = this->tree->entry_size;
-const auto entry_set_size = this->tree->node_size;
+const auto entry_size = m_tree->m_entry_size;
+const auto entry_set_size = m_tree->m_node_size;
 const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
-fs::SubStorage &storage = tree->entry_storage;
+fs::SubStorage &storage = m_tree->m_entry_storage;
 /* Read and validate the entry_set. */
 EntrySetHeader entry_set;
@@ -532,11 +532,11 @@ namespace ams::fssystem {
 /* Copy the data into entry. */
 const auto entry_index = node.GetIndex();
 const auto entry_offset = impl::GetBucketTreeEntryOffset(entry_set_offset, entry_size, entry_index);
-R_TRY(storage.Read(entry_offset, this->entry, entry_size));
+R_TRY(storage.Read(entry_offset, m_entry, entry_size));
 /* Set our entry set/index. */
-this->entry_set = entry_set;
-this->entry_index = entry_index;
+m_entry_set = entry_set;
+m_entry_index = entry_index;
 return ResultSuccess();
 }