kern: implement additional randomness in KPageHeap allocations

This commit is contained in:
Michael Scire 2022-03-22 15:29:55 -07:00 committed by SciresM
parent 24739f245e
commit 401047f603
6 changed files with 150 additions and 18 deletions

View file

@@ -73,6 +73,7 @@ namespace ams::kern {
size_t Initialize(KPhysicalAddress address, size_t size, KVirtualAddress management, KVirtualAddress management_end, Pool p);
/* Allocate one block at the given block-size index; forwards to m_heap.AllocateBlock, with random selecting randomized placement. */
KPhysicalAddress AllocateBlock(s32 index, bool random) { return m_heap.AllocateBlock(index, random); }
/* Allocate num_pages pages with align_pages alignment from the index'th block level; forwards to m_heap.AllocateAligned. */
KPhysicalAddress AllocateAligned(s32 index, size_t num_pages, size_t align_pages) { return m_heap.AllocateAligned(index, num_pages, align_pages); }
/* Return num_pages pages starting at addr to the backing heap. */
void Free(KPhysicalAddress addr, size_t num_pages) { m_heap.Free(addr, num_pages); }
/* Record reserved_size as the heap's initial used size (forwards to m_heap.SetInitialUsedSize). */
void SetInitialUsedHeapSize(size_t reserved_size) { m_heap.SetInitialUsedSize(reserved_size); }

View file

@@ -20,7 +20,7 @@
namespace ams::kern {
class KPageBitmap {
private:
public:
class RandomBitGenerator {
private:
util::TinyMT m_rng;
@@ -42,12 +42,43 @@ namespace ams::kern {
--m_bits_available;
return rnd_bit;
}
/* Produce num_bits random bits, consuming the buffered entropy and refreshing it from the rng when it runs dry. */
/* NOTE(review): the mask computation assumes cur_bits < 64; this holds only if m_bits_available never reaches 64 when num_bits >= 64 — confirm against RefreshEntropy(), which is not visible here. */
u64 GenerateRandomBits(u32 num_bits) {
u64 result = 0;
/* Iteratively add random bits to our result. */
while (num_bits > 0) {
/* Ensure we have random bits to take from. */
if (m_bits_available == 0) {
this->RefreshEntropy();
}
/* Determine how many bits to take this round. */
const auto cur_bits = std::min(num_bits, m_bits_available);
/* Generate mask for our current bits. */
const u64 mask = (static_cast<u64>(1) << cur_bits) - 1;
/* Add bits to output from our entropy. */
result <<= cur_bits;
result |= (m_entropy & mask);
/* Remove bits from our entropy. */
m_entropy >>= cur_bits;
m_bits_available -= cur_bits;
/* Advance. */
num_bits -= cur_bits;
}
return result;
}
public:
/* Construct with an empty entropy buffer; the TinyMT rng is seeded from the system random source. */
RandomBitGenerator() : m_entropy(), m_bits_available() {
m_rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64()));
}
size_t SelectRandomBit(u64 bitmap) {
u64 SelectRandomBit(u64 bitmap) {
u64 selected = 0;
for (size_t cur_num_bits = BITSIZEOF(bitmap) / 2; cur_num_bits != 0; cur_num_bits /= 2) {
@@ -66,6 +97,17 @@ namespace ams::kern {
return selected;
}
/* Generate a random value in [0, max). */
/* Requires max > 0; the reduction is a plain modulo, so there is a small modulo bias. */
u64 GenerateRandom(u64 max) {
    /* Determine the number of bits we need to cover [0, max). */
    const u64 bits_needed = 1 + (BITSIZEOF(max) - util::CountLeadingZeros(max));

    /* Generate a random value of the desired bitwidth. */
    const u64 rnd = this->GenerateRandomBits(bits_needed);

    /* Reduce into range; rnd % max is exactly rnd - ((rnd / max) * max) for unsigned operands. */
    return rnd % max;
}
};
public:
static constexpr size_t MaxDepth = 4;

View file

@@ -27,7 +27,7 @@ namespace ams::kern {
static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) {
const size_t target_pages = std::max(num_pages, align_pages);
for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
if (target_pages <= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
return static_cast<s32>(i);
}
}
@@ -36,7 +36,7 @@ namespace ams::kern {
static constexpr s32 GetBlockIndex(size_t num_pages) {
for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) {
if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
if (num_pages >= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
return i;
}
}
@@ -44,7 +44,7 @@
}
/* Get the size in bytes of the index'th block level. */
/* The diff rendering left both the pre- and post-change return statements here, making the second unreachable; keep only the static_cast form the commit introduces. */
static constexpr size_t GetBlockSize(size_t index) {
    return static_cast<size_t>(1) << MemoryBlockPageShifts[index];
}
static constexpr size_t GetBlockNumPages(size_t index) {
@@ -128,13 +128,14 @@ namespace ams::kern {
size_t m_initial_used_size;
size_t m_num_blocks;
Block m_blocks[NumMemoryBlockPageShifts];
KPageBitmap::RandomBitGenerator m_rng;
private:
void Initialize(KPhysicalAddress heap_address, size_t heap_size, KVirtualAddress management_address, size_t management_size, const size_t *block_shifts, size_t num_block_shifts);
size_t GetNumFreePages() const;
void FreeBlock(KPhysicalAddress block, s32 index);
public:
KPageHeap() : m_heap_address(Null<KPhysicalAddress>), m_heap_size(), m_initial_used_size(), m_num_blocks(), m_blocks() { /* ... */ }
KPageHeap() : m_heap_address(Null<KPhysicalAddress>), m_heap_size(), m_initial_used_size(), m_num_blocks(), m_blocks(), m_rng() { /* ... */ }
constexpr KPhysicalAddress GetAddress() const { return m_heap_address; }
constexpr size_t GetSize() const { return m_heap_size; }
@@ -158,9 +159,25 @@
m_initial_used_size = m_heap_size - free_size - reserved_size;
}
KPhysicalAddress AllocateBlock(s32 index, bool random);
/* Allocate one block at the given block-size index, optionally at a randomized position. */
KPhysicalAddress AllocateBlock(s32 index, bool random) {
    /* Non-random allocations use a simple linear scan of the free bitmap. */
    if (!random) {
        return this->AllocateByLinearSearch(index);
    }

    /* Randomized allocation treats a whole block as its own alignment unit. */
    const size_t block_pages = m_blocks[index].GetNumPages();
    return this->AllocateByRandom(index, block_pages, block_pages);
}
/* Allocate num_pages pages with align_pages alignment from the index'th block level. */
/* Aligned allocation currently always goes through the randomized allocator. */
KPhysicalAddress AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
/* TODO: linear search support? */
return this->AllocateByRandom(index, num_pages, align_pages);
}
void Free(KPhysicalAddress addr, size_t num_pages);
private:
KPhysicalAddress AllocateByLinearSearch(s32 index);
KPhysicalAddress AllocateByRandom(s32 index, size_t num_pages, size_t align_pages);
static size_t CalculateManagementOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts);
public:
static size_t CalculateManagementOverheadSize(size_t region_size) {