kern: implement additional randomness in KPageHeap allocations

Michael Scire 2022-03-22 15:29:55 -07:00 committed by SciresM
parent 24739f245e
commit 401047f603
6 changed files with 150 additions and 18 deletions

View file

@@ -202,7 +202,7 @@ namespace ams::kern {
         Impl *chosen_manager = nullptr;
         KPhysicalAddress allocated_block = Null<KPhysicalAddress>;
         for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr; chosen_manager = this->GetNextManager(chosen_manager, dir)) {
-            allocated_block = chosen_manager->AllocateBlock(heap_index, true);
+            allocated_block = chosen_manager->AllocateAligned(heap_index, num_pages, align_pages);
             if (allocated_block != Null<KPhysicalAddress>) {
                 break;
             }
@@ -213,12 +213,6 @@ namespace ams::kern {
             return Null<KPhysicalAddress>;
         }
 
-        /* If we allocated more than we need, free some. */
-        const size_t allocated_pages = KPageHeap::GetBlockNumPages(heap_index);
-        if (allocated_pages > num_pages) {
-            chosen_manager->Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
-        }
-
         /* Maintain the optimized memory bitmap, if we should. */
         if (m_has_optimized_process[pool]) {
             chosen_manager->TrackUnoptimizedAllocation(allocated_block, num_pages);

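Note on this hunk: the old AllocateBlock(heap_index, true) handed back a whole power-of-two block, the request always landed at the block's base, and the caller trimmed the fixed tail (the block deleted above). AllocateAligned(heap_index, num_pages, align_pages) instead returns exactly num_pages at the requested alignment, with the page heap itself freeing the leftovers on both sides of a randomly chosen offset. A minimal standalone sketch of the difference, assuming a 32-page block and a 5-page request; the names and numbers are illustrative, not kernel code:

#include <cstddef>
#include <cstdio>
#include <random>

int main() {
    const size_t block_pages = 32;  /* pages in the popped power-of-two block */
    const size_t num_pages   = 5;   /* pages actually requested */
    const size_t align_pages = 1;   /* required alignment, in pages */

    /* Old behavior: the request sits at offset 0; the caller frees the fixed tail. */
    std::printf("old: offset 0, tail freed: %zu pages\n", block_pages - num_pages);

    /* New behavior: any aligned offset inside the block is a valid placement. */
    std::mt19937_64 rng{std::random_device{}()};
    const size_t slots  = 1 + (block_pages - num_pages) / align_pages;
    const size_t offset = (rng() % slots) * align_pages;
    std::printf("new: offset %zu, freed before: %zu pages, after: %zu pages\n",
                offset, offset, block_pages - num_pages - offset);
}

The net effect is that the physical placement of an allocation inside the popped block is no longer predictable from its size class alone.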
View file

@@ -51,11 +51,11 @@ namespace ams::kern {
         return num_free;
     }
 
-    KPhysicalAddress KPageHeap::AllocateBlock(s32 index, bool random) {
+    KPhysicalAddress KPageHeap::AllocateByLinearSearch(s32 index) {
         const size_t needed_size = m_blocks[index].GetSize();
 
         for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) {
-            if (const KPhysicalAddress addr = m_blocks[i].PopBlock(random); addr != Null<KPhysicalAddress>) {
+            if (const KPhysicalAddress addr = m_blocks[i].PopBlock(false); addr != Null<KPhysicalAddress>) {
                 if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) {
                     this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
                 }
@@ -66,6 +66,84 @@ namespace ams::kern {
         return Null<KPhysicalAddress>;
     }
 
+    KPhysicalAddress KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) {
+        /* Get the size and required alignment. */
+        const size_t needed_size = num_pages * PageSize;
+        const size_t align_size  = align_pages * PageSize;
+
+        /* Determine meta-alignment of our desired alignment size. */
+        const size_t align_shift = util::CountTrailingZeros(align_size);
+
+        /* Decide on a block to allocate from. */
+        constexpr size_t MinimumPossibleAlignmentsForRandomAllocation = 4;
+        {
+            /* By default, we'll want to look at all blocks larger than our current one. */
+            s32 max_blocks = static_cast<s32>(m_num_blocks);
+
+            /* Determine the maximum block we should try to allocate from. */
+            size_t possible_alignments = 0;
+            for (s32 i = index; i < max_blocks; ++i) {
+                /* Add the possible alignments from blocks at the current size. */
+                possible_alignments += (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) * m_blocks[i].GetNumFreeBlocks();
+
+                /* If there are enough possible alignments, we don't need to look at larger blocks. */
+                if (possible_alignments >= MinimumPossibleAlignmentsForRandomAllocation) {
+                    max_blocks = i + 1;
+                    break;
+                }
+            }
+
+            /* If we have any possible alignments which require a larger block, we need to pick one. */
+            if (possible_alignments > 0 && index + 1 < max_blocks) {
+                /* Select a random alignment from the possibilities. */
+                const size_t rnd = m_rng.GenerateRandom(possible_alignments);
+
+                /* Determine which block corresponds to the random alignment we chose. */
+                possible_alignments = 0;
+                for (s32 i = index; i < max_blocks; ++i) {
+                    /* Add the possible alignments from blocks at the current size. */
+                    possible_alignments += (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) * m_blocks[i].GetNumFreeBlocks();
+
+                    /* If the current block gets us to our random choice, use the current block. */
+                    if (rnd < possible_alignments) {
+                        index = i;
+                        break;
+                    }
+                }
+            }
+        }
+
+        /* Pop a block from the index we selected. */
+        if (KPhysicalAddress addr = m_blocks[index].PopBlock(true); addr != Null<KPhysicalAddress>) {
+            /* Determine how much size we have left over. */
+            if (const size_t leftover_size = m_blocks[index].GetSize() - needed_size; leftover_size > 0) {
+                /* Determine how many valid alignments we can have. */
+                const size_t possible_alignments = 1 + (leftover_size >> align_shift);
+
+                /* Select a random valid alignment. */
+                const size_t random_offset = m_rng.GenerateRandom(possible_alignments) << align_shift;
+
+                /* Free memory before the random offset. */
+                if (random_offset != 0) {
+                    this->Free(addr, random_offset / PageSize);
+                }
+
+                /* Advance our block by the random offset. */
+                addr += random_offset;
+
+                /* Free memory after our allocated block. */
+                if (random_offset != leftover_size) {
+                    this->Free(addr + needed_size, (leftover_size - random_offset) / PageSize);
+                }
+            }
+
+            /* Return the block we allocated. */
+            return addr;
+        }
+
+        return Null<KPhysicalAddress>;
+    }
+
     void KPageHeap::FreeBlock(KPhysicalAddress block, s32 index) {
         do {
             block = m_blocks[index++].PushBlock(block);

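The two passes in AllocateByRandom implement a weighted random choice over block size classes: every free block contributes one candidate per aligned position at which it could host the request, the first pass stops scanning larger classes once at least MinimumPossibleAlignmentsForRandomAllocation (4) candidates have been counted, and the second pass re-accumulates the same counts to locate the class containing the uniform draw. A minimal sketch of that weighting, assuming two made-up size classes; SizeClass and rng() % total are stand-ins for the kernel's m_blocks and m_rng.GenerateRandom:

#include <cstddef>
#include <cstdio>
#include <random>
#include <vector>

struct SizeClass {
    size_t block_size;  /* bytes per block in this size class */
    size_t num_free;    /* free blocks currently in the class */
};

int main() {
    constexpr size_t PageSize = 0x1000;
    const size_t needed_size = 5 * PageSize;  /* num_pages = 5 */
    const size_t align_shift = 12;            /* align_pages = 1 -> 4 KiB alignment */
    const std::vector<SizeClass> classes = {
        {0x8000, 2},   /* 8-page blocks:  (1 + (8 - 5)) * 2 = 8 candidates */
        {0x20000, 1},  /* 32-page blocks: (1 + (32 - 5)) * 1 = 28 candidates */
    };

    /* Pass 1: count total candidate placements across the classes considered. */
    size_t total = 0;
    for (const auto &c : classes) {
        total += (1 + ((c.block_size - needed_size) >> align_shift)) * c.num_free;
    }

    /* Pass 2: re-walk the same counts to find the class containing the draw. */
    std::mt19937_64 rng{std::random_device{}()};
    const size_t rnd = rng() % total;
    size_t acc = 0;
    for (const auto &c : classes) {
        acc += (1 + ((c.block_size - needed_size) >> align_shift)) * c.num_free;
        if (rnd < acc) {
            std::printf("chose %zu-byte class (draw %zu of %zu)\n", c.block_size, rnd, total);
            break;
        }
    }
}

Counting candidate placements rather than blocks biases the choice toward classes that offer more distinct aligned offsets, so the final placement is approximately uniform over all valid placements the scan considered.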
View file

@@ -3608,13 +3608,13 @@ namespace ams::kern {
 
         /* Allocate the start page as needed. */
         if (aligned_src_start < mapping_src_start) {
-            start_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 0, m_allocate_option);
+            start_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
             R_UNLESS(start_partial_page != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
         }
 
         /* Allocate the end page as needed. */
         if (mapping_src_end < aligned_src_end && (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) {
-            end_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 0, m_allocate_option);
+            end_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
             R_UNLESS(end_partial_page != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
         }
 
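Both call sites allocate a single physical page; the second argument is the required alignment in pages, which the randomized path now feeds into util::CountTrailingZeros(align_pages * PageSize) to derive align_shift. A zero alignment would presumably make that shift computation meaningless, and 1 (plain page alignment) permits every page offset as a placement, maximizing the available randomness. A small sanity sketch of that constraint using the standard <bit> equivalents of the kernel's helper (an assumption; the kernel's own validation may differ):

#include <bit>
#include <cassert>
#include <cstddef>

int main() {
    constexpr size_t PageSize = 0x1000;
    const size_t align_pages = 1;  /* the new argument: one page, i.e. page-aligned */

    /* The alignment must be a nonzero power of two for the shift math to hold. */
    assert(align_pages != 0 && std::has_single_bit(align_pages));

    const size_t align_shift = std::countr_zero(align_pages * PageSize);
    assert(align_shift == 12);  /* 4 KiB pages: every page offset is a valid placement */
}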