kern: greatly improve codegen for atomics, scheduler

Michael Scire 2021-01-08 02:13:36 -08:00
parent f051f707ed
commit 4aa18b06e8
19 changed files with 67 additions and 46 deletions
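
Note on the recurring pattern in the hunks below: implicit std::atomic conversions and compound assignments are replaced with explicit load()/fetch_add()/fetch_sub() calls, std::atomic<bool> flags become std::atomic<u8>, small accessors gain ALWAYS_INLINE, and loop bounds stop re-reading atomic members on every iteration. A minimal illustrative sketch of the before/after shape (the class and member names here are invented, not taken from the kernel):

    #include <atomic>
    #include <cstddef>

    class CounterSketch {
        private:
            std::atomic<std::size_t> m_count{0};
        public:
            // Before: "return m_count;" relied on the implicit conversion operator.
            // After: the load is spelled out explicitly.
            std::size_t GetCount() const { return m_count.load(); }

            // Before: "m_count += n;" used the atomic compound assignment.
            // After: the read-modify-write is spelled out explicitly.
            void Add(std::size_t n) { m_count.fetch_add(n); }
    };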

View file

@@ -85,7 +85,7 @@ namespace ams::kern {
  virtual KProcess *GetOwner() const { return nullptr; }
  u32 GetReferenceCount() const {
- return m_ref_count;
+ return m_ref_count.load();
  }
  ALWAYS_INLINE bool IsDerivedFrom(const TypeObj &rhs) const {

View file

@@ -49,9 +49,9 @@ namespace ams::kern {
  constexpr KVirtualAddress GetAddress() const { return m_address; }
  constexpr size_t GetSize() const { return m_size; }
- constexpr size_t GetUsed() const { return m_used; }
- constexpr size_t GetPeak() const { return m_peak; }
- constexpr size_t GetCount() const { return m_count; }
+ constexpr size_t GetUsed() const { return m_used.load(); }
+ constexpr size_t GetPeak() const { return m_peak.load(); }
+ constexpr size_t GetCount() const { return m_count.load(); }
  constexpr bool IsInRange(KVirtualAddress addr) const {
  return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
@@ -65,7 +65,7 @@ namespace ams::kern {
  /* Free blocks to memory. */
  u8 *cur = GetPointer<u8>(m_address + m_size);
- for (size_t i = 0; i < m_count; i++) {
+ for (size_t i = 0; i < sz / sizeof(T); i++) {
  cur -= sizeof(T);
  this->GetImpl()->Free(cur);
  }
@@ -84,13 +84,13 @@ namespace ams::kern {
  this->Initialize(page_allocator);
  /* Allocate until we have the correct number of objects. */
- while (m_count < num_objects) {
+ while (m_count.load() < num_objects) {
  auto *allocated = reinterpret_cast<T *>(m_page_allocator->Allocate());
  MESOSPHERE_ABORT_UNLESS(allocated != nullptr);
  for (size_t i = 0; i < sizeof(PageBuffer) / sizeof(T); i++) {
  this->GetImpl()->Free(allocated + i);
  }
- m_count += sizeof(PageBuffer) / sizeof(T);
+ m_count.fetch_add(sizeof(PageBuffer) / sizeof(T));
  }
  }
@@ -106,7 +106,7 @@ namespace ams::kern {
  for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) {
  this->GetImpl()->Free(allocated + i);
  }
- m_count += sizeof(PageBuffer) / sizeof(T);
+ m_count.fetch_add(sizeof(PageBuffer) / sizeof(T));
  }
  }
  }
@@ -116,8 +116,8 @@ namespace ams::kern {
  new (allocated) T();
  /* Update our tracking. */
- size_t used = ++m_used;
- size_t peak = m_peak;
+ size_t used = m_used.fetch_add(1) + 1;
+ size_t peak = m_peak.load();
  while (peak < used) {
  if (m_peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) {
  break;
@@ -130,7 +130,7 @@ namespace ams::kern {
  void Free(T *t) {
  this->GetImpl()->Free(t);
- --m_used;
+ m_used.fetch_sub(1);
  }
  };
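
The Allocate() path above maintains a high-water mark with a compare_exchange_weak retry loop. A standalone sketch of that idiom with invented names, using the same relaxed ordering as the hunk:

    #include <atomic>
    #include <cstddef>

    class UsageTrackerSketch {
        private:
            std::atomic<std::size_t> m_used{0};
            std::atomic<std::size_t> m_peak{0};
        public:
            void OnAllocate() {
                // fetch_add returns the previous value, so add one to get our new count.
                const std::size_t used = m_used.fetch_add(1) + 1;

                // Raise the recorded peak only if our count is larger. On failure,
                // compare_exchange_weak reloads the current peak and we retry.
                std::size_t peak = m_peak.load();
                while (peak < used) {
                    if (m_peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) {
                        break;
                    }
                }
            }

            void OnFree() { m_used.fetch_sub(1); }
    };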

View file

@@ -303,6 +303,7 @@ namespace ams::kern {
  const auto linear_id = handle_pack.Get<HandleLinearId>();
  const auto reserved = handle_pack.Get<HandleReserved>();
  MESOSPHERE_ASSERT(reserved == 0);
+ MESOSPHERE_UNUSED(reserved);
  /* Validate our indexing information. */
  if (raw_value == 0) {

View file

@@ -50,7 +50,7 @@ namespace ams::kern {
  }
  }
- void Unlock() {
+ ALWAYS_INLINE void Unlock() {
  MESOSPHERE_ASSERT_THIS();
  const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer());
@@ -65,8 +65,8 @@ namespace ams::kern {
  void LockSlowPath(uintptr_t owner, uintptr_t cur_thread);
  void UnlockSlowPath(uintptr_t cur_thread);
- bool IsLocked() const { return m_tag != 0; }
- bool IsLockedByCurrentThread() const { return (m_tag | 0x1ul) == (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer()) | 0x1ul); }
+ ALWAYS_INLINE bool IsLocked() const { return m_tag.load() != 0; }
+ ALWAYS_INLINE bool IsLockedByCurrentThread() const { return (m_tag.load() | 0x1ul) == (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer()) | 0x1ul); }
  };
  using KScopedLightLock = KScopedLock<KLightLock>;
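
A sketch of the ownership test above, assuming (as the "| 0x1ul" masking suggests) that the tag stores the owner thread pointer with the low bit reserved as a contention flag; the type and function names here are invented:

    #include <atomic>
    #include <cstdint>

    class TagLockViewSketch {
        private:
            std::atomic<std::uintptr_t> m_tag{0};
        public:
            bool IsLocked() const { return m_tag.load() != 0; }

            bool IsLockedBy(std::uintptr_t thread_ptr) const {
                // Setting bit 0 on both sides makes the comparison ignore the flag bit,
                // so the check succeeds whether or not the lock is contended.
                return (m_tag.load() | 0x1ul) == (thread_ptr | 0x1ul);
            }
    };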

View file

@@ -203,54 +203,54 @@ namespace ams::kern {
  virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) = 0;
  virtual void FinalizeUpdate(PageLinkedList *page_list) = 0;
- KPageTableImpl &GetImpl() { return m_impl; }
- const KPageTableImpl &GetImpl() const { return m_impl; }
+ ALWAYS_INLINE KPageTableImpl &GetImpl() { return m_impl; }
+ ALWAYS_INLINE const KPageTableImpl &GetImpl() const { return m_impl; }
- bool IsLockedByCurrentThread() const { return m_general_lock.IsLockedByCurrentThread(); }
+ ALWAYS_INLINE bool IsLockedByCurrentThread() const { return m_general_lock.IsLockedByCurrentThread(); }
- bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr) {
+ ALWAYS_INLINE bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr) {
  MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
  return KMemoryLayout::IsLinearMappedPhysicalAddress(m_cached_physical_linear_region, phys_addr);
  }
- bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
+ ALWAYS_INLINE bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
  MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
  return KMemoryLayout::IsLinearMappedPhysicalAddress(m_cached_physical_linear_region, phys_addr, size);
  }
- bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) {
+ ALWAYS_INLINE bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) {
  MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
  return KMemoryLayout::IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
  }
- bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
+ ALWAYS_INLINE bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
  MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
  return KMemoryLayout::IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr, size);
  }
- bool IsHeapPhysicalAddressForFinalize(KPhysicalAddress phys_addr) {
+ ALWAYS_INLINE bool IsHeapPhysicalAddressForFinalize(KPhysicalAddress phys_addr) {
  MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread());
  return KMemoryLayout::IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
  }
- bool IsHeapVirtualAddress(KVirtualAddress virt_addr) {
+ ALWAYS_INLINE bool IsHeapVirtualAddress(KVirtualAddress virt_addr) {
  MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
  return KMemoryLayout::IsHeapVirtualAddress(m_cached_virtual_heap_region, virt_addr);
  }
- bool IsHeapVirtualAddress(KVirtualAddress virt_addr, size_t size) {
+ ALWAYS_INLINE bool IsHeapVirtualAddress(KVirtualAddress virt_addr, size_t size) {
  MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
  return KMemoryLayout::IsHeapVirtualAddress(m_cached_virtual_heap_region, virt_addr, size);
  }
- bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
+ ALWAYS_INLINE bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
  return (m_address_space_start <= addr) && (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) && (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
  }
  private:

View file

@@ -135,6 +135,7 @@ namespace ams::kern {
  }
  void UnpinThread(s32 core_id, KThread *thread) {
+ MESOSPHERE_UNUSED(thread);
  MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
  MESOSPHERE_ASSERT(thread != nullptr);
  MESOSPHERE_ASSERT(m_pinned_threads[core_id] == thread);

View file

@@ -38,7 +38,7 @@ namespace ams::kern {
  static_assert(ams::svc::HighestThreadPriority <= HighestCoreMigrationAllowedPriority);
  struct SchedulingState {
- std::atomic<bool> needs_scheduling;
+ std::atomic<u8> needs_scheduling;
  bool interrupt_task_thread_runnable;
  bool should_count_idle;
  u64 idle_count;
@@ -181,7 +181,7 @@ namespace ams::kern {
  KScopedInterruptDisable intr_disable;
  ON_SCOPE_EXIT { GetCurrentThread().EnableDispatch(); };
- if (m_state.needs_scheduling) {
+ if (m_state.needs_scheduling.load()) {
  Schedule();
  }
  }
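
A sketch of how such a flag is typically produced and consumed; only the std::atomic<u8> member and the .load() check mirror the diff, the surrounding functions are invented:

    #include <atomic>
    #include <cstdint>

    struct SchedulingStateSketch {
        std::atomic<std::uint8_t> needs_scheduling{0};
    };

    // Set by whichever path modifies the run queues.
    void RequestSchedule(SchedulingStateSketch &state) {
        state.needs_scheduling.store(1);
    }

    // Polled before deciding whether to enter the scheduler.
    void MaybeSchedule(SchedulingStateSketch &state) {
        if (state.needs_scheduling.load()) {
            /* Schedule(); -- placeholder for the real entry point. */
        }
    }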

View file

@@ -36,8 +36,8 @@ namespace ams::kern {
  }
  constexpr void Open() {
- const size_t ref_count = ++m_reference_count;
- MESOSPHERE_ASSERT(ref_count > 0);
+ ++m_reference_count;
+ MESOSPHERE_ASSERT(m_reference_count > 0);
  }
  constexpr bool Close() {

View file

@@ -207,7 +207,7 @@ namespace ams::kern {
  s32 m_original_physical_ideal_core_id{};
  s32 m_num_core_migration_disables{};
  ThreadState m_thread_state{};
- std::atomic<bool> m_termination_requested{};
+ std::atomic<u8> m_termination_requested{};
  bool m_wait_cancelled{};
  bool m_cancellable{};
  bool m_signaled{};
@@ -486,7 +486,7 @@ namespace ams::kern {
  MESOSPHERE_UNUSED(core_id);
  }
- s64 GetCpuTime() const { return m_cpu_time; }
+ s64 GetCpuTime() const { return m_cpu_time.load(); }
  s64 GetCpuTime(s32 core_id) const {
  MESOSPHERE_ABORT_UNLESS(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
@@ -530,7 +530,7 @@ namespace ams::kern {
  ALWAYS_INLINE void *GetKernelStackTop() const { return m_kernel_stack_top; }
  ALWAYS_INLINE bool IsTerminationRequested() const {
- return m_termination_requested || this->GetRawState() == ThreadState_Terminated;
+ return m_termination_requested.load() || this->GetRawState() == ThreadState_Terminated;
  }
  size_t GetKernelStackUsage() const;
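
A sketch of the combined check above: termination is reported if either the request flag has been set or the thread has already reached a terminal state. The enum and all names here are invented:

    #include <atomic>
    #include <cstdint>

    enum class ThreadStateSketch : std::uint8_t { Runnable, Waiting, Terminated };

    class ThreadSketch {
        private:
            std::atomic<std::uint8_t> m_termination_requested{0};
            ThreadStateSketch m_state{ThreadStateSketch::Runnable};
        public:
            void RequestTermination() { m_termination_requested.store(1); }

            bool IsTerminationRequested() const {
                return m_termination_requested.load() || m_state == ThreadStateSketch::Terminated;
            }
    };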

View file

@@ -40,8 +40,10 @@ namespace ams::kern {
  MESOSPHERE_PANIC(__VA_ARGS__); \
  } \
  })
- #else
+ #elif defined(MESOSPHERE_PRESERVE_ASSERTION_EXPRESSIONS)
  #define MESOSPHERE_ASSERT_IMPL(expr, ...) do { static_cast<void>(expr); } while (0)
+ #else
+ #define MESOSPHERE_ASSERT_IMPL(expr, ...) static_cast<void>(0)
  #endif
  #define MESOSPHERE_ASSERT(expr) MESOSPHERE_ASSERT_IMPL(expr, "Assertion failed: %s\n", #expr)
@@ -56,8 +58,10 @@ namespace ams::kern {
  #ifdef MESOSPHERE_BUILD_FOR_AUDITING
  #define MESOSPHERE_AUDIT(expr) MESOSPHERE_ASSERT(expr)
- #else
+ #elif defined(MESOSPHERE_PRESERVE_AUDIT_EXPRESSIONS)
  #define MESOSPHERE_AUDIT(expr) do { static_cast<void>(expr); } while (0)
+ #else
+ #define MESOSPHERE_AUDIT(expr) static_cast<void>(0)
  #endif
  #define MESOSPHERE_TODO(arg) ({ constexpr const char *__mesosphere_todo = arg; static_cast<void>(__mesosphere_todo); MESOSPHERE_PANIC("TODO (%s): %s\n", __PRETTY_FUNCTION__, __mesosphere_todo); })
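
A sketch of the three-tier assertion pattern introduced above, using invented macro names. The new bottom tier does not reference the asserted expression at all, which is presumably why this commit also adds MESOSPHERE_UNUSED(...) for values that are only read by assertions (see the handle-table and UnpinThread hunks):

    #include <cstdio>

    #if defined(SKETCH_ENABLE_ASSERTIONS)
    /* Full check: evaluate the expression and complain on failure. */
    #define SKETCH_ASSERT(expr)                               \
        do {                                                  \
            if (!(expr)) {                                    \
                std::printf("Assertion failed: %s\n", #expr); \
            }                                                 \
        } while (0)
    #elif defined(SKETCH_PRESERVE_ASSERTION_EXPRESSIONS)
    /* Expression still compiles and its operands stay "used"; the result is discarded. */
    #define SKETCH_ASSERT(expr) do { static_cast<void>(expr); } while (0)
    #else
    /* Expression vanishes entirely; anything referenced only here becomes unused. */
    #define SKETCH_ASSERT(expr) static_cast<void>(0)
    #endif

    #define SKETCH_UNUSED(x) static_cast<void>(x)

    void CheckReserved(unsigned reserved) {
        SKETCH_ASSERT(reserved == 0);
        SKETCH_UNUSED(reserved); /* Avoids an unused-parameter warning when the assert compiles out. */
    }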