Mirror of https://github.com/Atmosphere-NX/Atmosphere.git (synced 2025-05-29 14:05:17 -04:00)
kern/util: use custom atomics wrapper to substantially improve codegen
parent 52332e8d75
commit d74f364107
26 changed files with 688 additions and 260 deletions
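The commit introduces an ams::util::Atomic<T> wrapper with an AArch64 backend written directly against acquire/release and exclusive load/store instructions, plus a std::atomic-based generic fallback, the intent being tighter codegen than the generic std::atomic lowering. For orientation, a minimal usage sketch (Atomic, FetchAdd and CompareExchangeStrong come from the new headers below; the counter, the namespace and the function names are invented for this example):

/* Illustrative only: a hypothetical counter built on the wrapper this commit adds.   */
/* g_counter, IncrementAndGet and TrySetFromZero are not part of the commit.          */
#include <vapours.hpp>

namespace example {

    ams::util::Atomic<u32> g_counter(0);

    u32 IncrementAndGet() {
        /* FetchAdd returns the value the counter held before the addition. */
        return g_counter.FetchAdd(1) + 1;
    }

    bool TrySetFromZero(u32 value) {
        /* Strong compare-exchange: succeeds only if the counter is still zero. */
        u32 expected = 0;
        return g_counter.CompareExchangeStrong(expected, value);
    }

}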
@@ -53,6 +53,8 @@
#include <vapours/util/util_fixed_map.hpp>
#include <vapours/util/util_fixed_set.hpp>

#include <vapours/util/util_atomic.hpp>

#ifdef ATMOSPHERE_IS_STRATOSPHERE
#include <vapours/util/util_mutex_utils.hpp>
#endif
@@ -0,0 +1,323 @@
/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <vapours/common.hpp>
#include <vapours/assert.hpp>

namespace ams::util {

    namespace impl {

        template<typename T>
        struct AtomicIntegerStorage;

        template<typename T> requires (sizeof(T) == sizeof(u8))
        struct AtomicIntegerStorage<T> {
            using Type = u8;
        };

        template<typename T> requires (sizeof(T) == sizeof(u16))
        struct AtomicIntegerStorage<T> {
            using Type = u16;
        };

        template<typename T> requires (sizeof(T) == sizeof(u32))
        struct AtomicIntegerStorage<T> {
            using Type = u32;
        };

        template<typename T> requires (sizeof(T) == sizeof(u64))
        struct AtomicIntegerStorage<T> {
            using Type = u64;
        };

        template<typename T>
        concept UsableAtomicType = (sizeof(T) <= sizeof(u64)) && !std::is_const<T>::value && !std::is_volatile<T>::value && (std::is_pointer<T>::value || requires (const T &t) {
            std::bit_cast<typename AtomicIntegerStorage<T>::Type, T>(t);
        });

        template<UsableAtomicType T>
        using AtomicStorage = typename AtomicIntegerStorage<T>::Type;

        static_assert(std::same_as<AtomicStorage<void *>, uintptr_t>);
        static_assert(std::same_as<AtomicStorage<s8>, u8>);
        static_assert(std::same_as<AtomicStorage<u8>, u8>);
        static_assert(std::same_as<AtomicStorage<s16>, u16>);
        static_assert(std::same_as<AtomicStorage<u16>, u16>);
        static_assert(std::same_as<AtomicStorage<s32>, u32>);
        static_assert(std::same_as<AtomicStorage<u32>, u32>);
        static_assert(std::same_as<AtomicStorage<s64>, u64>);
        static_assert(std::same_as<AtomicStorage<u64>, u64>);

        ALWAYS_INLINE void ClearExclusiveForAtomic() {
            __asm__ __volatile__("clrex" ::: "memory");
        }

        #define AMS_UTIL_IMPL_DEFINE_ATOMIC_LOAD_FUNCTION(_FNAME_, _MNEMONIC_) \
            template<std::unsigned_integral T> T _FNAME_ ##ForAtomic(const volatile T *); \
            \
            template<> ALWAYS_INLINE u8 _FNAME_ ##ForAtomic(const volatile u8 *p) { u8 v; __asm__ __volatile__(_MNEMONIC_ "b %w[v], %[p]" : [v]"=r"(v) : [p]"Q"(*p) : "memory"); return v; } \
            template<> ALWAYS_INLINE u16 _FNAME_ ##ForAtomic(const volatile u16 *p) { u16 v; __asm__ __volatile__(_MNEMONIC_ "h %w[v], %[p]" : [v]"=r"(v) : [p]"Q"(*p) : "memory"); return v; } \
            template<> ALWAYS_INLINE u32 _FNAME_ ##ForAtomic(const volatile u32 *p) { u32 v; __asm__ __volatile__(_MNEMONIC_ " %w[v], %[p]" : [v]"=r"(v) : [p]"Q"(*p) : "memory"); return v; } \
            template<> ALWAYS_INLINE u64 _FNAME_ ##ForAtomic(const volatile u64 *p) { u64 v; __asm__ __volatile__(_MNEMONIC_ " %[v], %[p]" : [v]"=r"(v) : [p]"Q"(*p) : "memory"); return v; }

        AMS_UTIL_IMPL_DEFINE_ATOMIC_LOAD_FUNCTION(LoadAcquire, "ldar")
        AMS_UTIL_IMPL_DEFINE_ATOMIC_LOAD_FUNCTION(LoadExclusive, "ldxr")
        AMS_UTIL_IMPL_DEFINE_ATOMIC_LOAD_FUNCTION(LoadAcquireExclusive, "ldaxr")

        #undef AMS_UTIL_IMPL_DEFINE_ATOMIC_LOAD_FUNCTION

        template<std::unsigned_integral T> void StoreReleaseForAtomic(volatile T *, T);

        template<> ALWAYS_INLINE void StoreReleaseForAtomic(volatile u8 *p, u8 v) { __asm__ __volatile__("stlrb %w[v], %[p]" : : [v]"r"(v), [p]"Q"(*p) : "memory"); }
        template<> ALWAYS_INLINE void StoreReleaseForAtomic(volatile u16 *p, u16 v) { __asm__ __volatile__("stlrh %w[v], %[p]" : : [v]"r"(v), [p]"Q"(*p) : "memory"); }
        template<> ALWAYS_INLINE void StoreReleaseForAtomic(volatile u32 *p, u32 v) { __asm__ __volatile__("stlr %w[v], %[p]" : : [v]"r"(v), [p]"Q"(*p) : "memory"); }
        template<> ALWAYS_INLINE void StoreReleaseForAtomic(volatile u64 *p, u64 v) { __asm__ __volatile__("stlr %[v], %[p]" : : [v]"r"(v), [p]"Q"(*p) : "memory"); }

        #define AMS_UTIL_IMPL_DEFINE_ATOMIC_STORE_EXCLUSIVE_FUNCTION(_FNAME_, _MNEMONIC_) \
            template<std::unsigned_integral T> bool _FNAME_ ##ForAtomic(volatile T *, T); \
            \
            template<> ALWAYS_INLINE bool _FNAME_ ##ForAtomic(volatile u8 *p, u8 v) { int result; __asm__ __volatile__(_MNEMONIC_ "b %w[result], %w[v], %[p]" : [result]"=&r"(result) : [v]"r"(v), [p]"Q"(*p) : "memory"); return result == 0; } \
            template<> ALWAYS_INLINE bool _FNAME_ ##ForAtomic(volatile u16 *p, u16 v) { int result; __asm__ __volatile__(_MNEMONIC_ "h %w[result], %w[v], %[p]" : [result]"=&r"(result) : [v]"r"(v), [p]"Q"(*p) : "memory"); return result == 0; } \
            template<> ALWAYS_INLINE bool _FNAME_ ##ForAtomic(volatile u32 *p, u32 v) { int result; __asm__ __volatile__(_MNEMONIC_ " %w[result], %w[v], %[p]" : [result]"=&r"(result) : [v]"r"(v), [p]"Q"(*p) : "memory"); return result == 0; } \
            template<> ALWAYS_INLINE bool _FNAME_ ##ForAtomic(volatile u64 *p, u64 v) { int result; __asm__ __volatile__(_MNEMONIC_ " %w[result], %[v], %[p]" : [result]"=&r"(result) : [v]"r"(v), [p]"Q"(*p) : "memory"); return result == 0; }

        AMS_UTIL_IMPL_DEFINE_ATOMIC_STORE_EXCLUSIVE_FUNCTION(StoreExclusive, "stxr")
        AMS_UTIL_IMPL_DEFINE_ATOMIC_STORE_EXCLUSIVE_FUNCTION(StoreReleaseExclusive, "stlxr")

        #undef AMS_UTIL_IMPL_DEFINE_ATOMIC_STORE_EXCLUSIVE_FUNCTION

    }

    template<impl::UsableAtomicType T>
    class Atomic {
        NON_COPYABLE(Atomic);
        NON_MOVEABLE(Atomic);
        private:
            using StorageType = impl::AtomicStorage<T>;

            static constexpr bool IsIntegral = std::integral<T>;

            static constexpr ALWAYS_INLINE T ConvertToType(StorageType s) {
                if constexpr (std::integral<T>) {
                    return static_cast<T>(s);
                } else if constexpr(std::is_pointer<T>::value) {
                    return reinterpret_cast<T>(s);
                } else {
                    return std::bit_cast<T>(s);
                }
            }

            static constexpr ALWAYS_INLINE StorageType ConvertToStorage(T arg) {
                if constexpr (std::integral<T>) {
                    return static_cast<StorageType>(arg);
                } else if constexpr(std::is_pointer<T>::value) {
                    if (std::is_constant_evaluated() && arg == nullptr) {
                        return 0;
                    }

                    return reinterpret_cast<StorageType>(arg);
                } else {
                    return std::bit_cast<StorageType>(arg);
                }
            }
        private:
            StorageType m_v;
        private:
            ALWAYS_INLINE volatile StorageType *GetStoragePointer() { return reinterpret_cast< volatile StorageType *>(std::addressof(m_v)); }
            ALWAYS_INLINE const volatile StorageType *GetStoragePointer() const { return reinterpret_cast<const volatile StorageType *>(std::addressof(m_v)); }
        public:
            ALWAYS_INLINE explicit Atomic() { /* ... */ }
            constexpr ALWAYS_INLINE explicit Atomic(T v) : m_v(ConvertToStorage(v)) { /* ... */ }

            constexpr ALWAYS_INLINE T operator=(T desired) {
                if (std::is_constant_evaluated()) {
                    m_v = ConvertToStorage(desired);
                } else {
                    this->Store(desired);
                }
                return desired;
            }

            template<std::memory_order Order = std::memory_order_seq_cst>
            ALWAYS_INLINE T Load() const {
                if constexpr (Order != std::memory_order_relaxed) {
                    return ConvertToType(impl::LoadAcquireForAtomic(this->GetStoragePointer()));
                } else {
                    return ConvertToType(*this->GetStoragePointer());
                }
            }

            template<std::memory_order Order = std::memory_order_seq_cst>
            ALWAYS_INLINE void Store(T arg) {
                if constexpr (Order != std::memory_order_relaxed) {
                    impl::StoreReleaseForAtomic(this->GetStoragePointer(), ConvertToStorage(arg));
                } else {
                    *this->GetStoragePointer() = ConvertToStorage(arg);
                }
            }

            template<std::memory_order Order = std::memory_order_seq_cst>
            ALWAYS_INLINE T Exchange(T arg) {
                volatile StorageType * const p = this->GetStoragePointer();
                const StorageType s = ConvertToStorage(arg);

                StorageType current;

                if constexpr (Order == std::memory_order_relaxed) {
                    do {
                        current = impl::LoadExclusiveForAtomic(p);
                    } while (AMS_UNLIKELY(!impl::StoreExclusiveForAtomic(p, s)));
                } else if constexpr (Order == std::memory_order_consume || Order == std::memory_order_acquire) {
                    do {
                        current = impl::LoadAcquireExclusiveForAtomic(p);
                    } while (AMS_UNLIKELY(!impl::StoreExclusiveForAtomic(p, s)));
                } else if constexpr (Order == std::memory_order_release) {
                    do {
                        current = impl::LoadExclusiveForAtomic(p);
                    } while (AMS_UNLIKELY(!impl::StoreReleaseExclusiveForAtomic(p, s)));
                } else if constexpr (Order == std::memory_order_acq_rel || Order == std::memory_order_seq_cst) {
                    do {
                        current = impl::LoadAcquireExclusiveForAtomic(p);
                    } while (AMS_UNLIKELY(!impl::StoreReleaseExclusiveForAtomic(p, s)));
                } else {
                    static_assert(Order != Order, "Invalid memory order");
                }

                return current;
            }

            template<std::memory_order Order = std::memory_order_seq_cst>
            ALWAYS_INLINE bool CompareExchangeWeak(T &expected, T desired) {
                volatile StorageType * const p = this->GetStoragePointer();
                const StorageType e = ConvertToStorage(expected);
                const StorageType d = ConvertToStorage(desired);

                if constexpr (Order == std::memory_order_relaxed) {
                    const StorageType current = impl::LoadExclusiveForAtomic(p);
                    if (AMS_UNLIKELY(current != e)) {
                        impl::ClearExclusiveForAtomic();
                        expected = ConvertToType(current);
                        return false;
                    }

                    return AMS_LIKELY(impl::StoreExclusiveForAtomic(p, d));
                } else if constexpr (Order == std::memory_order_consume || Order == std::memory_order_acquire) {
                    const StorageType current = impl::LoadAcquireExclusiveForAtomic(p);
                    if (AMS_UNLIKELY(current != e)) {
                        impl::ClearExclusiveForAtomic();
                        expected = ConvertToType(current);
                        return false;
                    }

                    return AMS_LIKELY(impl::StoreExclusiveForAtomic(p, d));
                } else if constexpr (Order == std::memory_order_release) {
                    const StorageType current = impl::LoadExclusiveForAtomic(p);
                    if (AMS_UNLIKELY(current != e)) {
                        impl::ClearExclusiveForAtomic();
                        expected = ConvertToType(current);
                        return false;
                    }

                    return AMS_LIKELY(impl::StoreReleaseExclusiveForAtomic(p, d));
                } else if constexpr (Order == std::memory_order_acq_rel || Order == std::memory_order_seq_cst) {
                    const StorageType current = impl::LoadAcquireExclusiveForAtomic(p);
                    if (AMS_UNLIKELY(current != e)) {
                        impl::ClearExclusiveForAtomic();
                        expected = ConvertToType(current);
                        return false;
                    }

                    return AMS_LIKELY(impl::StoreReleaseExclusiveForAtomic(p, d));
                } else {
                    static_assert(Order != Order, "Invalid memory order");
                }
            }

            template<std::memory_order Order = std::memory_order_seq_cst>
            ALWAYS_INLINE bool CompareExchangeStrong(T &expected, T desired) {
                volatile StorageType * const p = this->GetStoragePointer();
                const StorageType e = ConvertToStorage(expected);
                const StorageType d = ConvertToStorage(desired);

                if constexpr (Order == std::memory_order_relaxed) {
                    StorageType current;
                    do {
                        if (current = impl::LoadExclusiveForAtomic(p); current != e) {
                            impl::ClearExclusiveForAtomic();
                            expected = ConvertToType(current);
                            return false;
                        }
                    } while (!impl::StoreExclusiveForAtomic(p, d));
                } else if constexpr (Order == std::memory_order_consume || Order == std::memory_order_acquire) {
                    StorageType current;
                    do {
                        if (current = impl::LoadAcquireExclusiveForAtomic(p); current != e) {
                            impl::ClearExclusiveForAtomic();
                            expected = ConvertToType(current);
                            return false;
                        }
                    } while (!impl::StoreExclusiveForAtomic(p, d));
                } else if constexpr (Order == std::memory_order_release) {
                    StorageType current;
                    do {
                        if (current = impl::LoadExclusiveForAtomic(p); current != e) {
                            impl::ClearExclusiveForAtomic();
                            expected = ConvertToType(current);
                            return false;
                        }
                    } while (!impl::StoreReleaseExclusiveForAtomic(p, d));
                } else if constexpr (Order == std::memory_order_acq_rel || Order == std::memory_order_seq_cst) {
                    StorageType current;
                    do {
                        if (current = impl::LoadAcquireExclusiveForAtomic(p); current != e) {
                            impl::ClearExclusiveForAtomic();
                            expected = ConvertToType(current);
                            return false;
                        }
                    } while (!impl::StoreReleaseExclusiveForAtomic(p, d));
                } else {
                    static_assert(Order != Order, "Invalid memory order");
                }

                return true;
            }

            #define AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(_OPERATION_, _OPERATOR_) \
                template<bool Enable = IsIntegral, typename = typename std::enable_if<Enable, void>::type> \
                ALWAYS_INLINE T Fetch ## _OPERATION_(T arg) { \
                    static_assert(Enable); \
                    volatile StorageType * const p = this->GetStoragePointer(); \
                    const StorageType s = ConvertToStorage(arg); \
                    \
                    StorageType current; \
                    do { \
                        current = impl::LoadAcquireExclusiveForAtomic<StorageType>(p); \
                    } while (AMS_UNLIKELY(!impl::StoreReleaseExclusiveForAtomic<StorageType>(p, current _OPERATOR_ s))); \
                    return static_cast<T>(current); \
                }

            AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Add, +)
            AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Sub, -)
            AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(And, &)
            AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Or, |)
            AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Xor, ^)

            #undef AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION
    };

}
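Every read-modify-write in this header is an explicit load-exclusive/store-exclusive retry loop, with clrex releasing the monitor when a compare-exchange bails out early. As a hedged sketch of how a caller composes these primitives (the helper below is hypothetical, not part of the commit): a capped increment can retry CompareExchangeWeak, which writes the observed value back into its expected argument when the comparison fails, so the loop never needs a separate re-load.

/* Hypothetical helper for illustration: atomically increment v, but never past limit. */
/* Only Atomic<u32>, Load and CompareExchangeWeak are taken from the header above.     */
ALWAYS_INLINE bool IncrementIfBelow(ams::util::Atomic<u32> &v, u32 limit) {
    u32 cur = v.Load();
    do {
        if (cur >= limit) {
            /* Another thread reached the limit first; give up without storing. */
            return false;
        }
    } while (!v.CompareExchangeWeak(cur, cur + 1));
    return true;
}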
@@ -0,0 +1,111 @@
/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <vapours/common.hpp>
#include <vapours/assert.hpp>

namespace ams::util {

    namespace impl {

        template<typename T>
        struct AtomicIntegerStorage;

        template<typename T> requires (sizeof(T) == sizeof(u8))
        struct AtomicIntegerStorage<T> {
            using Type = u8;
        };

        template<typename T> requires (sizeof(T) == sizeof(u16))
        struct AtomicIntegerStorage<T> {
            using Type = u16;
        };

        template<typename T> requires (sizeof(T) == sizeof(u32))
        struct AtomicIntegerStorage<T> {
            using Type = u32;
        };

        template<typename T> requires (sizeof(T) == sizeof(u64))
        struct AtomicIntegerStorage<T> {
            using Type = u64;
        };

        template<typename T>
        concept UsableAtomicType = (sizeof(T) <= sizeof(u64)) && !std::is_const<T>::value && !std::is_volatile<T>::value && (std::is_pointer<T>::value || requires (const T &t) {
            std::bit_cast<typename AtomicIntegerStorage<T>::Type, T>(t);
        });

    }

    template<impl::UsableAtomicType T>
    class Atomic {
        NON_COPYABLE(Atomic);
        NON_MOVEABLE(Atomic);
        private:
            static_assert(std::atomic<T>::is_always_lock_free);
        private:
            std::atomic<T> m_v;
        public:
            ALWAYS_INLINE explicit Atomic() { /* ... */ }
            constexpr ALWAYS_INLINE explicit Atomic(T v) : m_v(v) { /* ... */ }

            ALWAYS_INLINE T operator=(T desired) {
                return (m_v = desired);
            }

            template<std::memory_order Order = std::memory_order_seq_cst>
            ALWAYS_INLINE T Load() const {
                return m_v.load(Order);
            }

            template<std::memory_order Order = std::memory_order_seq_cst>
            ALWAYS_INLINE void Store(T arg) {
                m_v.store(arg, Order);
            }

            template<std::memory_order Order = std::memory_order_seq_cst>
            ALWAYS_INLINE T Exchange(T arg) {
                return m_v.exchange(arg, Order);
            }

            template<std::memory_order Order = std::memory_order_seq_cst>
            ALWAYS_INLINE bool CompareExchangeWeak(T &expected, T desired) {
                return m_v.compare_exchange_weak(expected, desired, Order);
            }

            template<std::memory_order Order = std::memory_order_seq_cst>
            ALWAYS_INLINE bool CompareExchangeStrong(T &expected, T desired) {
                return m_v.compare_exchange_strong(expected, desired, Order);
            }

            #define AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(_OPERATION_, _OPERATION_LOWER_) \
                ALWAYS_INLINE T Fetch ## _OPERATION_(T arg) { \
                    return m_v.fetch_##_OPERATION_LOWER_(arg); \
                }

            AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Add, add)
            AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Sub, sub)
            AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(And, and)
            AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Or, or)
            AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Xor, xor)

            #undef AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION
    };

}
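This generic backend simply forwards to std::atomic, so both implementations expose the same call sites, with the memory order as a compile-time template argument rather than a runtime parameter. A small hedged sketch of order-explicit use (the caller-side functions below are invented for illustration and work against either backend):

/* Hypothetical release/acquire hand-off over the wrapper; not part of the commit. */
ALWAYS_INLINE void PublishPointer(ams::util::Atomic<void *> &slot, void *obj) {
    /* Release store: writes made before this call are visible to whoever acquires the slot. */
    slot.Store<std::memory_order_release>(obj);
}

ALWAYS_INLINE void *AcquirePointer(ams::util::Atomic<void *> &slot) {
    /* Acquire load: pairs with the release store above. */
    return slot.Load<std::memory_order_acquire>();
}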
libraries/libvapours/include/vapours/util/util_atomic.hpp (new file, 28 lines)
@@ -0,0 +1,28 @@
/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <vapours/common.hpp>
#include <vapours/assert.hpp>

#if defined(ATMOSPHERE_ARCH_ARM64)

    #include <vapours/util/arch/arm64/util_atomic.hpp>

#else

    #include <vapours/util/arch/generic/util_atomic.hpp>

#endif
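With this dispatch header in place, call sites include <vapours/util/util_atomic.hpp> and stay architecture-agnostic. As a final hedged sketch, a minimal test-and-set spin lock over the wrapper (a hypothetical example type, not something this commit adds; the kernel's real locks are more elaborate):

#include <vapours/util/util_atomic.hpp>

/* Hypothetical minimal spin lock, for illustration only. */
class ExampleSpinLock {
    private:
        ams::util::Atomic<u32> m_locked{0};
    public:
        ALWAYS_INLINE void Lock() {
            /* Exchange with acquire ordering: the caller that observes 0 owns the lock. */
            while (m_locked.Exchange<std::memory_order_acquire>(1) != 0) {
                /* Spin until the current holder releases. */
            }
        }

        ALWAYS_INLINE void Unlock() {
            /* Release store: writes in the critical section become visible before the lock reads 0. */
            m_locked.Store<std::memory_order_release>(0);
        }
};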