19 #ifndef __MBED_UTIL_ATOMIC_H__ 20 #define __MBED_UTIL_ATOMIC_H__ 27 #include "mbed_toolchain.h" 57 mbed_memory_order_relaxed = 0x00,
58 mbed_memory_order_consume = 0x01,
59 mbed_memory_order_acquire = 0x02,
60 mbed_memory_order_release = 0x04,
61 mbed_memory_order_acq_rel = 0x06,
62 mbed_memory_order_seq_cst = 0x16
/* Decide whether exclusive-access (LDREX/STREX) instructions are usable,
 * unless the build (MBED_EXCLUSIVE_ACCESS) or CMSIS (__EXCLUSIVE_ACCESS)
 * has already decided. MBED_EXCLUSIVE_ACCESS_THUMB1 / _ARM record which
 * instruction-set variant applies. */
#ifndef MBED_EXCLUSIVE_ACCESS
#ifndef __EXCLUSIVE_ACCESS
#if defined __arm__ || defined __ICC_ARM__ || defined __ARM_ARCH
#if ((__ARM_ARCH_7M__ == 1U) || \
     (__ARM_ARCH_7EM__ == 1U) || \
     (__ARM_ARCH_8M_BASE__ == 1U) || \
     (__ARM_ARCH_8M_MAIN__ == 1U)) || \
     (__ARM_ARCH_7A__ == 1U)
#define MBED_EXCLUSIVE_ACCESS 1U
#define MBED_EXCLUSIVE_ACCESS_THUMB1 (__ARM_ARCH_8M_BASE__ == 1U)
/* NOTE(review): the interior of this ladder (original lines 76-88) was
 * dropped by the extraction; the #if/#else/#endif structure below is
 * reconstructed to fit the surviving lines - verify against upstream. */
#ifdef __ICCARM__
#if __CPU_MODE__ == 2
#define MBED_EXCLUSIVE_ACCESS_ARM 1U
#else
#define MBED_EXCLUSIVE_ACCESS_ARM 0U
#endif
#else
#if !defined (__thumb__)
#define MBED_EXCLUSIVE_ACCESS_ARM 1U
#else
#define MBED_EXCLUSIVE_ACCESS_ARM 0U
#endif
#endif
#elif (__ARM_ARCH_6M__ == 1U)
/* ARMv6-M has no exclusive-access instructions. */
#define MBED_EXCLUSIVE_ACCESS 0U
#else
#error "Unknown ARM architecture for exclusive access"
#endif // __ARM_ARCH_xxx
#else // __arm__ || defined __ICC_ARM__ || defined __ARM_ARCH
/* Non-Arm target (e.g. host build): no exclusive access. */
#define MBED_EXCLUSIVE_ACCESS 0U
#endif
#else // __EXCLUSIVE_ACCESS
#define MBED_EXCLUSIVE_ACCESS __EXCLUSIVE_ACCESS
#endif
#endif

/* When exclusive access is available the implementations are provided
 * inline by the impl header, so the declarations must be `inline` too. */
#if MBED_EXCLUSIVE_ACCESS
#define MBED_INLINE_IF_EX inline
#else
#define MBED_INLINE_IF_EX
#endif

/* NOTE(review): original lines 108-126 (the atomic flag type these macros
 * initialize, plus its documentation) were lost in extraction - restore
 * from the upstream header. */

/* Static initializer for an atomic flag in the clear state. */
#define CORE_UTIL_ATOMIC_FLAG_INIT { 0 }

/* NOTE(review): original lines 128-215 (atomic flag operations and the
 * documentation block for the CAS functions) were lost in extraction. */

MBED_INLINE_IF_EX
/** Atomic compare-and-swap on a uint8_t.
 *
 * @param ptr                  target to update
 * @param expectedCurrentValue in: value expected at *ptr; presumably
 *                             updated to the observed value on failure,
 *                             per the usual CAS contract - confirm in
 *                             mbed_atomic_impl.h
 * @param desiredValue         value written on success
 * @return true if the swap was performed
 */
bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue);
225 MBED_INLINE_IF_EX
bool core_util_atomic_cas_u16(
volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue);
231 MBED_INLINE_IF_EX
bool core_util_atomic_cas_u32(
volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue);
/* C++ type-generic atomic API, declarations only - definitions come from
 * platform/internal/mbed_atomic_impl.h, included at the end of this
 * header. Semantics follow the C11/C++11 conventions the names imply;
 * these overloads take no order argument, presumably defaulting to
 * seq_cst (the *_explicit variants below take one) - confirm in the
 * impl header. */

/** Atomically read *valuePtr. */
template<typename T> T core_util_atomic_load(const volatile T *valuePtr);

/** Atomically read *valuePtr (non-volatile overload). */
template<typename T> T core_util_atomic_load(const T *valuePtr);

/** Atomically write desiredValue to *valuePtr. */
template<typename T> void core_util_atomic_store(volatile T *valuePtr, T desiredValue);

/** Atomically write desiredValue to *valuePtr (non-volatile overload). */
template<typename T> void core_util_atomic_store(T *valuePtr, T desiredValue);

/** Atomically replace *ptr with desiredValue; returns the prior value. */
template<typename T> T core_util_atomic_exchange(volatile T *ptr, T desiredValue);

/** Strong compare-and-exchange; returns true on success. */
template<typename T> bool core_util_atomic_compare_exchange_strong(volatile T *ptr, T *expectedCurrentValue, T desiredValue);

/** Weak compare-and-exchange (may fail spuriously); true on success. */
template<typename T> bool core_util_atomic_compare_exchange_weak(volatile T *ptr, T *expectedCurrentValue, T desiredValue);

/** Atomic fetch-and-add; returns the value prior to the addition. */
template<typename T> T core_util_atomic_fetch_add(volatile T *valuePtr, T arg);

/** Atomic fetch-and-subtract; returns the value prior to the subtraction. */
template<typename T> T core_util_atomic_fetch_sub(volatile T *valuePtr, T arg);

/** Atomic fetch-and-AND; returns the value prior to the operation. */
template<typename T> T core_util_atomic_fetch_and(volatile T *valuePtr, T arg);

/** Atomic fetch-and-OR; returns the value prior to the operation. */
template<typename T> T core_util_atomic_fetch_or(volatile T *valuePtr, T arg);

/** Atomic fetch-and-XOR; returns the value prior to the operation. */
template<typename T> T core_util_atomic_fetch_xor(volatile T *valuePtr, T arg);
923 template<
typename T> T core_util_atomic_load_explicit(
const volatile T *valuePtr,
mbed_memory_order order);
925 template<
typename T> T core_util_atomic_load_explicit(
const T *valuePtr,
mbed_memory_order order);
927 template<
typename T>
void core_util_atomic_store_explicit(
volatile T *valuePtr, T desiredValue,
mbed_memory_order order);
929 template<
typename T>
void core_util_atomic_store_explicit(T *valuePtr, T desiredValue,
mbed_memory_order order);
931 template<
typename T> T core_util_atomic_exchange_explicit(
volatile T *ptr, T desiredValue,
mbed_memory_order order);
933 template<
typename T>
bool core_util_atomic_compare_exchange_strong_explicit(
volatile T *ptr, T *expectedCurrentValue, T desiredValue,
mbed_memory_order success,
mbed_memory_order failure);
935 template<
typename T>
bool core_util_atomic_compare_exchange_weak_explicit(
volatile T *ptr, T *expectedCurrentValue, T desiredValue,
mbed_memory_order success,
mbed_memory_order failure);
937 template<
typename T> T core_util_atomic_fetch_add_explicit(
volatile T *valuePtr, T arg,
mbed_memory_order order);
939 template<
typename T> T core_util_atomic_fetch_sub_explicit(
volatile T *valuePtr, T arg,
mbed_memory_order order);
941 template<
typename T> T core_util_atomic_fetch_and_explicit(
volatile T *valuePtr, T arg,
mbed_memory_order order);
943 template<
typename T> T core_util_atomic_fetch_or_explicit(
volatile T *valuePtr, T arg,
mbed_memory_order order);
945 template<
typename T> T core_util_atomic_fetch_xor_explicit(
volatile T *valuePtr, T arg,
mbed_memory_order order);
/* Pointer overloads: atomic operations on objects of type T*. As with
 * C11 pointer atomics, fetch_add/fetch_sub take a ptrdiff_t element
 * count. Declarations only; defined in mbed_atomic_impl.h. */

/** Atomically read a pointer. */
template<typename T> inline T *core_util_atomic_load(T *const volatile *valuePtr);

/** Atomically read a pointer (non-volatile overload). */
template<typename T> inline T *core_util_atomic_load(T *const *valuePtr);

/** Atomically store a pointer. */
template<typename T> inline void core_util_atomic_store(T *volatile *valuePtr, T *desiredValue);

/** Atomically store a pointer (non-volatile overload). */
template<typename T> inline void core_util_atomic_store(T **valuePtr, T *desiredValue);

/** Atomically exchange a pointer; returns the prior value. */
template<typename T> inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *desiredValue);

/** Strong compare-and-exchange on a pointer. */
template<typename T> inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue);

/** Weak compare-and-exchange on a pointer. */
template<typename T> inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue);

/** Atomic fetch-and-add on a pointer (arg counts elements of T). */
template<typename T> inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg);

/** Atomic fetch-and-subtract on a pointer (arg counts elements of T). */
template<typename T> inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg);
967 template<
typename T>
inline T *core_util_atomic_load_explicit(T *
const volatile *valuePtr,
mbed_memory_order order);
969 template<
typename T>
inline T *core_util_atomic_load_explicit(T *
const *valuePtr,
mbed_memory_order order);
971 template<
typename T>
inline void core_util_atomic_store_explicit(T *
volatile *valuePtr, T *desiredValue,
mbed_memory_order order);
973 template<
typename T>
inline void core_util_atomic_store_explicit(T **valuePtr, T *desiredValue,
mbed_memory_order order);
975 template<
typename T>
inline T *core_util_atomic_exchange_explicit(T *
volatile *valuePtr, T *desiredValue,
mbed_memory_order order);
977 template<
typename T>
inline bool core_util_atomic_compare_exchange_strong_explicit(T *
volatile *ptr, T **expectedCurrentValue, T *desiredValue,
mbed_memory_order success,
mbed_memory_order failure);
979 template<
typename T>
inline bool core_util_atomic_compare_exchange_weak_explicit(T *
volatile *ptr, T **expectedCurrentValue, T *desiredValue,
mbed_memory_order success,
mbed_memory_order failure);
981 template<
typename T>
inline T *core_util_atomic_fetch_add_explicit(T *
volatile *valuePtr, ptrdiff_t arg,
mbed_memory_order order);
983 template<
typename T>
inline T *core_util_atomic_fetch_sub_explicit(T *
volatile *valuePtr, ptrdiff_t arg,
mbed_memory_order order);
985 #endif // __cplusplus 992 #include "platform/internal/mbed_atomic_impl.h" 994 #endif // __MBED_UTIL_ATOMICL_H__
/* A lock-free, primitive atomic flag.
 * NOTE(review): displaced doc fragment - appears to belong with the
 * CORE_UTIL_ATOMIC_FLAG_INIT initializer earlier in this header. */