18 #ifndef __MBED_ATOMIC_IMPL_H__ 19 #define __MBED_ATOMIC_IMPL_H__ 21 #ifndef __MBED_UTIL_ATOMIC_H__ 22 #error "mbed_atomic_impl.h is designed to be included only by mbed_atomic.h" 27 #include "platform/mbed_assert.h" 28 #include "platform/mbed_toolchain.h" 36 #define MBED_CHECK_LOAD_ORDER(order) MBED_ASSERT((order) != mbed_memory_order_release && (order) != mbed_memory_order_acq_rel) 39 #define MBED_CHECK_STORE_ORDER(order) MBED_ASSERT((order) != mbed_memory_order_consume && (order) != mbed_memory_order_acquire && (order) != mbed_memory_order_acq_rel) 42 #define MBED_CHECK_CAS_ORDER(success, failure) \ 43 MBED_ASSERT((failure) <= (success) && (failure) != mbed_memory_order_release && (failure) != mbed_memory_order_acq_rel) 45 #define MBED_CHECK_LOAD_ORDER(order) (void)0 46 #define MBED_CHECK_STORE_ORDER(order) (void)0 47 #define MBED_CHECK_CAS_ORDER(success, failure) (void)0 52 #define MBED_ATOMIC_PTR_SIZE 32 54 #define MBED_ATOMIC_PTR_SIZE 64 58 #define MBED_ACQUIRE_BARRIER(order) do { \ 59 if ((order) & (mbed_memory_order_consume|mbed_memory_order_acquire)) { \ 64 #define MBED_RELEASE_BARRIER(order) do { \ 65 if ((order) & mbed_memory_order_release) { \ 70 #define MBED_SEQ_CST_BARRIER(order) do { \ 71 if ((order) == mbed_memory_order_seq_cst) { \ 77 #if MBED_EXCLUSIVE_ACCESS 87 #if MBED_EXCLUSIVE_ACCESS_THUMB1 88 #define MBED_DOP_REG "l" // Need low register to get 16-bit 3-op ADD/SUB 89 #define MBED_CMP_IMM "I" // CMP 8-bit immediate 90 #define MBED_SUB3_IMM "L" // -7 to +7 92 #define MBED_DOP_REG "r" // Can use 32-bit 3-op ADD/SUB, so any registers 93 #define MBED_CMP_IMM "IL" // CMP or CMN, 12-bit immediate 94 #define MBED_SUB3_IMM "IL" // SUB or ADD, 12-bit immediate 97 #if defined __clang__ || defined __GNUC__ 98 #define DO_MBED_LOCKFREE_EXCHG_ASM(M) \ 100 ".syntax unified\n\t" \ 101 "LDREX"#M "\t%[oldValue], %[value]\n\t" \ 102 "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \ 103 : [oldValue] "=&r" (oldValue), \ 104 [fail] "=&r" (fail), \ 105 [value] "+Q" (*valuePtr) \ 106 : [newValue] "r" (newValue) \ 109 #elif defined __ICCARM__ 111 #define DO_MBED_LOCKFREE_EXCHG_ASM(M) \ 113 "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \ 114 "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \ 115 : [oldValue] "=&r" (oldValue), \ 116 [fail] "=&r" (fail) \ 117 : [valuePtr] "r" (valuePtr), \ 118 [newValue] "r" (newValue) \ 123 #if defined __clang__ || defined __GNUC__ 124 #define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M) \ 126 ".syntax unified\n\t" \ 127 "LDREX"#M "\t%[newValue], %[value]\n\t" \ 128 #OP "\t%[newValue], %[arg]\n\t" \ 129 "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \ 130 : [newValue] "=&" MBED_DOP_REG (newValue), \ 131 [fail] "=&r" (fail), \ 132 [value] "+Q" (*valuePtr) \ 133 : [arg] Constants MBED_DOP_REG (arg) \ 136 #elif defined __ICCARM__ 139 #define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M) \ 141 "LDREX"#M "\t%[newValue], [%[valuePtr]]\n" \ 142 #OP "\t%[newValue], %[newValue], %[arg]\n" \ 143 "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \ 144 : [newValue] "=&r" (newValue), \ 145 [fail] "=&r" (fail) \ 146 : [valuePtr] "r" (valuePtr), \ 152 #if defined __clang__ || defined __GNUC__ 153 #define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M) \ 155 ".syntax unified\n\t" \ 156 "LDREX"#M "\t%[oldValue], %[value]\n\t" \ 157 #OP "\t%[newValue], %[oldValue], %[arg]\n\t" \ 158 "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \ 159 : [oldValue] "=&" MBED_DOP_REG (oldValue), \ 160 [newValue] "=&" MBED_DOP_REG (newValue), \ 161 [fail] "=&r" (fail), \ 162 [value] "+Q" (*valuePtr) \ 163 : [arg] Constants MBED_DOP_REG (arg) \ 166 #elif defined __ICCARM__ 168 #define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M) \ 170 "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \ 171 #OP "\t%[newValue], %[oldValue], %[arg]\n" \ 172 "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \ 173 : [oldValue] "=&r" (oldValue), \ 174 [newValue] "=&r" (newValue), \ 175 [fail] "=&r" (fail) \ 176 : [valuePtr] "r" (valuePtr), \ 185 #if defined __clang__ || defined __GNUC__ 186 #define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M) \ 188 ".syntax unified\n\t" \ 189 "LDREX"#M "\t%[oldValue], %[value]\n\t" \ 190 "MOV" "\t%[newValue], %[oldValue]\n\t" \ 191 #OP "\t%[newValue], %[arg]\n\t" \ 192 "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \ 193 : [oldValue] "=&r" (oldValue), \ 194 [newValue] "=&l" (newValue), \ 195 [fail] "=&r" (fail), \ 196 [value] "+Q" (*valuePtr) \ 197 : [arg] Constants "l" (arg) \ 200 #elif defined __ICCARM__ 201 #define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M) \ 203 "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \ 204 "MOV" "\t%[newValue], %[oldValue]\n" \ 205 #OP "\t%[newValue], %[arg]\n" \ 206 "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \ 207 : [oldValue] "=&r" (oldValue), \ 208 [newValue] "=&r" (newValue), \ 209 [fail] "=&r" (fail) \ 210 : [valuePtr] "r" (valuePtr), \ 221 #if MBED_EXCLUSIVE_ACCESS_ARM 222 #if defined __clang__ || defined __GNUC__ 223 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \ 225 ".syntax unified\n\t" \ 226 "LDREX"#M "\t%[oldValue], %[value]\n\t" \ 227 "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n\t"\ 228 "STREX"#M"EQ\t%[fail], %[desiredValue], %[value]\n\t" \ 229 : [oldValue] "=&r" (oldValue), \ 230 [fail] "=&r" (fail), \ 231 [value] "+Q" (*ptr) \ 232 : [desiredValue] "r" (desiredValue), \ 233 [expectedValue] "ILr" (expectedValue) \ 236 #elif defined __ICCARM__ 237 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \ 239 "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \ 240 "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n" \ 241 "STREX"#M"EQ\t%[fail], %[desiredValue], [%[valuePtr]]\n"\ 242 : [oldValue] "=&r" (oldValue), \ 243 [fail] "=&r" (fail) \ 244 : [desiredValue] "r" (desiredValue), \ 245 [expectedValue] "r" (expectedValue), \ 246 [valuePtr] "r" (ptr), \ 250 #else // MBED_EXCLUSIVE_ACCESS_ARM 251 #if defined __clang__ || defined __GNUC__ 252 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \ 254 ".syntax unified\n\t" \ 255 "LDREX"#M "\t%[oldValue], %[value]\n\t" \ 256 "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n\t"\ 258 "STREX"#M "\t%[fail], %[desiredValue], %[value]\n" \ 260 : [oldValue] "=&" MBED_DOP_REG (oldValue), \ 261 [fail] "=&" MBED_DOP_REG (fail), \ 262 [value] "+Q" (*ptr) \ 263 : [desiredValue] "r" (desiredValue), \ 264 [expectedValue] MBED_SUB3_IMM MBED_DOP_REG (expectedValue) \ 267 #elif defined __ICCARM__ 268 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \ 270 "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \ 271 "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n" \ 273 "STREX"#M "\t%[fail], %[desiredValue], [%[valuePtr]]\n"\ 275 : [oldValue] "=&r" (oldValue), \ 276 [fail] "=&r" (fail) \ 277 : [desiredValue] "r" (desiredValue), \ 278 [expectedValue] "r" (expectedValue), \ 279 [valuePtr] "r" (ptr) \ 283 #endif // MBED_EXCLUSIVE_ACCESS_ARM 290 #if defined __clang__ || defined __GNUC__ 291 #define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M) \ 293 ".syntax unified\n\t" \ 295 "LDREX"#M "\t%[oldValue], %[value]\n\t" \ 296 "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n\t"\ 298 "STREX"#M "\t%[fail], %[desiredValue], %[value]\n\t" \ 299 "CMP" "\t%[fail], #0\n\t" \ 302 : [oldValue] "=&" MBED_DOP_REG (oldValue), \ 303 [fail] "=&" MBED_DOP_REG (fail), \ 304 [value] "+Q" (*ptr) \ 305 : [desiredValue] "r" (desiredValue), \ 306 [expectedValue] MBED_SUB3_IMM MBED_DOP_REG (expectedValue) \ 309 #elif defined __ICCARM__ 310 #define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M) \ 313 "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \ 314 "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n" \ 316 "STREX"#M "\t%[fail], %[desiredValue], [%[valuePtr]]\n"\ 317 "CMP" "\t%[fail], #0\n" \ 320 : [oldValue] "=&r" (oldValue), \ 321 [fail] "=&r" (fail) \ 322 : [desiredValue] "r" (desiredValue), \ 323 [expectedValue] "r" (expectedValue), \ 324 [valuePtr] "r" (ptr) \ 343 #define DO_MBED_LOCKFREE_EXCHG_OP(T, fn_suffix, M) \ 344 inline T core_util_atomic_exchange_##fn_suffix(volatile T *valuePtr, T newValue) \ 349 DO_MBED_LOCKFREE_EXCHG_ASM(M); \ 354 MBED_FORCEINLINE T core_util_atomic_exchange_explicit_##fn_suffix( \ 355 volatile T *valuePtr, T newValue, mbed_memory_order order) \ 359 MBED_RELEASE_BARRIER(order); \ 360 DO_MBED_LOCKFREE_EXCHG_ASM(M); \ 361 MBED_ACQUIRE_BARRIER(order); \ 365 #define DO_MBED_LOCKFREE_CAS_WEAK_OP(T, fn_suffix, M) \ 366 inline bool core_util_atomic_compare_exchange_weak_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \ 370 uint32_t fail, expectedValue = *expectedCurrentValue; \ 371 DO_MBED_LOCKFREE_CAS_WEAK_ASM(M); \ 373 *expectedCurrentValue = oldValue; \ 379 MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) \ 381 MBED_CHECK_CAS_ORDER(success, failure); \ 382 MBED_RELEASE_BARRIER(success); \ 384 uint32_t fail, expectedValue = *expectedCurrentValue; \ 385 DO_MBED_LOCKFREE_CAS_WEAK_ASM(M); \ 387 *expectedCurrentValue = oldValue; \ 389 MBED_ACQUIRE_BARRIER(fail ? failure : success); \ 393 #define DO_MBED_LOCKFREE_CAS_STRONG_OP(T, fn_suffix, M) \ 394 inline bool core_util_atomic_cas_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \ 398 uint32_t fail, expectedValue = *expectedCurrentValue; \ 399 DO_MBED_LOCKFREE_CAS_STRONG_ASM(M); \ 401 *expectedCurrentValue = oldValue; \ 407 MBED_FORCEINLINE bool core_util_atomic_cas_explicit_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) \ 409 MBED_CHECK_CAS_ORDER(success, failure); \ 410 MBED_RELEASE_BARRIER(success); \ 412 uint32_t fail, expectedValue = *expectedCurrentValue; \ 413 DO_MBED_LOCKFREE_CAS_STRONG_ASM(M); \ 415 *expectedCurrentValue = oldValue; \ 417 MBED_ACQUIRE_BARRIER(fail ? failure : success); \ 422 #define DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, T, fn_suffix, M) \ 423 inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \ 425 uint32_t fail, newValue; \ 428 DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M); \ 431 return (T) newValue; \ 434 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \ 435 volatile T *valuePtr, T arg, mbed_memory_order order) \ 437 uint32_t fail, newValue; \ 438 MBED_RELEASE_BARRIER(order); \ 440 DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M); \ 442 MBED_ACQUIRE_BARRIER(order); \ 443 return (T) newValue; \ 446 #define DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, T, fn_suffix, M) \ 447 inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \ 450 uint32_t fail, newValue; \ 453 DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M); \ 459 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \ 460 volatile T *valuePtr, T arg, mbed_memory_order order) \ 463 uint32_t fail, newValue; \ 464 MBED_RELEASE_BARRIER(order); \ 466 DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M); \ 468 MBED_ACQUIRE_BARRIER(order); \ 472 #define DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, T, fn_suffix, M) \ 473 inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) { \ 475 uint32_t fail, newValue; \ 478 DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M); \ 484 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \ 485 volatile T *valuePtr, T arg, mbed_memory_order order) \ 488 uint32_t fail, newValue; \ 489 MBED_RELEASE_BARRIER(order); \ 491 DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M); \ 493 MBED_ACQUIRE_BARRIER(order); \ 500 bool oldValue, newValue =
true;
503 DO_MBED_LOCKFREE_EXCHG_ASM(B);
511 MBED_RELEASE_BARRIER(order);
512 bool oldValue, newValue =
true;
515 DO_MBED_LOCKFREE_EXCHG_ASM(B);
517 MBED_ACQUIRE_BARRIER(order);
523 #define DO_MBED_LOCKFREE_EXCHG_OPS() \ 524 DO_MBED_LOCKFREE_EXCHG_OP(uint8_t, u8, B) \ 525 DO_MBED_LOCKFREE_EXCHG_OP(uint16_t, u16, H) \ 526 DO_MBED_LOCKFREE_EXCHG_OP(uint32_t, u32, ) 528 #define DO_MBED_LOCKFREE_NEWVAL_2OPS(name, OP, Constants) \ 529 DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint8_t, u8, B) \ 530 DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint16_t, u16, H) \ 531 DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint32_t, u32, ) 533 #define DO_MBED_LOCKFREE_OLDVAL_3OPS(name, OP, Constants) \ 534 DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint8_t, u8, B) \ 535 DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint16_t, u16, H) \ 536 DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint32_t, u32, ) 538 #define DO_MBED_LOCKFREE_OLDVAL_2OPS(name, OP, Constants) \ 539 DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint8_t, u8, B) \ 540 DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint16_t, u16, H) \ 541 DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint32_t, u32, ) 543 #define DO_MBED_LOCKFREE_CAS_STRONG_OPS() \ 544 DO_MBED_LOCKFREE_CAS_STRONG_OP(uint8_t, u8, B) \ 545 DO_MBED_LOCKFREE_CAS_STRONG_OP(uint16_t, u16, H) \ 546 DO_MBED_LOCKFREE_CAS_STRONG_OP(uint32_t, u32, ) 548 #define DO_MBED_LOCKFREE_CAS_WEAK_OPS() \ 549 DO_MBED_LOCKFREE_CAS_WEAK_OP(uint8_t, u8, B) \ 550 DO_MBED_LOCKFREE_CAS_WEAK_OP(uint16_t, u16, H) \ 551 DO_MBED_LOCKFREE_CAS_WEAK_OP(uint32_t, u32, ) 562 #if !MBED_EXCLUSIVE_ACCESS_THUMB1 572 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_add, ADDS,
"IL")
573 DO_MBED_LOCKFREE_NEWVAL_2OPS(incr, ADDS, "IL")
574 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_sub, SUBS, "IL")
575 DO_MBED_LOCKFREE_NEWVAL_2OPS(decr, SUBS, "IL")
578 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_and, ANDS, "IK")
579 DO_MBED_LOCKFREE_NEWVAL_2OPS(and_fetch, ANDS, "IK")
580 #if MBED_EXCLUSIVE_ACCESS_ARM 582 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_or, ORRS,
"I")
583 DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch, ORRS, "I")
586 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_or, ORRS,
"IK")
587 DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch, ORRS, "IK")
590 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_xor, EORS,
"I")
591 DO_MBED_LOCKFREE_NEWVAL_2OPS(xor_fetch, EORS, "I")
592 #else // MBED_EXCLUSIVE_ACCESS_THUMB1 597 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_add, ADDS,
"L")
598 DO_MBED_LOCKFREE_NEWVAL_2OPS(incr, ADDS, "IJ")
599 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_sub, SUBS, "L")
600 DO_MBED_LOCKFREE_NEWVAL_2OPS(decr, SUBS, "IJ")
601 DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_and, ANDS, "")
602 DO_MBED_LOCKFREE_NEWVAL_2OPS(and_fetch, ANDS, "")
603 DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_or, ORRS, "")
604 DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch, ORRS, "")
605 DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_xor, EORS, "")
606 DO_MBED_LOCKFREE_NEWVAL_2OPS(xor_fetch, EORS, "")
609 DO_MBED_LOCKFREE_EXCHG_OPS()
610 DO_MBED_LOCKFREE_CAS_STRONG_OPS()
611 DO_MBED_LOCKFREE_CAS_WEAK_OPS()
613 #define DO_MBED_LOCKED_FETCH_OP_ORDERINGS(name) \ 614 DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint64_t, u64) 615 #define DO_MBED_LOCKED_CAS_ORDERINGS(name) \ 616 DO_MBED_LOCKED_CAS_ORDERING(name, uint64_t, u64) 617 #else // MBED_EXCLUSIVE_ACCESS 619 #define DO_MBED_LOCKED_FETCH_OP_ORDERINGS(name) \ 620 DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint8_t, u8) \ 621 DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint16_t, u16) \ 622 DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint32_t, u32) \ 623 DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint64_t, u64) 624 #define DO_MBED_LOCKED_CAS_ORDERINGS(name) \ 625 DO_MBED_LOCKED_CAS_ORDERING(name, uint8_t, u8) \ 626 DO_MBED_LOCKED_CAS_ORDERING(name, uint16_t, u16) \ 627 DO_MBED_LOCKED_CAS_ORDERING(name, uint32_t, u32) \ 628 DO_MBED_LOCKED_CAS_ORDERING(name, uint64_t, u64) 634 #endif // MBED_EXCLUSIVE_ACCESS 640 #define DO_MBED_LOCKFREE_LOADSTORE(T, V, fn_suffix) \ 641 MBED_FORCEINLINE T core_util_atomic_load_##fn_suffix(T const V *valuePtr) \ 643 T value = *valuePtr; \ 648 MBED_FORCEINLINE T core_util_atomic_load_explicit_##fn_suffix(T const V *valuePtr, mbed_memory_order order) \ 650 MBED_CHECK_LOAD_ORDER(order); \ 651 T value = *valuePtr; \ 652 MBED_ACQUIRE_BARRIER(order); \ 656 MBED_FORCEINLINE void core_util_atomic_store_##fn_suffix(T V *valuePtr, T value) \ 663 MBED_FORCEINLINE void core_util_atomic_store_explicit_##fn_suffix(T V *valuePtr, T value, mbed_memory_order order) \ 665 MBED_CHECK_STORE_ORDER(order); \ 666 MBED_RELEASE_BARRIER(order); \ 668 MBED_SEQ_CST_BARRIER(order); \ 674 flagPtr->_flag =
false;
680 MBED_CHECK_STORE_ORDER(order);
681 MBED_RELEASE_BARRIER(order);
682 flagPtr->_flag =
false;
683 MBED_SEQ_CST_BARRIER(order);
696 flagPtr->_flag =
false;
702 MBED_RELEASE_BARRIER(order);
703 flagPtr->_flag =
false;
704 MBED_SEQ_CST_BARRIER(order);
707 DO_MBED_LOCKFREE_LOADSTORE(uint8_t,, u8)
708 DO_MBED_LOCKFREE_LOADSTORE(uint16_t,, u16)
709 DO_MBED_LOCKFREE_LOADSTORE(uint32_t,, u32)
710 DO_MBED_LOCKFREE_LOADSTORE(int8_t,, s8)
711 DO_MBED_LOCKFREE_LOADSTORE(int16_t,, s16)
712 DO_MBED_LOCKFREE_LOADSTORE(int32_t,, s32)
713 DO_MBED_LOCKFREE_LOADSTORE(
bool,,
bool)
714 DO_MBED_LOCKFREE_LOADSTORE(
void *,, ptr)
718 DO_MBED_LOCKFREE_LOADSTORE(uint8_t,
volatile, u8)
719 DO_MBED_LOCKFREE_LOADSTORE(uint16_t, volatile, u16)
720 DO_MBED_LOCKFREE_LOADSTORE(uint32_t, volatile, u32)
721 DO_MBED_LOCKFREE_LOADSTORE(int8_t, volatile, s8)
722 DO_MBED_LOCKFREE_LOADSTORE(int16_t, volatile, s16)
723 DO_MBED_LOCKFREE_LOADSTORE(int32_t, volatile, s32)
724 DO_MBED_LOCKFREE_LOADSTORE(
bool, volatile,
bool)
725 DO_MBED_LOCKFREE_LOADSTORE(
void *, volatile, ptr)
743 #define DO_MBED_SIGNED_CAS_OP(name, T, fn_suffix) \ 744 MBED_FORCEINLINE bool core_util_atomic_##name##_s##fn_suffix(volatile T *ptr, \ 745 T *expectedCurrentValue, T desiredValue) \ 747 return core_util_atomic_##name##_u##fn_suffix((volatile u##T *)ptr, \ 748 (u##T *)expectedCurrentValue, (u##T)desiredValue); \ 751 MBED_FORCEINLINE bool core_util_atomic_##name##_explicit_s##fn_suffix(volatile T *ptr, \ 752 T *expectedCurrentValue, T desiredValue, \ 753 mbed_memory_order success, mbed_memory_order failure) \ 755 return core_util_atomic_##name##_explicit_u##fn_suffix((volatile u##T *)ptr, \ 756 (u##T *)expectedCurrentValue, (u##T)desiredValue, success, failure); \ 759 #define DO_MBED_SIGNED_CAS_OPS(name) \ 760 DO_MBED_SIGNED_CAS_OP(name, int8_t, 8) \ 761 DO_MBED_SIGNED_CAS_OP(name, int16_t, 16) \ 762 DO_MBED_SIGNED_CAS_OP(name, int32_t, 32) \ 763 DO_MBED_SIGNED_CAS_OP(name, int64_t, 64) 765 DO_MBED_SIGNED_CAS_OPS(cas)
766 DO_MBED_SIGNED_CAS_OPS(compare_exchange_weak)
780 #if MBED_ATOMIC_PTR_SIZE == 32 782 (
volatile uint32_t *)ptr,
783 (uint32_t *)expectedCurrentValue,
784 (uint32_t)desiredValue);
787 (
volatile uint64_t *)ptr,
788 (uint64_t *)expectedCurrentValue,
789 (uint64_t)desiredValue);
795 #if MBED_ATOMIC_PTR_SIZE == 32 797 (
volatile uint32_t *)ptr,
798 (uint32_t *)expectedCurrentValue,
799 (uint32_t)desiredValue,
803 (
volatile uint64_t *)ptr,
804 (uint64_t *)expectedCurrentValue,
805 (uint64_t)desiredValue,
822 #if MBED_ATOMIC_PTR_SIZE == 32 824 (
volatile uint32_t *)ptr,
825 (uint32_t *)expectedCurrentValue,
826 (uint32_t)desiredValue);
829 (
volatile uint64_t *)ptr,
830 (uint64_t *)expectedCurrentValue,
831 (uint64_t)desiredValue);
837 #if MBED_ATOMIC_PTR_SIZE == 32 839 (
volatile uint32_t *)ptr,
840 (uint32_t *)expectedCurrentValue,
841 (uint32_t)desiredValue,
845 (
volatile uint64_t *)ptr,
846 (uint64_t *)expectedCurrentValue,
847 (uint64_t)desiredValue,
852 #define DO_MBED_SIGNED_FETCH_OP(name, T, fn_suffix) \ 853 MBED_FORCEINLINE T core_util_atomic_##name##_s##fn_suffix(volatile T *valuePtr, T arg) \ 855 return (T)core_util_atomic_##name##_u##fn_suffix((volatile u##T *)valuePtr, (u##T)arg); \ 858 #define DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, T, fn_suffix) \ 859 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_s##fn_suffix(volatile T *valuePtr, T arg, mbed_memory_order order) \ 861 return (T)core_util_atomic_##name##_explicit_u##fn_suffix((volatile u##T *)valuePtr, (u##T)arg, order); \ 864 #define DO_MBED_SIGNED_FETCH_OPS(name) \ 865 DO_MBED_SIGNED_FETCH_OP(name, int8_t, 8) \ 866 DO_MBED_SIGNED_FETCH_OP(name, int16_t, 16) \ 867 DO_MBED_SIGNED_FETCH_OP(name, int32_t, 32) \ 868 DO_MBED_SIGNED_FETCH_OP(name, int64_t, 64) 870 #define DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(name) \ 871 DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int8_t, 8) \ 872 DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int16_t, 16) \ 873 DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int32_t, 32) \ 874 DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int64_t, 64) 876 DO_MBED_SIGNED_FETCH_OPS(exchange)
877 DO_MBED_SIGNED_FETCH_OPS(incr)
878 DO_MBED_SIGNED_FETCH_OPS(decr)
879 DO_MBED_SIGNED_FETCH_OPS(fetch_add)
880 DO_MBED_SIGNED_FETCH_OPS(fetch_sub)
882 DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(exchange)
883 DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(fetch_add)
884 DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(fetch_sub)
898 #if MBED_ATOMIC_PTR_SIZE == 32 907 #if MBED_ATOMIC_PTR_SIZE == 32 916 #if MBED_ATOMIC_PTR_SIZE == 32 925 #if MBED_ATOMIC_PTR_SIZE == 32 934 #if MBED_ATOMIC_PTR_SIZE == 32 943 #if MBED_ATOMIC_PTR_SIZE == 32 952 #if MBED_ATOMIC_PTR_SIZE == 32 961 #if MBED_ATOMIC_PTR_SIZE == 32 973 MBED_CHECK_LOAD_ORDER(order);
979 MBED_CHECK_LOAD_ORDER(order);
985 MBED_CHECK_STORE_ORDER(order);
991 MBED_CHECK_STORE_ORDER(order);
995 #define DO_MBED_LOCKED_FETCH_OP_ORDERING(name, T, fn_suffix) \ 996 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \ 997 volatile T *valuePtr, T arg, MBED_UNUSED mbed_memory_order order) \ 999 return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \ 1002 #define DO_MBED_LOCKED_CAS_ORDERING(name, T, fn_suffix) \ 1003 MBED_FORCEINLINE bool core_util_atomic_##name##_explicit_##fn_suffix( \ 1004 volatile T *ptr, T *expectedCurrentValue, T desiredValue, \ 1005 MBED_UNUSED mbed_memory_order success, \ 1006 MBED_UNUSED mbed_memory_order failure) \ 1008 MBED_CHECK_CAS_ORDER(success, failure); \ 1009 return core_util_atomic_##name##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \ 1012 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(exchange)
1013 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_add)
1014 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_sub)
1015 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_and)
1016 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_or)
1017 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_xor)
1018 DO_MBED_LOCKED_CAS_ORDERINGS(cas)
1019 DO_MBED_LOCKED_CAS_ORDERINGS(compare_exchange_weak)
1030 #define DO_MBED_ATOMIC_LOAD_TEMPLATE(T, fn_suffix) \ 1032 inline T core_util_atomic_load(const volatile T *valuePtr) noexcept \ 1034 return core_util_atomic_load_##fn_suffix(valuePtr); \ 1038 inline T core_util_atomic_load(const T *valuePtr) noexcept \ 1040 return core_util_atomic_load_##fn_suffix(valuePtr); \ 1044 inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) noexcept \ 1046 return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \ 1050 inline T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) noexcept \ 1052 return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \ 1055 template<
typename T>
1056 inline T *core_util_atomic_load(T *
const volatile *valuePtr) noexcept
1061 template<
typename T>
1062 inline T *core_util_atomic_load(T *
const *valuePtr) noexcept
1067 template<
typename T>
1068 inline T *core_util_atomic_load_explicit(T *
const volatile *valuePtr,
mbed_memory_order order) noexcept
1073 template<
typename T>
1074 inline T *core_util_atomic_load_explicit(T *
const *valuePtr,
mbed_memory_order order) noexcept
1079 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint8_t, u8)
1080 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint16_t, u16)
1081 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint32_t, u32)
1082 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint64_t, u64)
1083 DO_MBED_ATOMIC_LOAD_TEMPLATE(int8_t, s8)
1084 DO_MBED_ATOMIC_LOAD_TEMPLATE(int16_t, s16)
1085 DO_MBED_ATOMIC_LOAD_TEMPLATE(int32_t, s32)
1086 DO_MBED_ATOMIC_LOAD_TEMPLATE(int64_t, s64)
1087 DO_MBED_ATOMIC_LOAD_TEMPLATE(
bool,
bool)
1089 #define DO_MBED_ATOMIC_STORE_TEMPLATE(T, fn_suffix) \ 1091 inline void core_util_atomic_store(volatile T *valuePtr, T val) noexcept \ 1093 core_util_atomic_store_##fn_suffix(valuePtr, val); \ 1097 inline void core_util_atomic_store(T *valuePtr, T val) noexcept \ 1099 core_util_atomic_store_##fn_suffix(valuePtr, val); \ 1103 inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) noexcept \ 1105 core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \ 1109 inline void core_util_atomic_store_explicit(T *valuePtr, T val, mbed_memory_order order) noexcept \ 1111 core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \ 1114 template<
typename T>
1115 inline void core_util_atomic_store(T *
volatile *valuePtr, T *val) noexcept
1120 template<
typename T>
1121 inline void core_util_atomic_store(T **valuePtr, T *val) noexcept
1126 template<
typename T>
1127 inline void core_util_atomic_store_explicit(T *
volatile *valuePtr, T *val,
mbed_memory_order order) noexcept
1132 template<
typename T>
1133 inline void core_util_atomic_store_explicit(T **valuePtr, T *val,
mbed_memory_order order) noexcept
1138 DO_MBED_ATOMIC_STORE_TEMPLATE(uint8_t, u8)
1139 DO_MBED_ATOMIC_STORE_TEMPLATE(uint16_t, u16)
1140 DO_MBED_ATOMIC_STORE_TEMPLATE(uint32_t, u32)
1141 DO_MBED_ATOMIC_STORE_TEMPLATE(uint64_t, u64)
1142 DO_MBED_ATOMIC_STORE_TEMPLATE(int8_t, s8)
1143 DO_MBED_ATOMIC_STORE_TEMPLATE(int16_t, s16)
1144 DO_MBED_ATOMIC_STORE_TEMPLATE(int32_t, s32)
1145 DO_MBED_ATOMIC_STORE_TEMPLATE(int64_t, s64)
1146 DO_MBED_ATOMIC_STORE_TEMPLATE(
bool,
bool)
1148 #define DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, T, fn_suffix) \ 1150 bool core_util_atomic_##tname(volatile T *ptr, T *expectedCurrentValue, T desiredValue) noexcept \ 1152 return core_util_atomic_##fname##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \ 1155 template<
typename T>
1156 inline bool core_util_atomic_compare_exchange_strong(T *
volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept
1161 template<
typename T>
1162 inline bool core_util_atomic_compare_exchange_weak(T *
volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept
1167 #define DO_MBED_ATOMIC_CAS_TEMPLATES(tname, fname) \ 1168 DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint8_t, u8) \ 1169 DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint16_t, u16) \ 1170 DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint32_t, u32) \ 1171 DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint64_t, u64) \ 1172 DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int8_t, s8) \ 1173 DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int16_t, s16) \ 1174 DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int32_t, s32) \ 1175 DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int64_t, s64) \ 1176 DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, bool, bool) 1178 DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_strong, cas)
1179 DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_weak, compare_exchange_weak)
1181 #define DO_MBED_ATOMIC_OP_TEMPLATE(name, T, fn_suffix) \ 1183 inline T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept \ 1185 return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \ 1189 inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \ 1190 mbed_memory_order order) noexcept \ 1192 return core_util_atomic_##name##_explicit_##fn_suffix(valuePtr, arg, order); \ 1197 inline bool core_util_atomic_exchange(
volatile bool *valuePtr,
bool arg) noexcept
1203 inline bool core_util_atomic_exchange_explicit(
volatile bool *valuePtr,
bool arg,
mbed_memory_order order) noexcept
1208 template<
typename T>
1209 inline T *core_util_atomic_exchange(T *
volatile *valuePtr, T *arg) noexcept
1214 template<
typename T>
1215 inline T *core_util_atomic_exchange_explicit(T *
volatile *valuePtr, T *arg,
mbed_memory_order order) noexcept
1220 template<
typename T>
1221 inline T *core_util_atomic_fetch_add(T *
volatile *valuePtr, ptrdiff_t arg) noexcept
1226 template<
typename T>
1227 inline T *core_util_atomic_fetch_add_explicit(T *
volatile *valuePtr, ptrdiff_t arg,
mbed_memory_order order) noexcept
1232 template<
typename T>
1233 inline T *core_util_atomic_fetch_sub(T *
volatile *valuePtr, ptrdiff_t arg) noexcept
1238 template<
typename T>
1239 inline T *core_util_atomic_fetch_sub_explicit(T *
volatile *valuePtr, ptrdiff_t arg,
mbed_memory_order order) noexcept
1245 #define DO_MBED_ATOMIC_OP_U_TEMPLATES(name) \ 1246 DO_MBED_ATOMIC_OP_TEMPLATE(name, uint8_t, u8) \ 1247 DO_MBED_ATOMIC_OP_TEMPLATE(name, uint16_t, u16) \ 1248 DO_MBED_ATOMIC_OP_TEMPLATE(name, uint32_t, u32) \ 1249 DO_MBED_ATOMIC_OP_TEMPLATE(name, uint64_t, u64) 1251 #define DO_MBED_ATOMIC_OP_S_TEMPLATES(name) \ 1252 DO_MBED_ATOMIC_OP_TEMPLATE(name, int8_t, s8) \ 1253 DO_MBED_ATOMIC_OP_TEMPLATE(name, int16_t, s16) \ 1254 DO_MBED_ATOMIC_OP_TEMPLATE(name, int32_t, s32) \ 1255 DO_MBED_ATOMIC_OP_TEMPLATE(name, int64_t, s64) 1257 #define DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, T, fn_suffix, postname, OP) \ 1259 inline T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept \ 1261 return core_util_atomic_##postname##_##fn_suffix(valuePtr, arg) OP; \ 1265 inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \ 1266 mbed_memory_order order) noexcept \ 1268 return core_util_atomic_##postname##_explicit_##fn_suffix(valuePtr, arg, order) OP; \ 1271 DO_MBED_ATOMIC_OP_U_TEMPLATES(exchange)
1272 DO_MBED_ATOMIC_OP_S_TEMPLATES(exchange)
1273 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_add)
1274 DO_MBED_ATOMIC_OP_S_TEMPLATES(fetch_add)
1275 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_sub)
1276 DO_MBED_ATOMIC_OP_S_TEMPLATES(fetch_sub)
1277 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_and)
1278 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_or)
1279 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_xor)
1285 #if MBED_EXCLUSIVE_ACCESS 1286 #define DO_MBED_ATOMIC_PRE_OP_TEMPLATES(name, postname, OP) \ 1287 template<typename T> T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept; \ 1288 template<typename T> T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept; \ 1289 DO_MBED_ATOMIC_OP_TEMPLATE(name, uint8_t, u8) \ 1290 DO_MBED_ATOMIC_OP_TEMPLATE(name, uint16_t, u16) \ 1291 DO_MBED_ATOMIC_OP_TEMPLATE(name, uint32_t, u32) \ 1292 DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint64_t, u64, postname, OP) 1294 #define DO_MBED_ATOMIC_PRE_OP_TEMPLATES(name, postname, OP) \ 1295 template<typename T> T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept; \ 1296 template<typename T> T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept; \ 1297 DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint8_t, u8, postname, OP) \ 1298 DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint16_t, u16, postname, OP) \ 1299 DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint32_t, u32, postname, OP) \ 1300 DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint64_t, u64, postname, OP) 1304 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(incr, fetch_add, + arg)
1305 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(decr, fetch_sub, - arg)
1306 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(and_fetch, fetch_and, & arg)
1307 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(or_fetch, fetch_or, | arg)
1308 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(xor_fetch, fetch_xor, ^ arg)
1314 #endif // __cplusplus 1318 #undef MBED_SUB3_IMM 1319 #undef DO_MBED_LOCKFREE_EXCHG_ASM 1320 #undef DO_MBED_LOCKFREE_NEWVAL_2OP_ASM 1321 #undef DO_MBED_LOCKFREE_OLDVAL_3OP_ASM 1322 #undef DO_MBED_LOCKFREE_OLDVAL_2OP_ASM 1323 #undef DO_MBED_LOCKFREE_CAS_WEAK_ASM 1324 #undef DO_MBED_LOCKFREE_CAS_STRONG_ASM 1325 #undef DO_MBED_LOCKFREE_LOADSTORE 1326 #undef DO_MBED_LOCKFREE_EXCHG_OP 1327 #undef DO_MBED_LOCKFREE_CAS_WEAK_OP 1328 #undef DO_MBED_LOCKFREE_CAS_STRONG_OP 1329 #undef DO_MBED_LOCKFREE_NEWVAL_2OP 1330 #undef DO_MBED_LOCKFREE_OLDVAL_2OP 1331 #undef DO_MBED_LOCKFREE_OLDVAL_3OP 1332 #undef DO_MBED_LOCKFREE_EXCHG_OPS 1333 #undef DO_MBED_LOCKFREE_NEWVAL_2OPS 1334 #undef DO_MBED_LOCKFREE_OLDVAL_2OPS 1335 #undef DO_MBED_LOCKFREE_OLDVAL_3OPS 1336 #undef DO_MBED_LOCKFREE_CAS_WEAK_OPS 1337 #undef DO_MBED_LOCKFREE_CAS_STRONG_OPS 1338 #undef DO_MBED_SIGNED_CAS_OP 1339 #undef DO_MBED_SIGNED_CAS_OPS 1340 #undef DO_MBED_SIGNED_FETCH_OP 1341 #undef DO_MBED_SIGNED_EXPLICIT_FETCH_OP 1342 #undef DO_MBED_SIGNED_FETCH_OPS 1343 #undef DO_MBED_SIGNED_EXPLICIT_FETCH_OPS 1344 #undef DO_MBED_LOCKED_FETCH_OP_ORDERINGS 1345 #undef DO_MBED_LOCKED_CAS_ORDERINGS 1346 #undef MBED_ACQUIRE_BARRIER 1347 #undef MBED_RELEASE_BARRIER 1348 #undef MBED_SEQ_CST_BARRIER 1349 #undef DO_MBED_ATOMIC_LOAD_TEMPLATE 1350 #undef DO_MBED_ATOMIC_STORE_TEMPLATE 1351 #undef DO_MBED_ATOMIC_EXCHANGE_TEMPLATE 1352 #undef DO_MBED_ATOMIC_CAS_TEMPLATE 1353 #undef DO_MBED_ATOMIC_CAS_TEMPLATES 1354 #undef DO_MBED_ATOMIC_FETCH_TEMPLATE 1355 #undef DO_MBED_ATOMIC_FETCH_U_TEMPLATES 1356 #undef DO_MBED_ATOMIC_FETCH_S_TEMPLATES
A lock-free, primitive atomic flag.