#ifndef __MBED_ATOMIC_IMPL_H__
#define __MBED_ATOMIC_IMPL_H__

#ifndef __MBED_UTIL_ATOMIC_H__
#error "mbed_atomic_impl.h is designed to be included only by mbed_atomic.h"
#endif

#include <stdint.h>
#include "platform/mbed_assert.h"
#include "platform/mbed_toolchain.h"

#ifdef __cplusplus
extern "C" {
#endif

#ifdef MBED_DEBUG
/* Plain loads must not have "release" or "acquire+release" order */
#define MBED_CHECK_LOAD_ORDER(order) MBED_ASSERT((order) != mbed_memory_order_release && (order) != mbed_memory_order_acq_rel)

/* Plain stores must not have "consume", "acquire" or "acquire+release" order */
#define MBED_CHECK_STORE_ORDER(order) MBED_ASSERT((order) != mbed_memory_order_consume && (order) != mbed_memory_order_acquire && (order) != mbed_memory_order_acq_rel)

/* Compare-exchange failure order must be no stronger than the success order,
 * and cannot be "release" or "acquire+release" */
#define MBED_CHECK_CAS_ORDER(success, failure) \
    MBED_ASSERT((failure) <= (success) && (failure) != mbed_memory_order_release && (failure) != mbed_memory_order_acq_rel)
#else
#define MBED_CHECK_LOAD_ORDER(order) (void)0
#define MBED_CHECK_STORE_ORDER(order) (void)0
#define MBED_CHECK_CAS_ORDER(success, failure) (void)0
#endif

#if UINTPTR_MAX == 0xffffffff
#define MBED_ATOMIC_PTR_SIZE 32
#else
#define MBED_ATOMIC_PTR_SIZE 64
#endif

/* Barrier after a load or read-modify-write if a consume or acquire operation */
#define MBED_ACQUIRE_BARRIER(order) do { \
    if ((order) & (mbed_memory_order_consume|mbed_memory_order_acquire)) { \
        MBED_BARRIER(); \
    } } while (0)

/* Barrier before a store or read-modify-write if a release operation */
#define MBED_RELEASE_BARRIER(order) do { \
    if ((order) & mbed_memory_order_release) { \
        MBED_BARRIER(); \
    } } while (0)

/* Barrier after a plain store if sequentially consistent */
#define MBED_SEQ_CST_BARRIER(order) do { \
    if ((order) == mbed_memory_order_seq_cst) { \
        MBED_BARRIER(); \
    } } while (0)


#if MBED_EXCLUSIVE_ACCESS

/* Lock-free implementations using LDREX/STREX exclusive-access instructions,
 * written as inline assembly for each supported toolchain. */

#if MBED_EXCLUSIVE_ACCESS_THUMB1
#define MBED_DOP_REG "l"   // Need low register to get 16-bit 3-op ADD/SUB
#define MBED_CMP_IMM "I"   // CMP 8-bit immediate
#define MBED_SUB3_IMM "L"  // -7 to +7
#else
#define MBED_DOP_REG "r"   // Can use 32-bit 3-op ADD/SUB, so any registers
#define MBED_CMP_IMM "IL"  // CMP or CMN, 12-bit immediate
#define MBED_SUB3_IMM "IL" // SUB or ADD, 12-bit immediate
#endif

#if defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_EXCHG_ASM(M)                           \
    __asm volatile (                                            \
        ".syntax unified\n\t"                                   \
        "LDREX"#M "\t%[oldValue], %[value]\n\t"                 \
        "STREX"#M "\t%[fail], %[newValue], %[value]\n\t"        \
        : [oldValue] "=&r" (oldValue),                          \
          [fail] "=&r" (fail),                                  \
          [value] "+Q" (*valuePtr)                              \
        : [newValue] "r" (newValue)                             \
    )
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_EXCHG_ASM(M)                           \
    asm volatile (                                              \
        "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n"              \
        "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n"     \
        : [oldValue] "=&r" (oldValue),                          \
          [fail] "=&r" (fail)                                   \
        : [valuePtr] "r" (valuePtr),                            \
          [newValue] "r" (newValue)                             \
        : "memory"                                              \
    )
#endif

#if defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M)       \
    __asm volatile (                                            \
        ".syntax unified\n\t"                                   \
        "LDREX"#M "\t%[newValue], %[value]\n\t"                 \
        #OP       "\t%[newValue], %[arg]\n\t"                   \
        "STREX"#M "\t%[fail], %[newValue], %[value]\n\t"        \
        : [newValue] "=&" MBED_DOP_REG (newValue),              \
          [fail] "=&r" (fail),                                  \
          [value] "+Q" (*valuePtr)                              \
        : [arg] Constants MBED_DOP_REG (arg)                    \
        : "cc"                                                  \
    )
#elif defined __ICCARM__
/* IAR needs the 3-operand form of the data-processing instruction */
#define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M)       \
    asm volatile (                                              \
        "LDREX"#M "\t%[newValue], [%[valuePtr]]\n"              \
        #OP       "\t%[newValue], %[newValue], %[arg]\n"        \
        "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n"     \
        : [newValue] "=&r" (newValue),                          \
          [fail] "=&r" (fail)                                   \
        : [valuePtr] "r" (valuePtr),                            \
          [arg] "r" (arg)                                       \
        : "memory", "cc"                                        \
    )
#endif

#if defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M)       \
    __asm volatile (                                            \
        ".syntax unified\n\t"                                   \
        "LDREX"#M "\t%[oldValue], %[value]\n\t"                 \
        #OP       "\t%[newValue], %[oldValue], %[arg]\n\t"      \
        "STREX"#M "\t%[fail], %[newValue], %[value]\n\t"        \
        : [oldValue] "=&" MBED_DOP_REG (oldValue),              \
          [newValue] "=&" MBED_DOP_REG (newValue),              \
          [fail] "=&r" (fail),                                  \
          [value] "+Q" (*valuePtr)                              \
        : [arg] Constants MBED_DOP_REG (arg)                    \
        : "cc"                                                  \
    )
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M)       \
    asm volatile (                                              \
        "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n"              \
        #OP       "\t%[newValue], %[oldValue], %[arg]\n"        \
        "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n"     \
        : [oldValue] "=&r" (oldValue),                          \
          [newValue] "=&r" (newValue),                          \
          [fail] "=&r" (fail)                                   \
        : [valuePtr] "r" (valuePtr),                            \
          [arg] "r" (arg)                                       \
        : "memory", "cc"                                        \
    )
#endif

/* Thumb-1 logical operations have no 3-operand register form, so preserve
 * the old value with a MOV before the 2-operand op */
#if defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M)       \
    __asm volatile (                                            \
        ".syntax unified\n\t"                                   \
        "LDREX"#M "\t%[oldValue], %[value]\n\t"                 \
        "MOV"     "\t%[newValue], %[oldValue]\n\t"              \
        #OP       "\t%[newValue], %[arg]\n\t"                   \
        "STREX"#M "\t%[fail], %[newValue], %[value]\n\t"        \
        : [oldValue] "=&r" (oldValue),                          \
          [newValue] "=&l" (newValue),                          \
          [fail] "=&r" (fail),                                  \
          [value] "+Q" (*valuePtr)                              \
        : [arg] Constants "l" (arg)                             \
        : "cc"                                                  \
    )
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M)       \
    asm volatile (                                              \
        "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n"              \
        "MOV"     "\t%[newValue], %[oldValue]\n"                \
        #OP       "\t%[newValue], %[arg]\n"                     \
        "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n"     \
        : [oldValue] "=&r" (oldValue),                          \
          [newValue] "=&r" (newValue),                          \
          [fail] "=&r" (fail)                                   \
        : [valuePtr] "r" (valuePtr),                            \
          [arg] "r" (arg)                                       \
        : "memory", "cc"                                        \
    )
#endif

/* Weak CAS - in ARM state, conditional execution lets STREXEQ skip the store
 * entirely when the comparison fails */
#if MBED_EXCLUSIVE_ACCESS_ARM
#if defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M)                        \
    __asm volatile (                                            \
        ".syntax unified\n\t"                                   \
        "LDREX"#M "\t%[oldValue], %[value]\n\t"                 \
        "SUBS"    "\t%[fail], %[oldValue], %[expectedValue]\n\t"\
        "STREX"#M"EQ\t%[fail], %[desiredValue], %[value]\n\t"   \
        : [oldValue] "=&r" (oldValue),                          \
          [fail] "=&r" (fail),                                  \
          [value] "+Q" (*ptr)                                   \
        : [desiredValue] "r" (desiredValue),                    \
          [expectedValue] "ILr" (expectedValue)                 \
        : "cc"                                                  \
    )
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M)                        \
    asm volatile (                                              \
        "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n"              \
        "SUBS"    "\t%[fail], %[oldValue], %[expectedValue]\n"  \
        "STREX"#M"EQ\t%[fail], %[desiredValue], [%[valuePtr]]\n"\
        : [oldValue] "=&r" (oldValue),                          \
          [fail] "=&r" (fail)                                   \
        : [desiredValue] "r" (desiredValue),                    \
          [expectedValue] "r" (expectedValue),                  \
          [valuePtr] "r" (ptr)                                  \
        : "memory", "cc"                                        \
    )
#endif
#else // MBED_EXCLUSIVE_ACCESS_ARM
/* In Thumb state, branch over the store when the comparison fails */
#if defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M)                        \
    __asm volatile (                                            \
        ".syntax unified\n\t"                                   \
        "LDREX"#M "\t%[oldValue], %[value]\n\t"                 \
        "SUBS"    "\t%[fail], %[oldValue], %[expectedValue]\n\t"\
        "BNE"     "\t%=f\n\t"                                   \
        "STREX"#M "\t%[fail], %[desiredValue], %[value]\n"      \
        "%=:"                                                   \
        : [oldValue] "=&" MBED_DOP_REG (oldValue),              \
          [fail] "=&" MBED_DOP_REG (fail),                      \
          [value] "+Q" (*ptr)                                   \
        : [desiredValue] "r" (desiredValue),                    \
          [expectedValue] MBED_SUB3_IMM MBED_DOP_REG (expectedValue) \
        : "cc"                                                  \
    )
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M)                        \
    asm volatile (                                              \
        "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n"              \
        "SUBS"    "\t%[fail], %[oldValue], %[expectedValue]\n"  \
        "BNE"     "\t1f\n"                                      \
        "STREX"#M "\t%[fail], %[desiredValue], [%[valuePtr]]\n" \
        "1:"                                                    \
        : [oldValue] "=&r" (oldValue),                          \
          [fail] "=&r" (fail)                                   \
        : [desiredValue] "r" (desiredValue),                    \
          [expectedValue] "r" (expectedValue),                  \
          [valuePtr] "r" (ptr)                                  \
        : "memory", "cc"                                        \
    )
#endif
#endif // MBED_EXCLUSIVE_ACCESS_ARM

/* Strong CAS must retry the LDREX/STREX sequence itself whenever the STREX
 * fails spuriously, so it carries its own loop in the assembler */
#if defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M)                      \
    __asm volatile (                                            \
        ".syntax unified\n\t"                                   \
        "\n%=:\n\t"                                             \
        "LDREX"#M "\t%[oldValue], %[value]\n\t"                 \
        "SUBS"    "\t%[fail], %[oldValue], %[expectedValue]\n\t"\
        "BNE"     "\t%=f\n\t"                                   \
        "STREX"#M "\t%[fail], %[desiredValue], %[value]\n\t"    \
        "CMP"     "\t%[fail], #0\n\t"                           \
        "BNE"     "\t%=b\n\t"                                   \
        "%=:"                                                   \
        : [oldValue] "=&" MBED_DOP_REG (oldValue),              \
          [fail] "=&" MBED_DOP_REG (fail),                      \
          [value] "+Q" (*ptr)                                   \
        : [desiredValue] "r" (desiredValue),                    \
          [expectedValue] MBED_SUB3_IMM MBED_DOP_REG (expectedValue) \
        : "cc"                                                  \
    )
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M)                      \
    asm volatile (                                              \
        "1:\n"                                                  \
        "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n"              \
        "SUBS"    "\t%[fail], %[oldValue], %[expectedValue]\n"  \
        "BNE"     "\t2f\n"                                      \
        "STREX"#M "\t%[fail], %[desiredValue], [%[valuePtr]]\n" \
        "CMP"     "\t%[fail], #0\n"                             \
        "BNE"     "\t1b\n"                                      \
        "2:"                                                    \
        : [oldValue] "=&r" (oldValue),                          \
          [fail] "=&r" (fail)                                   \
        : [desiredValue] "r" (desiredValue),                    \
          [expectedValue] "r" (expectedValue),                  \
          [valuePtr] "r" (ptr)                                  \
        : "memory", "cc"                                        \
    )
#endif

/* Generate the C functions from the assembler fragments above. Exchange and
 * the 2-op/3-op routines loop until the STREX succeeds (fail == 0). */
#define DO_MBED_LOCKFREE_EXCHG_OP(T, fn_suffix, M) \
inline T core_util_atomic_exchange_##fn_suffix(volatile T *valuePtr, T newValue) \
{ \
    T oldValue; \
    uint32_t fail; \
    MBED_BARRIER(); \
    do { \
        DO_MBED_LOCKFREE_EXCHG_ASM(M); \
    } while (fail); \
    MBED_BARRIER(); \
    return oldValue; \
} \
\
MBED_FORCEINLINE T core_util_atomic_exchange_explicit_##fn_suffix( \
    volatile T *valuePtr, T newValue, mbed_memory_order order) \
{ \
    T oldValue; \
    uint32_t fail; \
    MBED_RELEASE_BARRIER(order); \
    do { \
        DO_MBED_LOCKFREE_EXCHG_ASM(M); \
    } while (fail); \
    MBED_ACQUIRE_BARRIER(order); \
    return oldValue; \
}

#define DO_MBED_LOCKFREE_CAS_WEAK_OP(T, fn_suffix, M) \
inline bool core_util_atomic_compare_exchange_weak_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \
{ \
    MBED_BARRIER(); \
    T oldValue; \
    uint32_t fail, expectedValue = *expectedCurrentValue; \
    DO_MBED_LOCKFREE_CAS_WEAK_ASM(M); \
    if (fail) { \
        *expectedCurrentValue = oldValue; \
    } \
    MBED_BARRIER(); \
    return !fail; \
} \
\
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) \
{ \
    MBED_CHECK_CAS_ORDER(success, failure); \
    MBED_RELEASE_BARRIER(success); \
    T oldValue; \
    uint32_t fail, expectedValue = *expectedCurrentValue; \
    DO_MBED_LOCKFREE_CAS_WEAK_ASM(M); \
    if (fail) { \
        *expectedCurrentValue = oldValue; \
    } \
    MBED_ACQUIRE_BARRIER(fail ? failure : success); \
    return !fail; \
}

#define DO_MBED_LOCKFREE_CAS_STRONG_OP(T, fn_suffix, M) \
inline bool core_util_atomic_cas_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \
{ \
    MBED_BARRIER(); \
    T oldValue; \
    uint32_t fail, expectedValue = *expectedCurrentValue; \
    DO_MBED_LOCKFREE_CAS_STRONG_ASM(M); \
    if (fail) { \
        *expectedCurrentValue = oldValue; \
    } \
    MBED_BARRIER(); \
    return !fail; \
} \
\
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) \
{ \
    MBED_CHECK_CAS_ORDER(success, failure); \
    MBED_RELEASE_BARRIER(success); \
    T oldValue; \
    uint32_t fail, expectedValue = *expectedCurrentValue; \
    DO_MBED_LOCKFREE_CAS_STRONG_ASM(M); \
    if (fail) { \
        *expectedCurrentValue = oldValue; \
    } \
    MBED_ACQUIRE_BARRIER(fail ? failure : success); \
    return !fail; \
}

#define DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, T, fn_suffix, M) \
inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \
{ \
    uint32_t fail, newValue; \
    MBED_BARRIER(); \
    do { \
        DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M); \
    } while (fail); \
    MBED_BARRIER(); \
    return (T) newValue; \
} \
\
MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
    volatile T *valuePtr, T arg, mbed_memory_order order) \
{ \
    uint32_t fail, newValue; \
    MBED_RELEASE_BARRIER(order); \
    do { \
        DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M); \
    } while (fail); \
    MBED_ACQUIRE_BARRIER(order); \
    return (T) newValue; \
}

#define DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, T, fn_suffix, M) \
inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \
{ \
    T oldValue; \
    uint32_t fail, newValue; \
    MBED_BARRIER(); \
    do { \
        DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M); \
    } while (fail); \
    MBED_BARRIER(); \
    return oldValue; \
} \
\
MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
    volatile T *valuePtr, T arg, mbed_memory_order order) \
{ \
    T oldValue; \
    uint32_t fail, newValue; \
    MBED_RELEASE_BARRIER(order); \
    do { \
        DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M); \
    } while (fail); \
    MBED_ACQUIRE_BARRIER(order); \
    return oldValue; \
}

#define DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, T, fn_suffix, M) \
inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) { \
    T oldValue; \
    uint32_t fail, newValue; \
    MBED_BARRIER(); \
    do { \
        DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M); \
    } while (fail); \
    MBED_BARRIER(); \
    return oldValue; \
} \
\
MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
    volatile T *valuePtr, T arg, mbed_memory_order order) \
{ \
    T oldValue; \
    uint32_t fail, newValue; \
    MBED_RELEASE_BARRIER(order); \
    do { \
        DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M); \
    } while (fail); \
    MBED_ACQUIRE_BARRIER(order); \
    return oldValue; \
}

inline bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr)
{
    MBED_BARRIER();
    bool oldValue, newValue = true;
    uint32_t fail;
    volatile uint8_t *valuePtr = &flagPtr->_flag;
    do {
        DO_MBED_LOCKFREE_EXCHG_ASM(B);
    } while (fail);
    MBED_BARRIER();
    return oldValue;
}

MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *flagPtr, mbed_memory_order order)
{
    MBED_RELEASE_BARRIER(order);
    bool oldValue, newValue = true;
    uint32_t fail;
    volatile uint8_t *valuePtr = &flagPtr->_flag;
    do {
        DO_MBED_LOCKFREE_EXCHG_ASM(B);
    } while (fail);
    MBED_ACQUIRE_BARRIER(order);
    return oldValue;
}
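/* Usage sketch (editorial illustration, not part of the original source): the
 * flag primitives above are sufficient to build a simple spinlock.
 *
 *     static core_util_atomic_flag lock = CORE_UTIL_ATOMIC_FLAG_INIT;
 *
 *     void with_lock(void (*fn)(void))
 *     {
 *         while (core_util_atomic_flag_test_and_set(&lock)) {
 *             // test_and_set returns the previous value, so true means
 *             // another context still holds the flag - keep spinning
 *         }
 *         fn();
 *         core_util_atomic_flag_clear(&lock);
 *     }
 */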
#define DO_MBED_LOCKFREE_EXCHG_OPS() \
    DO_MBED_LOCKFREE_EXCHG_OP(uint8_t,  u8,  B) \
    DO_MBED_LOCKFREE_EXCHG_OP(uint16_t, u16, H) \
    DO_MBED_LOCKFREE_EXCHG_OP(uint32_t, u32,  )

#define DO_MBED_LOCKFREE_NEWVAL_2OPS(name, OP, Constants) \
    DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint8_t,  u8,  B) \
    DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint16_t, u16, H) \
    DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint32_t, u32,  )

#define DO_MBED_LOCKFREE_OLDVAL_3OPS(name, OP, Constants) \
    DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint8_t,  u8,  B) \
    DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint16_t, u16, H) \
    DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint32_t, u32,  )

#define DO_MBED_LOCKFREE_OLDVAL_2OPS(name, OP, Constants) \
    DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint8_t,  u8,  B) \
    DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint16_t, u16, H) \
    DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint32_t, u32,  )

#define DO_MBED_LOCKFREE_CAS_STRONG_OPS() \
    DO_MBED_LOCKFREE_CAS_STRONG_OP(uint8_t,  u8,  B) \
    DO_MBED_LOCKFREE_CAS_STRONG_OP(uint16_t, u16, H) \
    DO_MBED_LOCKFREE_CAS_STRONG_OP(uint32_t, u32,  )

#define DO_MBED_LOCKFREE_CAS_WEAK_OPS() \
    DO_MBED_LOCKFREE_CAS_WEAK_OP(uint8_t,  u8,  B) \
    DO_MBED_LOCKFREE_CAS_WEAK_OP(uint16_t, u16, H) \
    DO_MBED_LOCKFREE_CAS_WEAK_OP(uint32_t, u32,  )

/* The constraint strings passed below tell the compiler which immediate
 * operands each instruction can encode in the current instruction set state */
#if !MBED_EXCLUSIVE_ACCESS_THUMB1
// Thumb-2 and ARM - modified immediates available for 3-operand forms
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_add, ADDS, "IL")
DO_MBED_LOCKFREE_NEWVAL_2OPS(incr,      ADDS, "IL")
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_sub, SUBS, "IL")
DO_MBED_LOCKFREE_NEWVAL_2OPS(decr,      SUBS, "IL")
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_and, ANDS, "IK")
DO_MBED_LOCKFREE_NEWVAL_2OPS(and_fetch, ANDS, "IK")
#if MBED_EXCLUSIVE_ACCESS_ARM
// ARM state has no ORN, so only plain modified immediates
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_or,  ORRS, "I")
DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch,  ORRS, "I")
#else
// Thumb-2 has ORN, so inverted immediates are also usable
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_or,  ORRS, "IK")
DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch,  ORRS, "IK")
#endif
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_xor, EORS, "I")
DO_MBED_LOCKFREE_NEWVAL_2OPS(xor_fetch, EORS, "I")
#else // MBED_EXCLUSIVE_ACCESS_THUMB1
// Thumb-1: ADD/SUB take only small immediates, and the logical operations
// have register-only 2-operand forms
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_add, ADDS, "L")
DO_MBED_LOCKFREE_NEWVAL_2OPS(incr,      ADDS, "IJ")
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_sub, SUBS, "L")
DO_MBED_LOCKFREE_NEWVAL_2OPS(decr,      SUBS, "IJ")
DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_and, ANDS, "")
DO_MBED_LOCKFREE_NEWVAL_2OPS(and_fetch, ANDS, "")
DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_or,  ORRS, "")
DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch,  ORRS, "")
DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_xor, EORS, "")
DO_MBED_LOCKFREE_NEWVAL_2OPS(xor_fetch, EORS, "")
#endif

DO_MBED_LOCKFREE_EXCHG_OPS()
DO_MBED_LOCKFREE_CAS_STRONG_OPS()
DO_MBED_LOCKFREE_CAS_WEAK_OPS()
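/* Illustration (editorial, not in the original source): after the expansions
 * above, each operation exists as a suffixed C function whose LDREX/STREX
 * loop retries until the store-exclusive succeeds, e.g.:
 *
 *     volatile uint32_t counter = 0;
 *     uint32_t before = core_util_atomic_fetch_add_u32(&counter, 1);
 *     // before == 0, counter == 1, even if another context interleaves
 */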
#define DO_MBED_LOCKED_FETCH_OP_ORDERINGS(name) \
    DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint64_t, u64)
#define DO_MBED_LOCKED_CAS_ORDERINGS(name) \
    DO_MBED_LOCKED_CAS_ORDERING(name, uint64_t, u64)
#else // MBED_EXCLUSIVE_ACCESS

/* No assembler atomics - every size uses the locked implementations, so the
 * ordering wrappers must be generated for all of them */
#define DO_MBED_LOCKED_FETCH_OP_ORDERINGS(name) \
    DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint8_t,  u8)  \
    DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint16_t, u16) \
    DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint32_t, u32) \
    DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint64_t, u64)
#define DO_MBED_LOCKED_CAS_ORDERINGS(name) \
    DO_MBED_LOCKED_CAS_ORDERING(name, uint8_t,  u8)  \
    DO_MBED_LOCKED_CAS_ORDERING(name, uint16_t, u16) \
    DO_MBED_LOCKED_CAS_ORDERING(name, uint32_t, u32) \
    DO_MBED_LOCKED_CAS_ORDERING(name, uint64_t, u64)

MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *valuePtr, MBED_UNUSED mbed_memory_order order)
{
    return core_util_atomic_flag_test_and_set(valuePtr);
}

#endif // MBED_EXCLUSIVE_ACCESS

/* Lock-free loads and stores don't need assembler - just aligned accesses
 * with the appropriate barriers around them */
#define DO_MBED_LOCKFREE_LOADSTORE(T, V, fn_suffix) \
MBED_FORCEINLINE T core_util_atomic_load_##fn_suffix(T const V *valuePtr) \
{ \
    T value = *valuePtr; \
    MBED_BARRIER(); \
    return value; \
} \
\
MBED_FORCEINLINE T core_util_atomic_load_explicit_##fn_suffix(T const V *valuePtr, mbed_memory_order order) \
{ \
    MBED_CHECK_LOAD_ORDER(order); \
    T value = *valuePtr; \
    MBED_ACQUIRE_BARRIER(order); \
    return value; \
} \
\
MBED_FORCEINLINE void core_util_atomic_store_##fn_suffix(T V *valuePtr, T value) \
{ \
    MBED_BARRIER(); \
    *valuePtr = value; \
    MBED_BARRIER(); \
} \
\
MBED_FORCEINLINE void core_util_atomic_store_explicit_##fn_suffix(T V *valuePtr, T value, mbed_memory_order order) \
{ \
    MBED_CHECK_STORE_ORDER(order); \
    MBED_RELEASE_BARRIER(order); \
    *valuePtr = value; \
    MBED_SEQ_CST_BARRIER(order); \
}

MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr)
{
    MBED_BARRIER();
    flagPtr->_flag = false;
    MBED_BARRIER();
}

MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(volatile core_util_atomic_flag *flagPtr, mbed_memory_order order)
{
    MBED_CHECK_STORE_ORDER(order);
    MBED_RELEASE_BARRIER(order);
    flagPtr->_flag = false;
    MBED_SEQ_CST_BARRIER(order);
}

#ifdef __cplusplus
// Temporarily turn off extern "C", so we can provide non-volatile overloads
// for efficiency in C++
} // extern "C"

MBED_FORCEINLINE void core_util_atomic_flag_clear(core_util_atomic_flag *flagPtr) noexcept
{
    MBED_BARRIER();
    flagPtr->_flag = false;
    MBED_BARRIER();
}

MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(core_util_atomic_flag *flagPtr, mbed_memory_order order) noexcept
{
    MBED_RELEASE_BARRIER(order);
    flagPtr->_flag = false;
    MBED_SEQ_CST_BARRIER(order);
}
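/* Illustration (editorial, not in the original source): the explicit
 * load/store forms map orders onto the barrier macros above; a release store
 * paired with an acquire load gives message-passing semantics. Here `ready`
 * is a hypothetical volatile uint32_t flag:
 *
 *     // producer
 *     core_util_atomic_store_explicit_u32(&ready, 1, mbed_memory_order_release);
 *
 *     // consumer
 *     while (!core_util_atomic_load_explicit_u32(&ready, mbed_memory_order_acquire)) {
 *     }
 */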
DO_MBED_LOCKFREE_LOADSTORE(uint8_t,, u8)
DO_MBED_LOCKFREE_LOADSTORE(uint16_t,, u16)
DO_MBED_LOCKFREE_LOADSTORE(uint32_t,, u32)
DO_MBED_LOCKFREE_LOADSTORE(int8_t,, s8)
DO_MBED_LOCKFREE_LOADSTORE(int16_t,, s16)
DO_MBED_LOCKFREE_LOADSTORE(int32_t,, s32)
DO_MBED_LOCKFREE_LOADSTORE(bool,, bool)
DO_MBED_LOCKFREE_LOADSTORE(void *,, ptr)

extern "C" {
#endif

DO_MBED_LOCKFREE_LOADSTORE(uint8_t, volatile, u8)
DO_MBED_LOCKFREE_LOADSTORE(uint16_t, volatile, u16)
DO_MBED_LOCKFREE_LOADSTORE(uint32_t, volatile, u32)
DO_MBED_LOCKFREE_LOADSTORE(int8_t, volatile, s8)
DO_MBED_LOCKFREE_LOADSTORE(int16_t, volatile, s16)
DO_MBED_LOCKFREE_LOADSTORE(int32_t, volatile, s32)
DO_MBED_LOCKFREE_LOADSTORE(bool, volatile, bool)
DO_MBED_LOCKFREE_LOADSTORE(void *, volatile, ptr)
/* Signed variants are implemented by casting to the unsigned counterparts,
 * which have identical representation */
#define DO_MBED_SIGNED_CAS_OP(name, T, fn_suffix) \
MBED_FORCEINLINE bool core_util_atomic_##name##_s##fn_suffix(volatile T *ptr, \
        T *expectedCurrentValue, T desiredValue) \
{ \
    return core_util_atomic_##name##_u##fn_suffix((volatile u##T *)ptr, \
            (u##T *)expectedCurrentValue, (u##T)desiredValue); \
} \
\
MBED_FORCEINLINE bool core_util_atomic_##name##_explicit_s##fn_suffix(volatile T *ptr, \
        T *expectedCurrentValue, T desiredValue, \
        mbed_memory_order success, mbed_memory_order failure) \
{ \
    return core_util_atomic_##name##_explicit_u##fn_suffix((volatile u##T *)ptr, \
            (u##T *)expectedCurrentValue, (u##T)desiredValue, success, failure); \
}

#define DO_MBED_SIGNED_CAS_OPS(name) \
    DO_MBED_SIGNED_CAS_OP(name, int8_t,  8)  \
    DO_MBED_SIGNED_CAS_OP(name, int16_t, 16) \
    DO_MBED_SIGNED_CAS_OP(name, int32_t, 32) \
    DO_MBED_SIGNED_CAS_OP(name, int64_t, 64)

DO_MBED_SIGNED_CAS_OPS(cas)
DO_MBED_SIGNED_CAS_OPS(compare_exchange_weak)
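/* Illustration (editorial, not in the original source): the conventional
 * strong-CAS update loop, using the signed 32-bit variant generated above.
 * `value` is a hypothetical volatile int32_t:
 *
 *     int32_t expected = core_util_atomic_load_s32(&value);
 *     int32_t desired;
 *     do {
 *         desired = expected * 2;
 *     } while (!core_util_atomic_cas_s32(&value, &expected, desired));
 *     // on failure, cas writes the observed value back into `expected`
 */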
MBED_FORCEINLINE bool core_util_atomic_cas_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue)
{
    return core_util_atomic_cas_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue);
}

MBED_FORCEINLINE bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return core_util_atomic_cas_u32(
               (volatile uint32_t *)ptr,
               (uint32_t *)expectedCurrentValue,
               (uint32_t)desiredValue);
#else
    return core_util_atomic_cas_u64(
               (volatile uint64_t *)ptr,
               (uint64_t *)expectedCurrentValue,
               (uint64_t)desiredValue);
#endif
}

MBED_FORCEINLINE bool core_util_atomic_cas_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return core_util_atomic_cas_explicit_u32(
               (volatile uint32_t *)ptr,
               (uint32_t *)expectedCurrentValue,
               (uint32_t)desiredValue,
               success, failure);
#else
    return core_util_atomic_cas_explicit_u64(
               (volatile uint64_t *)ptr,
               (uint64_t *)expectedCurrentValue,
               (uint64_t)desiredValue,
               success, failure);
#endif
}

MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue)
{
    return core_util_atomic_compare_exchange_weak_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue);
}

MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return core_util_atomic_compare_exchange_weak_u32(
               (volatile uint32_t *)ptr,
               (uint32_t *)expectedCurrentValue,
               (uint32_t)desiredValue);
#else
    return core_util_atomic_compare_exchange_weak_u64(
               (volatile uint64_t *)ptr,
               (uint64_t *)expectedCurrentValue,
               (uint64_t)desiredValue);
#endif
}

MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return core_util_atomic_compare_exchange_weak_explicit_u32(
               (volatile uint32_t *)ptr,
               (uint32_t *)expectedCurrentValue,
               (uint32_t)desiredValue,
               success, failure);
#else
    return core_util_atomic_compare_exchange_weak_explicit_u64(
               (volatile uint64_t *)ptr,
               (uint64_t *)expectedCurrentValue,
               (uint64_t)desiredValue,
               success, failure);
#endif
}
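/* Illustration (editorial, not in the original source): core_util_atomic_cas_ptr
 * is enough for simple lock-free structures, e.g. pushing onto an intrusive
 * stack. `node` and `head` are hypothetical:
 *
 *     struct node { struct node *next; };
 *     static struct node *volatile head;
 *
 *     void push(struct node *n)
 *     {
 *         void *expected = core_util_atomic_load_ptr((void *const volatile *)&head);
 *         do {
 *             n->next = (struct node *)expected;
 *         } while (!core_util_atomic_cas_ptr((void *volatile *)&head, &expected, n));
 *     }
 */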
#define DO_MBED_SIGNED_FETCH_OP(name, T, fn_suffix) \
MBED_FORCEINLINE T core_util_atomic_##name##_s##fn_suffix(volatile T *valuePtr, T arg) \
{ \
    return (T)core_util_atomic_##name##_u##fn_suffix((volatile u##T *)valuePtr, (u##T)arg); \
}

#define DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, T, fn_suffix) \
MBED_FORCEINLINE T core_util_atomic_##name##_explicit_s##fn_suffix(volatile T *valuePtr, T arg, mbed_memory_order order) \
{ \
    return (T)core_util_atomic_##name##_explicit_u##fn_suffix((volatile u##T *)valuePtr, (u##T)arg, order); \
}

#define DO_MBED_SIGNED_FETCH_OPS(name) \
    DO_MBED_SIGNED_FETCH_OP(name, int8_t,  8)  \
    DO_MBED_SIGNED_FETCH_OP(name, int16_t, 16) \
    DO_MBED_SIGNED_FETCH_OP(name, int32_t, 32) \
    DO_MBED_SIGNED_FETCH_OP(name, int64_t, 64)

#define DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(name) \
    DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int8_t,  8)  \
    DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int16_t, 16) \
    DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int32_t, 32) \
    DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int64_t, 64)

DO_MBED_SIGNED_FETCH_OPS(exchange)
DO_MBED_SIGNED_FETCH_OPS(incr)
DO_MBED_SIGNED_FETCH_OPS(decr)
DO_MBED_SIGNED_FETCH_OPS(fetch_add)
DO_MBED_SIGNED_FETCH_OPS(fetch_sub)

DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(exchange)
DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(fetch_add)
DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(fetch_sub)
MBED_FORCEINLINE bool core_util_atomic_exchange_bool(volatile bool *valuePtr, bool desiredValue)
{
    return (bool)core_util_atomic_exchange_u8((volatile uint8_t *)valuePtr, desiredValue);
}

MBED_FORCEINLINE bool core_util_atomic_exchange_explicit_bool(volatile bool *valuePtr, bool desiredValue, mbed_memory_order order)
{
    return (bool)core_util_atomic_exchange_explicit_u8((volatile uint8_t *)valuePtr, desiredValue, order);
}

MBED_FORCEINLINE void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_exchange_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue);
#else
    return (void *)core_util_atomic_exchange_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue);
#endif
}

MBED_FORCEINLINE void *core_util_atomic_exchange_explicit_ptr(void *volatile *valuePtr, void *desiredValue, mbed_memory_order order)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_exchange_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue, order);
#else
    return (void *)core_util_atomic_exchange_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue, order);
#endif
}

MBED_FORCEINLINE void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_incr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta);
#else
    return (void *)core_util_atomic_incr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta);
#endif
}

MBED_FORCEINLINE void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_decr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta);
#else
    return (void *)core_util_atomic_decr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta);
#endif
}

MBED_FORCEINLINE void *core_util_atomic_fetch_add_ptr(void *volatile *valuePtr, ptrdiff_t arg)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_fetch_add_u32((volatile uint32_t *)valuePtr, (uint32_t)arg);
#else
    return (void *)core_util_atomic_fetch_add_u64((volatile uint64_t *)valuePtr, (uint64_t)arg);
#endif
}

MBED_FORCEINLINE void *core_util_atomic_fetch_add_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_fetch_add_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)arg, order);
#else
    return (void *)core_util_atomic_fetch_add_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)arg, order);
#endif
}

MBED_FORCEINLINE void *core_util_atomic_fetch_sub_ptr(void *volatile *valuePtr, ptrdiff_t arg)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_fetch_sub_u32((volatile uint32_t *)valuePtr, (uint32_t)arg);
#else
    return (void *)core_util_atomic_fetch_sub_u64((volatile uint64_t *)valuePtr, (uint64_t)arg);
#endif
}

MBED_FORCEINLINE void *core_util_atomic_fetch_sub_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_fetch_sub_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)arg, order);
#else
    return (void *)core_util_atomic_fetch_sub_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)arg, order);
#endif
}

MBED_FORCEINLINE uint64_t core_util_atomic_load_explicit_u64(const volatile uint64_t *valuePtr, MBED_UNUSED mbed_memory_order order)
{
    MBED_CHECK_LOAD_ORDER(order);
    return core_util_atomic_load_u64(valuePtr);
}

MBED_FORCEINLINE int64_t core_util_atomic_load_explicit_s64(const volatile int64_t *valuePtr, MBED_UNUSED mbed_memory_order order)
{
    MBED_CHECK_LOAD_ORDER(order);
    return core_util_atomic_load_s64(valuePtr);
}

MBED_FORCEINLINE void core_util_atomic_store_explicit_u64(volatile uint64_t *valuePtr, uint64_t desiredValue, MBED_UNUSED mbed_memory_order order)
{
    MBED_CHECK_STORE_ORDER(order);
    core_util_atomic_store_u64(valuePtr, desiredValue);
}

MBED_FORCEINLINE void core_util_atomic_store_explicit_s64(volatile int64_t *valuePtr, int64_t desiredValue, MBED_UNUSED mbed_memory_order order)
{
    MBED_CHECK_STORE_ORDER(order);
    core_util_atomic_store_s64(valuePtr, desiredValue);
}
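/* Note (editorial assumption): the void * fetch_add/fetch_sub/incr/decr forms
 * above apply the argument directly to the pointer's integer value, i.e. as a
 * raw byte offset - no sizeof scaling happens at this level. The typed C++
 * templates later in this file appear to apply the sizeof(T) scaling instead.
 *
 *     char buf[16];
 *     void *volatile p = buf;
 *     core_util_atomic_fetch_add_ptr(&p, 4);  // p advances by 4 bytes
 */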
#define DO_MBED_LOCKED_FETCH_OP_ORDERING(name, T, fn_suffix) \
MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
    volatile T *valuePtr, T arg, MBED_UNUSED mbed_memory_order order) \
{ \
    return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \
}

#define DO_MBED_LOCKED_CAS_ORDERING(name, T, fn_suffix) \
MBED_FORCEINLINE bool core_util_atomic_##name##_explicit_##fn_suffix( \
    volatile T *ptr, T *expectedCurrentValue, T desiredValue, \
    MBED_UNUSED mbed_memory_order success, \
    MBED_UNUSED mbed_memory_order failure) \
{ \
    MBED_CHECK_CAS_ORDER(success, failure); \
    return core_util_atomic_##name##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \
}

DO_MBED_LOCKED_FETCH_OP_ORDERINGS(exchange)
DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_add)
DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_sub)
DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_and)
DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_or)
DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_xor)
DO_MBED_LOCKED_CAS_ORDERINGS(cas)
DO_MBED_LOCKED_CAS_ORDERINGS(compare_exchange_weak)
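/* Illustration (editorial, not in the original source): with the orderings
 * generated above, an explicit-order call such as
 *
 *     uint64_t old = core_util_atomic_fetch_add_explicit_u64(&total, 5,
 *                        mbed_memory_order_relaxed);
 *
 * simply forwards to core_util_atomic_fetch_add_u64 - the underlying
 * implementation is fully ordered anyway, so only the debug-mode order
 * validation remains. (`total` is a hypothetical volatile uint64_t.)
 */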
#ifdef __cplusplus
} // extern "C"

// C++ template section - type-deduced overloads forwarding to the suffixed C
// functions above; the primary templates are declared in mbed_atomic.h

#define DO_MBED_ATOMIC_LOAD_TEMPLATE(T, fn_suffix) \
template<> \
inline T core_util_atomic_load(const volatile T *valuePtr) noexcept \
{ \
    return core_util_atomic_load_##fn_suffix(valuePtr); \
} \
\
template<> \
inline T core_util_atomic_load(const T *valuePtr) noexcept \
{ \
    return core_util_atomic_load_##fn_suffix(valuePtr); \
} \
\
template<> \
inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) noexcept \
{ \
    return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
} \
\
template<> \
inline T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) noexcept \
{ \
    return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
}

template<typename T>
inline T *core_util_atomic_load(T *const volatile *valuePtr) noexcept
{
    return (T *)core_util_atomic_load_ptr((void *const volatile *)valuePtr);
}

template<typename T>
inline T *core_util_atomic_load(T *const *valuePtr) noexcept
{
    return (T *)core_util_atomic_load_ptr((void *const volatile *)valuePtr);
}

template<typename T>
inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order) noexcept
{
    return (T *)core_util_atomic_load_explicit_ptr((void *const volatile *)valuePtr, order);
}

template<typename T>
inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order) noexcept
{
    return (T *)core_util_atomic_load_explicit_ptr((void *const volatile *)valuePtr, order);
}

DO_MBED_ATOMIC_LOAD_TEMPLATE(uint8_t, u8)
DO_MBED_ATOMIC_LOAD_TEMPLATE(uint16_t, u16)
DO_MBED_ATOMIC_LOAD_TEMPLATE(uint32_t, u32)
DO_MBED_ATOMIC_LOAD_TEMPLATE(uint64_t, u64)
DO_MBED_ATOMIC_LOAD_TEMPLATE(int8_t, s8)
DO_MBED_ATOMIC_LOAD_TEMPLATE(int16_t, s16)
DO_MBED_ATOMIC_LOAD_TEMPLATE(int32_t, s32)
DO_MBED_ATOMIC_LOAD_TEMPLATE(int64_t, s64)
DO_MBED_ATOMIC_LOAD_TEMPLATE(bool, bool)
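// Illustration (editorial, not in the original source): in C++ the templates
// above let callers drop the type suffix; deduction selects the matching C
// function:
//
//     volatile uint16_t level = 0;
//     uint16_t v = core_util_atomic_load(&level);  // -> core_util_atomic_load_u16
//
//     int *volatile p = nullptr;
//     int *q = core_util_atomic_load(&p);          // -> core_util_atomic_load_ptr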
#define DO_MBED_ATOMIC_STORE_TEMPLATE(T, fn_suffix) \
template<> \
inline void core_util_atomic_store(volatile T *valuePtr, T val) noexcept \
{ \
    core_util_atomic_store_##fn_suffix(valuePtr, val); \
} \
\
template<> \
inline void core_util_atomic_store(T *valuePtr, T val) noexcept \
{ \
    core_util_atomic_store_##fn_suffix(valuePtr, val); \
} \
\
template<> \
inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) noexcept \
{ \
    core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
} \
\
template<> \
inline void core_util_atomic_store_explicit(T *valuePtr, T val, mbed_memory_order order) noexcept \
{ \
    core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
}

template<typename T>
inline void core_util_atomic_store(T *volatile *valuePtr, T *val) noexcept
{
    core_util_atomic_store_ptr((void *volatile *)valuePtr, val);
}

template<typename T>
inline void core_util_atomic_store(T **valuePtr, T *val) noexcept
{
    core_util_atomic_store_ptr((void *volatile *)valuePtr, val);
}

template<typename T>
inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *val, mbed_memory_order order) noexcept
{
    core_util_atomic_store_explicit_ptr((void *volatile *)valuePtr, val, order);
}

template<typename T>
inline void core_util_atomic_store_explicit(T **valuePtr, T *val, mbed_memory_order order) noexcept
{
    core_util_atomic_store_explicit_ptr((void *volatile *)valuePtr, val, order);
}

DO_MBED_ATOMIC_STORE_TEMPLATE(uint8_t, u8)
DO_MBED_ATOMIC_STORE_TEMPLATE(uint16_t, u16)
DO_MBED_ATOMIC_STORE_TEMPLATE(uint32_t, u32)
DO_MBED_ATOMIC_STORE_TEMPLATE(uint64_t, u64)
DO_MBED_ATOMIC_STORE_TEMPLATE(int8_t, s8)
DO_MBED_ATOMIC_STORE_TEMPLATE(int16_t, s16)
DO_MBED_ATOMIC_STORE_TEMPLATE(int32_t, s32)
DO_MBED_ATOMIC_STORE_TEMPLATE(int64_t, s64)
DO_MBED_ATOMIC_STORE_TEMPLATE(bool, bool)
#define DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, T, fn_suffix) \
template<> inline \
bool core_util_atomic_##tname(volatile T *ptr, T *expectedCurrentValue, T desiredValue) noexcept \
{ \
    return core_util_atomic_##fname##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \
}

template<typename T>
inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept
{
    return core_util_atomic_cas_ptr((void *volatile *)ptr, (void **)expectedCurrentValue, desiredValue);
}

template<typename T>
inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept
{
    return core_util_atomic_compare_exchange_weak_ptr((void *volatile *)ptr, (void **)expectedCurrentValue, desiredValue);
}

#define DO_MBED_ATOMIC_CAS_TEMPLATES(tname, fname) \
    DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint8_t, u8)   \
    DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint16_t, u16) \
    DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint32_t, u32) \
    DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint64_t, u64) \
    DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int8_t, s8)    \
    DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int16_t, s16)  \
    DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int32_t, s32)  \
    DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int64_t, s64)  \
    DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, bool, bool)

DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_strong, cas)
DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_weak, compare_exchange_weak)
#define DO_MBED_ATOMIC_OP_TEMPLATE(name, T, fn_suffix) \
template<> \
inline T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept \
{ \
    return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \
} \
\
template<> \
inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \
                                            mbed_memory_order order) noexcept \
{ \
    return core_util_atomic_##name##_explicit_##fn_suffix(valuePtr, arg, order); \
}

template<>
inline bool core_util_atomic_exchange(volatile bool *valuePtr, bool arg) noexcept
{
    return core_util_atomic_exchange_bool(valuePtr, arg);
}

template<>
inline bool core_util_atomic_exchange_explicit(volatile bool *valuePtr, bool arg, mbed_memory_order order) noexcept
{
    return core_util_atomic_exchange_explicit_bool(valuePtr, arg, order);
}

template<typename T>
inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *arg) noexcept
{
    return (T *)core_util_atomic_exchange_ptr((void *volatile *)valuePtr, arg);
}

template<typename T>
inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *arg, mbed_memory_order order) noexcept
{
    return (T *)core_util_atomic_exchange_explicit_ptr((void *volatile *)valuePtr, arg, order);
}

template<typename T>
inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg) noexcept
{
    return (T *)core_util_atomic_fetch_add_ptr((void *volatile *)valuePtr, arg * sizeof(T));
}

template<typename T>
inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept
{
    return (T *)core_util_atomic_fetch_add_explicit_ptr((void *volatile *)valuePtr, arg * sizeof(T), order);
}

template<typename T>
inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg) noexcept
{
    return (T *)core_util_atomic_fetch_sub_ptr((void *volatile *)valuePtr, arg * sizeof(T));
}

template<typename T>
inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept
{
    return (T *)core_util_atomic_fetch_sub_explicit_ptr((void *volatile *)valuePtr, arg * sizeof(T), order);
}
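// Illustration (editorial, not in the original source): unlike the raw void *
// C functions, the typed templates above scale by sizeof(T), matching normal
// pointer arithmetic:
//
//     static int table[8];
//     int *volatile cursor = table;
//     int *prev = core_util_atomic_fetch_add(&cursor, 2);
//     // prev == table, cursor == table + 2 (advanced by 2 ints, not 2 bytes)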
#define DO_MBED_ATOMIC_OP_U_TEMPLATES(name) \
    DO_MBED_ATOMIC_OP_TEMPLATE(name, uint8_t, u8)   \
    DO_MBED_ATOMIC_OP_TEMPLATE(name, uint16_t, u16) \
    DO_MBED_ATOMIC_OP_TEMPLATE(name, uint32_t, u32) \
    DO_MBED_ATOMIC_OP_TEMPLATE(name, uint64_t, u64)

#define DO_MBED_ATOMIC_OP_S_TEMPLATES(name) \
    DO_MBED_ATOMIC_OP_TEMPLATE(name, int8_t, s8)   \
    DO_MBED_ATOMIC_OP_TEMPLATE(name, int16_t, s16) \
    DO_MBED_ATOMIC_OP_TEMPLATE(name, int32_t, s32) \
    DO_MBED_ATOMIC_OP_TEMPLATE(name, int64_t, s64)

#define DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, T, fn_suffix, postname, OP) \
template<> \
inline T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept \
{ \
    return core_util_atomic_##postname##_##fn_suffix(valuePtr, arg) OP; \
} \
\
template<> \
inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \
                                            mbed_memory_order order) noexcept \
{ \
    return core_util_atomic_##postname##_explicit_##fn_suffix(valuePtr, arg, order) OP; \
}

DO_MBED_ATOMIC_OP_U_TEMPLATES(exchange)
DO_MBED_ATOMIC_OP_S_TEMPLATES(exchange)
DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_add)
DO_MBED_ATOMIC_OP_S_TEMPLATES(fetch_add)
DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_sub)
DO_MBED_ATOMIC_OP_S_TEMPLATES(fetch_sub)
DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_and)
DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_or)
DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_xor)
1309 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(decr, fetch_sub, - arg)
1310 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(and_fetch, fetch_and, & arg)
1311 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(or_fetch, fetch_or, | arg)
1312 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(xor_fetch, fetch_xor, ^ arg)
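// Illustration (editorial, not in the original source): where no dedicated
// implementation exists, a pre-op form is derived by reapplying OP to the
// fetched value, e.g. incr becomes fetch_add(valuePtr, arg) + arg:
//
//     volatile uint64_t total = 10;
//     uint64_t now = core_util_atomic_incr(&total, 5);  // now == 15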
#endif // __cplusplus

#undef MBED_DOP_REG
#undef MBED_CMP_IMM
#undef MBED_SUB3_IMM
#undef DO_MBED_LOCKFREE_EXCHG_ASM
#undef DO_MBED_LOCKFREE_NEWVAL_2OP_ASM
#undef DO_MBED_LOCKFREE_OLDVAL_3OP_ASM
#undef DO_MBED_LOCKFREE_OLDVAL_2OP_ASM
#undef DO_MBED_LOCKFREE_CAS_WEAK_ASM
#undef DO_MBED_LOCKFREE_CAS_STRONG_ASM
#undef DO_MBED_LOCKFREE_LOADSTORE
#undef DO_MBED_LOCKFREE_EXCHG_OP
#undef DO_MBED_LOCKFREE_CAS_WEAK_OP
#undef DO_MBED_LOCKFREE_CAS_STRONG_OP
#undef DO_MBED_LOCKFREE_NEWVAL_2OP
#undef DO_MBED_LOCKFREE_OLDVAL_2OP
#undef DO_MBED_LOCKFREE_OLDVAL_3OP
#undef DO_MBED_LOCKFREE_EXCHG_OPS
#undef DO_MBED_LOCKFREE_NEWVAL_2OPS
#undef DO_MBED_LOCKFREE_OLDVAL_2OPS
#undef DO_MBED_LOCKFREE_OLDVAL_3OPS
#undef DO_MBED_LOCKFREE_CAS_WEAK_OPS
#undef DO_MBED_LOCKFREE_CAS_STRONG_OPS
#undef DO_MBED_SIGNED_CAS_OP
#undef DO_MBED_SIGNED_CAS_OPS
#undef DO_MBED_SIGNED_FETCH_OP
#undef DO_MBED_SIGNED_EXPLICIT_FETCH_OP
#undef DO_MBED_SIGNED_FETCH_OPS
#undef DO_MBED_SIGNED_EXPLICIT_FETCH_OPS
#undef DO_MBED_LOCKED_FETCH_OP_ORDERINGS
#undef DO_MBED_LOCKED_CAS_ORDERINGS
#undef MBED_ACQUIRE_BARRIER
#undef MBED_RELEASE_BARRIER
#undef MBED_SEQ_CST_BARRIER
#undef DO_MBED_ATOMIC_LOAD_TEMPLATE
#undef DO_MBED_ATOMIC_STORE_TEMPLATE
#undef DO_MBED_ATOMIC_EXCHANGE_TEMPLATE
#undef DO_MBED_ATOMIC_CAS_TEMPLATE
#undef DO_MBED_ATOMIC_CAS_TEMPLATES
#undef DO_MBED_ATOMIC_FETCH_TEMPLATE
#undef DO_MBED_ATOMIC_FETCH_U_TEMPLATES
#undef DO_MBED_ATOMIC_FETCH_S_TEMPLATES

#endif // __MBED_ATOMIC_IMPL_H__