#ifndef __MBED_ATOMIC_IMPL_H__
#define __MBED_ATOMIC_IMPL_H__

#ifndef __MBED_UTIL_ATOMIC_H__
#error "mbed_atomic_impl.h is designed to be included only by mbed_atomic.h"
#endif

#include "platform/mbed_assert.h"
#include "platform/mbed_toolchain.h"

#ifdef __cplusplus
extern "C" {
#endif

#ifdef MBED_DEBUG
/* Plain loads must not have "release" or "acquire+release" order */
#define MBED_CHECK_LOAD_ORDER(order) MBED_ASSERT((order) != mbed_memory_order_release && (order) != mbed_memory_order_acq_rel)

/* Plain stores must not have "consume", "acquire" or "acquire+release" order */
#define MBED_CHECK_STORE_ORDER(order) MBED_ASSERT((order) != mbed_memory_order_consume && (order) != mbed_memory_order_acquire && (order) != mbed_memory_order_acq_rel)

/* Compare-exchange failure order must be no stronger than the success order,
 * and must not be "release" or "acquire+release" */
#define MBED_CHECK_CAS_ORDER(success, failure) \
    MBED_ASSERT((failure) <= (success) && (failure) != mbed_memory_order_release && (failure) != mbed_memory_order_acq_rel)
#else
#define MBED_CHECK_LOAD_ORDER(order) (void)0
#define MBED_CHECK_STORE_ORDER(order) (void)0
#define MBED_CHECK_CAS_ORDER(success, failure) (void)0
#endif

#ifdef __MBED__
#define MBED_ATOMIC_PTR_SIZE 32
#else
#define MBED_ATOMIC_PTR_SIZE 64
#endif

/* Barrier macros parameterised by memory order - a barrier is emitted only
 * when the requested order actually needs one at that point. */
#define MBED_ACQUIRE_BARRIER(order) do { \
    if ((order) & (mbed_memory_order_consume|mbed_memory_order_acquire)) { \
        MBED_BARRIER(); \
    } } while (0)

#define MBED_RELEASE_BARRIER(order) do { \
    if ((order) & mbed_memory_order_release) { \
        MBED_BARRIER(); \
    } } while (0)

#define MBED_SEQ_CST_BARRIER(order) do { \
    if ((order) == mbed_memory_order_seq_cst) { \
        MBED_BARRIER(); \
    } } while (0)

#if MBED_EXCLUSIVE_ACCESS

/* Register and immediate constraints for the inline assembler, depending on
 * whether we are limited to 16-bit Thumb-1 encodings */
#if MBED_EXCLUSIVE_ACCESS_THUMB1
#define MBED_DOP_REG "l"    // Need low register to get 16-bit 3-op ADD/SUB
#define MBED_CMP_IMM "I"    // CMP 8-bit immediate
#define MBED_SUB3_IMM "L"   // -7 to +7
#else
#define MBED_DOP_REG "r"    // Can use 32-bit 3-op ADD/SUB, so any registers
#define MBED_CMP_IMM "IL"   // CMP or CMN, 12-bit immediate
#define MBED_SUB3_IMM "IL"  // SUB or ADD, 12-bit immediate
#endif

#ifdef __CC_ARM /* ARM Compiler 5 - embedded assembler */
#pragma diag_suppress 3732
#define DO_MBED_LOCKFREE_EXCHG_ASM(M)                           \
    __asm {                                                     \
        LDREX##M    oldValue, [valuePtr] ;                      \
        STREX##M    fail, newValue, [valuePtr]                  \
    }
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_EXCHG_ASM(M)                           \
    __asm volatile (                                            \
        ".syntax unified\n\t"                                   \
        "LDREX"#M "\t%[oldValue], %[value]\n\t"                 \
        "STREX"#M "\t%[fail], %[newValue], %[value]\n\t"        \
        : [oldValue] "=&r" (oldValue),                          \
          [fail] "=&r" (fail),                                  \
          [value] "+Q" (*valuePtr)                              \
        : [newValue] "r" (newValue)                             \
    )
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_EXCHG_ASM(M)                           \
    asm volatile (                                              \
        "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n"              \
        "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n"     \
        : [oldValue] "=&r" (oldValue),                          \
          [fail] "=&r" (fail)                                   \
        : [valuePtr] "r" (valuePtr),                            \
          [newValue] "r" (newValue)                             \
        : "memory"                                              \
    )
#endif

#ifdef __CC_ARM
#define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M)       \
    __asm {                                                     \
        LDREX##M    newValue, [valuePtr] ;                      \
        OP          newValue, arg ;                             \
        STREX##M    fail, newValue, [valuePtr]                  \
    }
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M)       \
    __asm volatile (                                            \
        ".syntax unified\n\t"                                   \
        "LDREX"#M "\t%[newValue], %[value]\n\t"                 \
        #OP       "\t%[newValue], %[arg]\n\t"                   \
        "STREX"#M "\t%[fail], %[newValue], %[value]\n\t"        \
        : [newValue] "=&" MBED_DOP_REG (newValue),              \
          [fail] "=&r" (fail),                                  \
          [value] "+Q" (*valuePtr)                              \
        : [arg] Constants MBED_DOP_REG (arg)                    \
        : "cc"                                                  \
    )
#elif defined __ICCARM__
/* Constants is unused here - the operand is always taken in a register */
#define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M)       \
    asm volatile (                                              \
        "LDREX"#M "\t%[newValue], [%[valuePtr]]\n"              \
        #OP       "\t%[newValue], %[newValue], %[arg]\n"        \
        "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n"     \
        : [newValue] "=&r" (newValue),                          \
          [fail] "=&r" (fail)                                   \
        : [valuePtr] "r" (valuePtr),                            \
          [arg] "r" (arg)                                       \
        : "memory", "cc"                                        \
    )
#endif

#ifdef __CC_ARM
#define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M)       \
    __asm {                                                     \
        LDREX##M    oldValue, [valuePtr] ;                      \
        OP          newValue, oldValue, arg ;                   \
        STREX##M    fail, newValue, [valuePtr]                  \
    }
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M)       \
    __asm volatile (                                            \
        ".syntax unified\n\t"                                   \
        "LDREX"#M "\t%[oldValue], %[value]\n\t"                 \
        #OP       "\t%[newValue], %[oldValue], %[arg]\n\t"      \
        "STREX"#M "\t%[fail], %[newValue], %[value]\n\t"        \
        : [oldValue] "=&" MBED_DOP_REG (oldValue),              \
          [newValue] "=&" MBED_DOP_REG (newValue),              \
          [fail] "=&r" (fail),                                  \
          [value] "+Q" (*valuePtr)                              \
        : [arg] Constants MBED_DOP_REG (arg)                    \
        : "cc"                                                  \
    )
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M)       \
    asm volatile (                                              \
        "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n"              \
        #OP       "\t%[newValue], %[oldValue], %[arg]\n"        \
        "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n"     \
        : [oldValue] "=&r" (oldValue),                          \
          [newValue] "=&r" (newValue),                          \
          [fail] "=&r" (fail)                                   \
        : [valuePtr] "r" (valuePtr),                            \
          [arg] "r" (arg)                                       \
        : "memory", "cc"                                        \
    )
#endif

/* Thumb-1 has no 3-operand logical instructions, so copy the old value
 * first, then apply a 2-operand op to the copy */
#ifdef __CC_ARM
#define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M)       \
    __asm {                                                     \
        LDREX##M    oldValue, [valuePtr] ;                      \
        MOV         newValue, oldValue ;                        \
        OP          newValue, arg ;                             \
        STREX##M    fail, newValue, [valuePtr]                  \
    }
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M)       \
    __asm volatile (                                            \
        ".syntax unified\n\t"                                   \
        "LDREX"#M "\t%[oldValue], %[value]\n\t"                 \
        "MOV"     "\t%[newValue], %[oldValue]\n\t"              \
        #OP       "\t%[newValue], %[arg]\n\t"                   \
        "STREX"#M "\t%[fail], %[newValue], %[value]\n\t"        \
        : [oldValue] "=&r" (oldValue),                          \
          [newValue] "=&l" (newValue),                          \
          [fail] "=&r" (fail),                                  \
          [value] "+Q" (*valuePtr)                              \
        : [arg] Constants "l" (arg)                             \
        : "cc"                                                  \
    )
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M)       \
    asm volatile (                                              \
        "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n"              \
        "MOV"     "\t%[newValue], %[oldValue]\n"                \
        #OP       "\t%[newValue], %[arg]\n"                     \
        "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n"     \
        : [oldValue] "=&r" (oldValue),                          \
          [newValue] "=&r" (newValue),                          \
          [fail] "=&r" (fail)                                   \
        : [valuePtr] "r" (valuePtr),                            \
          [arg] "r" (arg)                                       \
        : "memory", "cc"                                        \
    )
#endif

/* In ARM state the store can be made conditional, avoiding a branch */
#if MBED_EXCLUSIVE_ACCESS_ARM
#ifdef __CC_ARM
#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M)                        \
    __asm {                                                     \
        LDREX##M     oldValue, [ptr] ;                          \
        SUBS         fail, oldValue, expectedValue ;            \
        STREX##M##EQ fail, desiredValue, [ptr]                  \
    }
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M)                        \
    __asm volatile (                                            \
        ".syntax unified\n\t"                                   \
        "LDREX"#M "\t%[oldValue], %[value]\n\t"                 \
        "SUBS"    "\t%[fail], %[oldValue], %[expectedValue]\n\t"\
        "STREX"#M"EQ\t%[fail], %[desiredValue], %[value]\n\t"   \
        : [oldValue] "=&r" (oldValue),                          \
          [fail] "=&r" (fail),                                  \
          [value] "+Q" (*ptr)                                   \
        : [desiredValue] "r" (desiredValue),                    \
          [expectedValue] "ILr" (expectedValue)                 \
        : "cc"                                                  \
    )
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M)                        \
    asm volatile (                                              \
        "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n"              \
        "SUBS"    "\t%[fail], %[oldValue], %[expectedValue]\n"  \
        "STREX"#M"EQ\t%[fail], %[desiredValue], [%[valuePtr]]\n"\
        : [oldValue] "=&r" (oldValue),                          \
          [fail] "=&r" (fail)                                   \
        : [desiredValue] "r" (desiredValue),                    \
          [expectedValue] "r" (expectedValue),                  \
          [valuePtr] "r" (ptr)                                  \
        : "memory", "cc"                                        \
    )
#endif
#else // MBED_EXCLUSIVE_ACCESS_ARM
/* In Thumb state, branch over the store on mismatch */
#ifdef __CC_ARM
#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M)                        \
    __asm {                                                     \
        LDREX##M    oldValue, [ptr] ;                           \
        SUBS        fail, oldValue, expectedValue ;             \
        BNE         done ;                                      \
        STREX##M    fail, desiredValue, [ptr] ;                 \
done:                                                           \
    }
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M)                        \
    __asm volatile (                                            \
        ".syntax unified\n\t"                                   \
        "LDREX"#M "\t%[oldValue], %[value]\n\t"                 \
        "SUBS"    "\t%[fail], %[oldValue], %[expectedValue]\n\t"\
        "BNE"     "\t1f\n\t"                                    \
        "STREX"#M "\t%[fail], %[desiredValue], %[value]\n"      \
        "1:"                                                    \
        : [oldValue] "=&" MBED_DOP_REG (oldValue),              \
          [fail] "=&" MBED_DOP_REG (fail),                      \
          [value] "+Q" (*ptr)                                   \
        : [desiredValue] "r" (desiredValue),                    \
          [expectedValue] MBED_SUB3_IMM MBED_DOP_REG (expectedValue) \
        : "cc"                                                  \
    )
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M)                        \
    asm volatile (                                              \
        "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n"              \
        "SUBS"    "\t%[fail], %[oldValue], %[expectedValue]\n"  \
        "BNE.N"   "\tdone\n"                                    \
        "STREX"#M "\t%[fail], %[desiredValue], [%[valuePtr]]\n" \
        "done:\n"                                               \
        : [oldValue] "=&r" (oldValue),                          \
          [fail] "=&r" (fail)                                   \
        : [desiredValue] "r" (desiredValue),                    \
          [expectedValue] "r" (expectedValue),                  \
          [valuePtr] "r" (ptr)                                  \
        : "memory", "cc"                                        \
    )
#endif
#endif // MBED_EXCLUSIVE_ACCESS_ARM

/* The strong form retries the LDREX/STREX until the store succeeds or the
 * value no longer matches, hiding spurious STREX failures from the caller */
#ifdef __CC_ARM
#define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M)                      \
    __asm {                                                     \
retry: ;                                                        \
        LDREX##M    oldValue, [ptr] ;                           \
        SUBS        fail, oldValue, expectedValue ;             \
        BNE         done ;                                      \
        STREX##M    fail, desiredValue, [ptr] ;                 \
        CMP         fail, #0 ;                                  \
        BNE         retry ;                                     \
done:                                                           \
    }
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M)                      \
    __asm volatile (                                            \
        ".syntax unified\n"                                     \
        "1:\n\t"                                                \
        "LDREX"#M "\t%[oldValue], %[value]\n\t"                 \
        "SUBS"    "\t%[fail], %[oldValue], %[expectedValue]\n\t"\
        "BNE"     "\t2f\n\t"                                    \
        "STREX"#M "\t%[fail], %[desiredValue], %[value]\n\t"    \
        "CMP"     "\t%[fail], #0\n\t"                           \
        "BNE"     "\t1b\n"                                      \
        "2:"                                                    \
        : [oldValue] "=&" MBED_DOP_REG (oldValue),              \
          [fail] "=&" MBED_DOP_REG (fail),                      \
          [value] "+Q" (*ptr)                                   \
        : [desiredValue] "r" (desiredValue),                    \
          [expectedValue] MBED_SUB3_IMM MBED_DOP_REG (expectedValue) \
        : "cc"                                                  \
    )
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M)                      \
    asm volatile (                                              \
        "retry:\n"                                              \
        "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n"              \
        "SUBS"    "\t%[fail], %[oldValue], %[expectedValue]\n"  \
        "BNE.N"   "\tdone\n"                                    \
        "STREX"#M "\t%[fail], %[desiredValue], [%[valuePtr]]\n" \
        "CMP"     "\t%[fail], #0\n"                             \
        "BNE.N"   "\tretry\n"                                   \
        "done:\n"                                               \
        : [oldValue] "=&r" (oldValue),                          \
          [fail] "=&r" (fail)                                   \
        : [desiredValue] "r" (desiredValue),                    \
          [expectedValue] "r" (expectedValue),                  \
          [valuePtr] "r" (ptr)                                  \
        : "memory", "cc"                                        \
    )
#endif

/* C wrappers around the assembler fragments. The non-explicit forms are
 * sequentially consistent; the explicit forms emit only the barriers the
 * requested order needs. */
#define DO_MBED_LOCKFREE_EXCHG_OP(T, fn_suffix, M)              \
inline T core_util_atomic_exchange_##fn_suffix(volatile T *valuePtr, T newValue) \
{                                                               \
    T oldValue;                                                 \
    uint32_t fail;                                              \
    MBED_BARRIER();                                             \
    do {                                                        \
        DO_MBED_LOCKFREE_EXCHG_ASM(M);                          \
    } while (fail);                                             \
    MBED_BARRIER();                                             \
    return oldValue;                                            \
}                                                               \
                                                                \
MBED_FORCEINLINE T core_util_atomic_exchange_explicit_##fn_suffix( \
    volatile T *valuePtr, T newValue, mbed_memory_order order)  \
{                                                               \
    T oldValue;                                                 \
    uint32_t fail;                                              \
    MBED_RELEASE_BARRIER(order);                                \
    do {                                                        \
        DO_MBED_LOCKFREE_EXCHG_ASM(M);                          \
    } while (fail);                                             \
    MBED_ACQUIRE_BARRIER(order);                                \
    return oldValue;                                            \
}

#define DO_MBED_LOCKFREE_CAS_WEAK_OP(T, fn_suffix, M)           \
inline bool core_util_atomic_compare_exchange_weak_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \
{                                                               \
    MBED_BARRIER();                                             \
    T oldValue;                                                 \
    uint32_t fail, expectedValue = *expectedCurrentValue;       \
    DO_MBED_LOCKFREE_CAS_WEAK_ASM(M);                           \
    if (fail) {                                                 \
        *expectedCurrentValue = oldValue;                       \
    }                                                           \
    MBED_BARRIER();                                             \
    return !fail;                                               \
}                                                               \
                                                                \
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) \
{                                                               \
    MBED_CHECK_CAS_ORDER(success, failure);                     \
    MBED_RELEASE_BARRIER(success);                              \
    T oldValue;                                                 \
    uint32_t fail, expectedValue = *expectedCurrentValue;       \
    DO_MBED_LOCKFREE_CAS_WEAK_ASM(M);                           \
    if (fail) {                                                 \
        *expectedCurrentValue = oldValue;                       \
    }                                                           \
    MBED_ACQUIRE_BARRIER(fail ? failure : success);             \
    return !fail;                                               \
}

#define DO_MBED_LOCKFREE_CAS_STRONG_OP(T, fn_suffix, M)         \
inline bool core_util_atomic_cas_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \
{                                                               \
    MBED_BARRIER();                                             \
    T oldValue;                                                 \
    uint32_t fail, expectedValue = *expectedCurrentValue;       \
    DO_MBED_LOCKFREE_CAS_STRONG_ASM(M);                         \
    if (fail) {                                                 \
        *expectedCurrentValue = oldValue;                       \
    }                                                           \
    MBED_BARRIER();                                             \
    return !fail;                                               \
}                                                               \
                                                                \
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) \
{                                                               \
    MBED_CHECK_CAS_ORDER(success, failure);                     \
    MBED_RELEASE_BARRIER(success);                              \
    T oldValue;                                                 \
    uint32_t fail, expectedValue = *expectedCurrentValue;       \
    DO_MBED_LOCKFREE_CAS_STRONG_ASM(M);                         \
    if (fail) {                                                 \
        *expectedCurrentValue = oldValue;                       \
    }                                                           \
    MBED_ACQUIRE_BARRIER(fail ? failure : success);             \
    return !fail;                                               \
}

#define DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, T, fn_suffix, M) \
inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \
{                                                               \
    uint32_t fail, newValue;                                    \
    MBED_BARRIER();                                             \
    do {                                                        \
        DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M);      \
    } while (fail);                                             \
    MBED_BARRIER();                                             \
    return (T) newValue;                                        \
}                                                               \
                                                                \
MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
    volatile T *valuePtr, T arg, mbed_memory_order order)       \
{                                                               \
    uint32_t fail, newValue;                                    \
    MBED_RELEASE_BARRIER(order);                                \
    do {                                                        \
        DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M);      \
    } while (fail);                                             \
    MBED_ACQUIRE_BARRIER(order);                                \
    return (T) newValue;                                        \
}

#define DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, T, fn_suffix, M) \
inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \
{                                                               \
    T oldValue;                                                 \
    uint32_t fail, newValue;                                    \
    MBED_BARRIER();                                             \
    do {                                                        \
        DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M);      \
    } while (fail);                                             \
    MBED_BARRIER();                                             \
    return oldValue;                                            \
}                                                               \
                                                                \
MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
    volatile T *valuePtr, T arg, mbed_memory_order order)       \
{                                                               \
    T oldValue;                                                 \
    uint32_t fail, newValue;                                    \
    MBED_RELEASE_BARRIER(order);                                \
    do {                                                        \
        DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M);      \
    } while (fail);                                             \
    MBED_ACQUIRE_BARRIER(order);                                \
    return oldValue;                                            \
}

#define DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, T, fn_suffix, M) \
inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \
{                                                               \
    T oldValue;                                                 \
    uint32_t fail, newValue;                                    \
    MBED_BARRIER();                                             \
    do {                                                        \
        DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M);      \
    } while (fail);                                             \
    MBED_BARRIER();                                             \
    return oldValue;                                            \
}                                                               \
                                                                \
MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
    volatile T *valuePtr, T arg, mbed_memory_order order)       \
{                                                               \
    T oldValue;                                                 \
    uint32_t fail, newValue;                                    \
    MBED_RELEASE_BARRIER(order);                                \
    do {                                                        \
        DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M);      \
    } while (fail);                                             \
    MBED_ACQUIRE_BARRIER(order);                                \
    return oldValue;                                            \
}

/* Flag test-and-set is a byte exchange of "true", using the exchange
 * assembler directly */
inline bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr)
{
    MBED_BARRIER();
    bool oldValue, newValue = true;
    uint32_t fail;
    volatile uint8_t *valuePtr = &flagPtr->_flag;
    do {
        DO_MBED_LOCKFREE_EXCHG_ASM(B);
    } while (fail);
    MBED_BARRIER();
    return oldValue;
}

MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *flagPtr, mbed_memory_order order)
{
    MBED_RELEASE_BARRIER(order);
    bool oldValue, newValue = true;
    uint32_t fail;
    volatile uint8_t *valuePtr = &flagPtr->_flag;
    do {
        DO_MBED_LOCKFREE_EXCHG_ASM(B);
    } while (fail);
    MBED_ACQUIRE_BARRIER(order);
    return oldValue;
}
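/* Illustrative usage (a sketch, not part of this header): core_util_atomic_flag
 * can implement a simple spinlock. CORE_UTIL_ATOMIC_FLAG_INIT is the
 * initializer from mbed_atomic.h; the critical-section body is hypothetical.
 *
 *     static core_util_atomic_flag lock = CORE_UTIL_ATOMIC_FLAG_INIT;
 *
 *     while (core_util_atomic_flag_test_and_set(&lock)) {
 *         // another context holds the lock - spin until it clears the flag
 *     }
 *     // ... critical section ...
 *     core_util_atomic_flag_clear(&lock);
 */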
#define DO_MBED_LOCKFREE_EXCHG_OPS() \
DO_MBED_LOCKFREE_EXCHG_OP(uint8_t, u8, B) \
DO_MBED_LOCKFREE_EXCHG_OP(uint16_t, u16, H) \
DO_MBED_LOCKFREE_EXCHG_OP(uint32_t, u32, )

#define DO_MBED_LOCKFREE_NEWVAL_2OPS(name, OP, Constants) \
DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint8_t, u8, B) \
DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint16_t, u16, H) \
DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint32_t, u32, )

#define DO_MBED_LOCKFREE_OLDVAL_3OPS(name, OP, Constants) \
DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint8_t, u8, B) \
DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint16_t, u16, H) \
DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint32_t, u32, )

#define DO_MBED_LOCKFREE_OLDVAL_2OPS(name, OP, Constants) \
DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint8_t, u8, B) \
DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint16_t, u16, H) \
DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint32_t, u32, )

#define DO_MBED_LOCKFREE_CAS_STRONG_OPS() \
DO_MBED_LOCKFREE_CAS_STRONG_OP(uint8_t, u8, B) \
DO_MBED_LOCKFREE_CAS_STRONG_OP(uint16_t, u16, H) \
DO_MBED_LOCKFREE_CAS_STRONG_OP(uint32_t, u32, )

#define DO_MBED_LOCKFREE_CAS_WEAK_OPS() \
DO_MBED_LOCKFREE_CAS_WEAK_OP(uint8_t, u8, B) \
DO_MBED_LOCKFREE_CAS_WEAK_OP(uint16_t, u16, H) \
DO_MBED_LOCKFREE_CAS_WEAK_OP(uint32_t, u32, )

/* Instantiate the operations, choosing assembler constraints that match the
 * immediates each instruction form can encode */
#if !MBED_EXCLUSIVE_ACCESS_THUMB1
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_add, ADDS, "IL")
DO_MBED_LOCKFREE_NEWVAL_2OPS(incr, ADDS, "IL")
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_sub, SUBS, "IL")
DO_MBED_LOCKFREE_NEWVAL_2OPS(decr, SUBS, "IL")
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_and, ANDS, "IK")
DO_MBED_LOCKFREE_NEWVAL_2OPS(and_fetch, ANDS, "IK")
#if MBED_EXCLUSIVE_ACCESS_ARM
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_or, ORRS, "I")
DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch, ORRS, "I")
#else
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_or, ORRS, "IK")
DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch, ORRS, "IK")
#endif
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_xor, EORS, "I")
DO_MBED_LOCKFREE_NEWVAL_2OPS(xor_fetch, EORS, "I")
#else // MBED_EXCLUSIVE_ACCESS_THUMB1
/* Thumb-1 ADD/SUB take only small immediates, and the logical ops take none */
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_add, ADDS, "L")
DO_MBED_LOCKFREE_NEWVAL_2OPS(incr, ADDS, "IJ")
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_sub, SUBS, "L")
DO_MBED_LOCKFREE_NEWVAL_2OPS(decr, SUBS, "IJ")
DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_and, ANDS, "")
DO_MBED_LOCKFREE_NEWVAL_2OPS(and_fetch, ANDS, "")
DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_or, ORRS, "")
DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch, ORRS, "")
DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_xor, EORS, "")
DO_MBED_LOCKFREE_NEWVAL_2OPS(xor_fetch, EORS, "")
#endif

DO_MBED_LOCKFREE_EXCHG_OPS()
DO_MBED_LOCKFREE_CAS_STRONG_OPS()
DO_MBED_LOCKFREE_CAS_WEAK_OPS()
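/* Illustrative usage (a sketch, not part of this header): a lock-free
 * "store maximum" built on the weak compare-exchange. The weak form may fail
 * spuriously, which is fine inside a retry loop, and on failure it updates
 * "expected" with the value actually observed.
 *
 *     void store_max(volatile uint32_t *dest, uint32_t sample)
 *     {
 *         uint32_t expected = core_util_atomic_load_u32(dest);
 *         while (sample > expected) {
 *             if (core_util_atomic_compare_exchange_weak_u32(dest, &expected, sample)) {
 *                 break;
 *             }
 *         }
 *     }
 */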
#define DO_MBED_LOCKED_FETCH_OP_ORDERINGS(name) \
DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint64_t, u64)
#define DO_MBED_LOCKED_CAS_ORDERINGS(name) \
DO_MBED_LOCKED_CAS_ORDERING(name, uint64_t, u64)
#else // MBED_EXCLUSIVE_ACCESS
/* Without LDREX/STREX support, all sizes use the locked implementations,
 * so the ordering wrappers are needed for every size */
#define DO_MBED_LOCKED_FETCH_OP_ORDERINGS(name) \
DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint8_t, u8) \
DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint16_t, u16) \
DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint32_t, u32) \
DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint64_t, u64)
#define DO_MBED_LOCKED_CAS_ORDERINGS(name) \
DO_MBED_LOCKED_CAS_ORDERING(name, uint8_t, u8) \
DO_MBED_LOCKED_CAS_ORDERING(name, uint16_t, u16) \
DO_MBED_LOCKED_CAS_ORDERING(name, uint32_t, u32) \
DO_MBED_LOCKED_CAS_ORDERING(name, uint64_t, u64)
#endif // MBED_EXCLUSIVE_ACCESS

/* Lock-free loads and stores don't need assembler - just aligned accesses
 * bracketed by the appropriate barriers. The V parameter lets the same macro
 * emit both the volatile-qualified and (C++ overload) non-volatile forms. */
#define DO_MBED_LOCKFREE_LOADSTORE(T, V, fn_suffix) \
MBED_FORCEINLINE T core_util_atomic_load_##fn_suffix(T const V *valuePtr) \
{ \
    T value = *valuePtr; \
    MBED_BARRIER(); \
    return value; \
} \
\
MBED_FORCEINLINE T core_util_atomic_load_explicit_##fn_suffix(T const V *valuePtr, mbed_memory_order order) \
{ \
    MBED_CHECK_LOAD_ORDER(order); \
    T value = *valuePtr; \
    MBED_ACQUIRE_BARRIER(order); \
    return value; \
} \
\
MBED_FORCEINLINE void core_util_atomic_store_##fn_suffix(T V *valuePtr, T value) \
{ \
    MBED_BARRIER(); \
    *valuePtr = value; \
    MBED_BARRIER(); \
} \
\
MBED_FORCEINLINE void core_util_atomic_store_explicit_##fn_suffix(T V *valuePtr, T value, mbed_memory_order order) \
{ \
    MBED_CHECK_STORE_ORDER(order); \
    MBED_RELEASE_BARRIER(order); \
    *valuePtr = value; \
    MBED_SEQ_CST_BARRIER(order); \
}

MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr)
{
    MBED_BARRIER();
    flagPtr->_flag = false;
    MBED_BARRIER();
}

MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(volatile core_util_atomic_flag *flagPtr, mbed_memory_order order)
{
    MBED_CHECK_STORE_ORDER(order);
    MBED_RELEASE_BARRIER(order);
    flagPtr->_flag = false;
    MBED_SEQ_CST_BARRIER(order);
}

#ifdef __cplusplus
/* Temporarily end the extern "C" block to provide the non-volatile
 * overloads as well */
} // extern "C"

MBED_FORCEINLINE void core_util_atomic_flag_clear(core_util_atomic_flag *flagPtr) noexcept
{
    MBED_BARRIER();
    flagPtr->_flag = false;
    MBED_BARRIER();
}

MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(core_util_atomic_flag *flagPtr, mbed_memory_order order) noexcept
{
    MBED_RELEASE_BARRIER(order);
    flagPtr->_flag = false;
    MBED_SEQ_CST_BARRIER(order);
}

DO_MBED_LOCKFREE_LOADSTORE(uint8_t,, u8)
DO_MBED_LOCKFREE_LOADSTORE(uint16_t,, u16)
DO_MBED_LOCKFREE_LOADSTORE(uint32_t,, u32)
DO_MBED_LOCKFREE_LOADSTORE(int8_t,, s8)
DO_MBED_LOCKFREE_LOADSTORE(int16_t,, s16)
DO_MBED_LOCKFREE_LOADSTORE(int32_t,, s32)
DO_MBED_LOCKFREE_LOADSTORE(bool,, bool)
DO_MBED_LOCKFREE_LOADSTORE(void *,, ptr)

extern "C" {
#endif

DO_MBED_LOCKFREE_LOADSTORE(uint8_t, volatile, u8)
DO_MBED_LOCKFREE_LOADSTORE(uint16_t, volatile, u16)
DO_MBED_LOCKFREE_LOADSTORE(uint32_t, volatile, u32)
DO_MBED_LOCKFREE_LOADSTORE(int8_t, volatile, s8)
DO_MBED_LOCKFREE_LOADSTORE(int16_t, volatile, s16)
DO_MBED_LOCKFREE_LOADSTORE(int32_t, volatile, s32)
DO_MBED_LOCKFREE_LOADSTORE(bool, volatile, bool)
DO_MBED_LOCKFREE_LOADSTORE(void *, volatile, ptr)
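/* Illustrative usage (a sketch, not part of this header): release/acquire
 * pairing with the explicit load/store forms. The variable names are
 * hypothetical; the writer publishes data before the flag, and the reader
 * sees the data once it observes the flag.
 *
 *     // writer
 *     buffer = computed_value;
 *     core_util_atomic_store_explicit_bool(&ready, true, mbed_memory_order_release);
 *
 *     // reader
 *     if (core_util_atomic_load_explicit_bool(&ready, mbed_memory_order_acquire)) {
 *         use(buffer);
 *     }
 */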
/* Signed compare-and-swap casts through to the unsigned form of the same
 * size - the representation is identical, so the result is well-defined */
#define DO_MBED_SIGNED_CAS_OP(name, T, fn_suffix) \
MBED_FORCEINLINE bool core_util_atomic_##name##_s##fn_suffix(volatile T *ptr, \
        T *expectedCurrentValue, T desiredValue) \
{ \
    return core_util_atomic_##name##_u##fn_suffix((volatile u##T *)ptr, \
            (u##T *)expectedCurrentValue, (u##T)desiredValue); \
} \
\
MBED_FORCEINLINE bool core_util_atomic_##name##_explicit_s##fn_suffix(volatile T *ptr, \
        T *expectedCurrentValue, T desiredValue, \
        mbed_memory_order success, mbed_memory_order failure) \
{ \
    return core_util_atomic_##name##_explicit_u##fn_suffix((volatile u##T *)ptr, \
            (u##T *)expectedCurrentValue, (u##T)desiredValue, success, failure); \
}

#define DO_MBED_SIGNED_CAS_OPS(name) \
DO_MBED_SIGNED_CAS_OP(name, int8_t, 8) \
DO_MBED_SIGNED_CAS_OP(name, int16_t, 16) \
DO_MBED_SIGNED_CAS_OP(name, int32_t, 32) \
DO_MBED_SIGNED_CAS_OP(name, int64_t, 64)

DO_MBED_SIGNED_CAS_OPS(cas)
DO_MBED_SIGNED_CAS_OPS(compare_exchange_weak)
/* Pointer CAS is implemented on top of the integer CAS of matching size */
MBED_FORCEINLINE bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return core_util_atomic_cas_u32(
               (volatile uint32_t *)ptr,
               (uint32_t *)expectedCurrentValue,
               (uint32_t)desiredValue);
#else
    return core_util_atomic_cas_u64(
               (volatile uint64_t *)ptr,
               (uint64_t *)expectedCurrentValue,
               (uint64_t)desiredValue);
#endif
}

MBED_FORCEINLINE bool core_util_atomic_cas_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return core_util_atomic_cas_explicit_u32(
               (volatile uint32_t *)ptr,
               (uint32_t *)expectedCurrentValue,
               (uint32_t)desiredValue,
               success, failure);
#else
    return core_util_atomic_cas_explicit_u64(
               (volatile uint64_t *)ptr,
               (uint64_t *)expectedCurrentValue,
               (uint64_t)desiredValue,
               success, failure);
#endif
}

MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return core_util_atomic_compare_exchange_weak_u32(
               (volatile uint32_t *)ptr,
               (uint32_t *)expectedCurrentValue,
               (uint32_t)desiredValue);
#else
    return core_util_atomic_compare_exchange_weak_u64(
               (volatile uint64_t *)ptr,
               (uint64_t *)expectedCurrentValue,
               (uint64_t)desiredValue);
#endif
}

MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return core_util_atomic_compare_exchange_weak_explicit_u32(
               (volatile uint32_t *)ptr,
               (uint32_t *)expectedCurrentValue,
               (uint32_t)desiredValue,
               success, failure);
#else
    return core_util_atomic_compare_exchange_weak_explicit_u64(
               (volatile uint64_t *)ptr,
               (uint64_t *)expectedCurrentValue,
               (uint64_t)desiredValue,
               success, failure);
#endif
}
#define DO_MBED_SIGNED_FETCH_OP(name, T, fn_suffix) \
MBED_FORCEINLINE T core_util_atomic_##name##_s##fn_suffix(volatile T *valuePtr, T arg) \
{ \
    return (T)core_util_atomic_##name##_u##fn_suffix((volatile u##T *)valuePtr, (u##T)arg); \
}

#define DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, T, fn_suffix) \
MBED_FORCEINLINE T core_util_atomic_##name##_explicit_s##fn_suffix(volatile T *valuePtr, T arg, mbed_memory_order order) \
{ \
    return (T)core_util_atomic_##name##_explicit_u##fn_suffix((volatile u##T *)valuePtr, (u##T)arg, order); \
}

#define DO_MBED_SIGNED_FETCH_OPS(name) \
DO_MBED_SIGNED_FETCH_OP(name, int8_t, 8) \
DO_MBED_SIGNED_FETCH_OP(name, int16_t, 16) \
DO_MBED_SIGNED_FETCH_OP(name, int32_t, 32) \
DO_MBED_SIGNED_FETCH_OP(name, int64_t, 64)

#define DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(name) \
DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int8_t, 8) \
DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int16_t, 16) \
DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int32_t, 32) \
DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int64_t, 64)

DO_MBED_SIGNED_FETCH_OPS(exchange)
DO_MBED_SIGNED_FETCH_OPS(incr)
DO_MBED_SIGNED_FETCH_OPS(decr)
DO_MBED_SIGNED_FETCH_OPS(fetch_add)
DO_MBED_SIGNED_FETCH_OPS(fetch_sub)

DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(exchange)
DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(fetch_add)
DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(fetch_sub)
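/* Illustrative usage (a sketch, not part of this header): the signed wrappers
 * behave as two's-complement, so negative arguments work as expected.
 *
 *     volatile int32_t level;
 *     int32_t before = core_util_atomic_fetch_add_s32(&level, -5); // returns prior value
 */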
/* Pointer exchange and arithmetic are implemented on top of the
 * pointer-sized integer operations */
MBED_FORCEINLINE void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_exchange_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue);
#else
    return (void *)core_util_atomic_exchange_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue);
#endif
}

MBED_FORCEINLINE void *core_util_atomic_exchange_explicit_ptr(void *volatile *valuePtr, void *desiredValue, mbed_memory_order order)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_exchange_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue, order);
#else
    return (void *)core_util_atomic_exchange_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue, order);
#endif
}

MBED_FORCEINLINE void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_incr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta);
#else
    return (void *)core_util_atomic_incr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta);
#endif
}

MBED_FORCEINLINE void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_decr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta);
#else
    return (void *)core_util_atomic_decr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta);
#endif
}

MBED_FORCEINLINE void *core_util_atomic_fetch_add_ptr(void *volatile *valuePtr, ptrdiff_t arg)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_fetch_add_u32((volatile uint32_t *)valuePtr, (uint32_t)arg);
#else
    return (void *)core_util_atomic_fetch_add_u64((volatile uint64_t *)valuePtr, (uint64_t)arg);
#endif
}

MBED_FORCEINLINE void *core_util_atomic_fetch_add_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_fetch_add_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)arg, order);
#else
    return (void *)core_util_atomic_fetch_add_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)arg, order);
#endif
}

MBED_FORCEINLINE void *core_util_atomic_fetch_sub_ptr(void *volatile *valuePtr, ptrdiff_t arg)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_fetch_sub_u32((volatile uint32_t *)valuePtr, (uint32_t)arg);
#else
    return (void *)core_util_atomic_fetch_sub_u64((volatile uint64_t *)valuePtr, (uint64_t)arg);
#endif
}

MBED_FORCEINLINE void *core_util_atomic_fetch_sub_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_fetch_sub_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)arg, order);
#else
    return (void *)core_util_atomic_fetch_sub_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)arg, order);
#endif
}

/* The explicit-order 64-bit load/store forms validate the order and forward
 * to the plain versions */
MBED_FORCEINLINE uint64_t core_util_atomic_load_explicit_u64(const volatile uint64_t *valuePtr, MBED_UNUSED mbed_memory_order order)
{
    MBED_CHECK_LOAD_ORDER(order);
    return core_util_atomic_load_u64(valuePtr);
}

MBED_FORCEINLINE int64_t core_util_atomic_load_explicit_s64(const volatile int64_t *valuePtr, MBED_UNUSED mbed_memory_order order)
{
    MBED_CHECK_LOAD_ORDER(order);
    return core_util_atomic_load_s64(valuePtr);
}

MBED_FORCEINLINE void core_util_atomic_store_explicit_u64(volatile uint64_t *valuePtr, uint64_t desiredValue, MBED_UNUSED mbed_memory_order order)
{
    MBED_CHECK_STORE_ORDER(order);
    core_util_atomic_store_u64(valuePtr, desiredValue);
}

MBED_FORCEINLINE void core_util_atomic_store_explicit_s64(volatile int64_t *valuePtr, int64_t desiredValue, MBED_UNUSED mbed_memory_order order)
{
    MBED_CHECK_STORE_ORDER(order);
    core_util_atomic_store_s64(valuePtr, desiredValue);
}
#define DO_MBED_LOCKED_FETCH_OP_ORDERING(name, T, fn_suffix) \
MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
    volatile T *valuePtr, T arg, MBED_UNUSED mbed_memory_order order) \
{ \
    return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \
}

#define DO_MBED_LOCKED_CAS_ORDERING(name, T, fn_suffix) \
MBED_FORCEINLINE bool core_util_atomic_##name##_explicit_##fn_suffix( \
    volatile T *ptr, T *expectedCurrentValue, T desiredValue, \
    MBED_UNUSED mbed_memory_order success, \
    MBED_UNUSED mbed_memory_order failure) \
{ \
    MBED_CHECK_CAS_ORDER(success, failure); \
    return core_util_atomic_##name##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \
}

DO_MBED_LOCKED_FETCH_OP_ORDERINGS(exchange)
DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_add)
DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_sub)
DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_and)
DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_or)
DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_xor)
DO_MBED_LOCKED_CAS_ORDERINGS(cas)
DO_MBED_LOCKED_CAS_ORDERINGS(compare_exchange_weak)
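/* Illustrative usage (a sketch, not part of this header): a statistics
 * counter needs atomicity but no ordering, so relaxed is sufficient and
 * avoids barriers.
 *
 *     volatile uint32_t packet_count;
 *     core_util_atomic_fetch_add_explicit_u32(&packet_count, 1,
 *                                             mbed_memory_order_relaxed);
 */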
#ifdef __cplusplus
} // extern "C"

/* Templated overloads, so C++ callers can use one name for every type */
#define DO_MBED_ATOMIC_LOAD_TEMPLATE(T, fn_suffix) \
template<> \
inline T core_util_atomic_load(const volatile T *valuePtr) noexcept \
{ \
    return core_util_atomic_load_##fn_suffix(valuePtr); \
} \
\
template<> \
inline T core_util_atomic_load(const T *valuePtr) noexcept \
{ \
    return core_util_atomic_load_##fn_suffix(valuePtr); \
} \
\
template<> \
inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) noexcept \
{ \
    return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
} \
\
template<> \
inline T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) noexcept \
{ \
    return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
}

template<typename T>
inline T *core_util_atomic_load(T *const volatile *valuePtr) noexcept
{
    return (T *) core_util_atomic_load_ptr((void *const volatile *) valuePtr);
}

template<typename T>
inline T *core_util_atomic_load(T *const *valuePtr) noexcept
{
    return (T *) core_util_atomic_load_ptr((void *const *) valuePtr);
}

template<typename T>
inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order) noexcept
{
    return (T *) core_util_atomic_load_explicit_ptr((void *const volatile *) valuePtr, order);
}

template<typename T>
inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order) noexcept
{
    return (T *) core_util_atomic_load_explicit_ptr((void *const *) valuePtr, order);
}

DO_MBED_ATOMIC_LOAD_TEMPLATE(uint8_t, u8)
DO_MBED_ATOMIC_LOAD_TEMPLATE(uint16_t, u16)
DO_MBED_ATOMIC_LOAD_TEMPLATE(uint32_t, u32)
DO_MBED_ATOMIC_LOAD_TEMPLATE(uint64_t, u64)
DO_MBED_ATOMIC_LOAD_TEMPLATE(int8_t, s8)
DO_MBED_ATOMIC_LOAD_TEMPLATE(int16_t, s16)
DO_MBED_ATOMIC_LOAD_TEMPLATE(int32_t, s32)
DO_MBED_ATOMIC_LOAD_TEMPLATE(int64_t, s64)
DO_MBED_ATOMIC_LOAD_TEMPLATE(bool, bool)
#define DO_MBED_ATOMIC_STORE_TEMPLATE(T, fn_suffix) \
template<> \
inline void core_util_atomic_store(volatile T *valuePtr, T val) noexcept \
{ \
    core_util_atomic_store_##fn_suffix(valuePtr, val); \
} \
\
template<> \
inline void core_util_atomic_store(T *valuePtr, T val) noexcept \
{ \
    core_util_atomic_store_##fn_suffix(valuePtr, val); \
} \
\
template<> \
inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) noexcept \
{ \
    core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
} \
\
template<> \
inline void core_util_atomic_store_explicit(T *valuePtr, T val, mbed_memory_order order) noexcept \
{ \
    core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
}

template<typename T>
inline void core_util_atomic_store(T *volatile *valuePtr, T *val) noexcept
{
    core_util_atomic_store_ptr((void *volatile *) valuePtr, val);
}

template<typename T>
inline void core_util_atomic_store(T **valuePtr, T *val) noexcept
{
    core_util_atomic_store_ptr((void **) valuePtr, val);
}

template<typename T>
inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *val, mbed_memory_order order) noexcept
{
    core_util_atomic_store_explicit_ptr((void *volatile *) valuePtr, val, order);
}

template<typename T>
inline void core_util_atomic_store_explicit(T **valuePtr, T *val, mbed_memory_order order) noexcept
{
    core_util_atomic_store_explicit_ptr((void **) valuePtr, val, order);
}

DO_MBED_ATOMIC_STORE_TEMPLATE(uint8_t, u8)
DO_MBED_ATOMIC_STORE_TEMPLATE(uint16_t, u16)
DO_MBED_ATOMIC_STORE_TEMPLATE(uint32_t, u32)
DO_MBED_ATOMIC_STORE_TEMPLATE(uint64_t, u64)
DO_MBED_ATOMIC_STORE_TEMPLATE(int8_t, s8)
DO_MBED_ATOMIC_STORE_TEMPLATE(int16_t, s16)
DO_MBED_ATOMIC_STORE_TEMPLATE(int32_t, s32)
DO_MBED_ATOMIC_STORE_TEMPLATE(int64_t, s64)
DO_MBED_ATOMIC_STORE_TEMPLATE(bool, bool)
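/* Illustrative usage (a sketch, not part of this header): in C++ the
 * templated overloads pick the right implementation from the argument type.
 * "Message" is a hypothetical stand-in type.
 *
 *     volatile uint32_t counter;
 *     Message *volatile head;
 *
 *     uint32_t c = core_util_atomic_load(&counter); // dispatches to ..._load_u32
 *     Message *m = core_util_atomic_load(&head);    // pointer overload
 *     core_util_atomic_store(&head, m);
 */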
#define DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, T, fn_suffix) \
template<> \
inline bool core_util_atomic_##tname(volatile T *ptr, T *expectedCurrentValue, T desiredValue) noexcept \
{ \
    return core_util_atomic_##fname##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \
}

template<typename T>
inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept
{
    return core_util_atomic_cas_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue);
}

template<typename T>
inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept
{
    return core_util_atomic_compare_exchange_weak_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue);
}

#define DO_MBED_ATOMIC_CAS_TEMPLATES(tname, fname) \
DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint8_t, u8) \
DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint16_t, u16) \
DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint32_t, u32) \
DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint64_t, u64) \
DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int8_t, s8) \
DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int16_t, s16) \
DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int32_t, s32) \
DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int64_t, s64) \
DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, bool, bool)

DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_strong, cas)
DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_weak, compare_exchange_weak)
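/* Illustrative usage (a sketch, not part of this header): a lock-free stack
 * push using the templated pointer compare-exchange. "Node" and "top" are
 * hypothetical.
 *
 *     struct Node { Node *next; };
 *     Node *volatile top;
 *
 *     void push(Node *n)
 *     {
 *         Node *expected = core_util_atomic_load(&top);
 *         do {
 *             n->next = expected;
 *         } while (!core_util_atomic_compare_exchange_weak(&top, &expected, n));
 *     }
 */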
#define DO_MBED_ATOMIC_OP_TEMPLATE(name, T, fn_suffix) \
template<> \
inline T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept \
{ \
    return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \
} \
\
template<> \
inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \
        mbed_memory_order order) noexcept \
{ \
    return core_util_atomic_##name##_explicit_##fn_suffix(valuePtr, arg, order); \
}

template<>
inline bool core_util_atomic_exchange(volatile bool *valuePtr, bool arg) noexcept
{
    return core_util_atomic_exchange_bool(valuePtr, arg);
}

template<>
inline bool core_util_atomic_exchange_explicit(volatile bool *valuePtr, bool arg, mbed_memory_order order) noexcept
{
    return core_util_atomic_exchange_explicit_bool(valuePtr, arg, order);
}

template<typename T>
inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *arg) noexcept
{
    return (T *) core_util_atomic_exchange_ptr((void *volatile *) valuePtr, arg);
}

template<typename T>
inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *arg, mbed_memory_order order) noexcept
{
    return (T *) core_util_atomic_exchange_explicit_ptr((void *volatile *) valuePtr, arg, order);
}

template<typename T>
inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg) noexcept
{
    return (T *) core_util_atomic_fetch_add_ptr((void *volatile *) valuePtr, arg * sizeof(T));
}

template<typename T>
inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept
{
    return (T *) core_util_atomic_fetch_add_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order);
}

template<typename T>
inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg) noexcept
{
    return (T *) core_util_atomic_fetch_sub_ptr((void *volatile *) valuePtr, arg * sizeof(T));
}

template<typename T>
inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept
{
    return (T *) core_util_atomic_fetch_sub_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order);
}

#define DO_MBED_ATOMIC_OP_U_TEMPLATES(name) \
DO_MBED_ATOMIC_OP_TEMPLATE(name, uint8_t, u8) \
DO_MBED_ATOMIC_OP_TEMPLATE(name, uint16_t, u16) \
DO_MBED_ATOMIC_OP_TEMPLATE(name, uint32_t, u32) \
DO_MBED_ATOMIC_OP_TEMPLATE(name, uint64_t, u64)

#define DO_MBED_ATOMIC_OP_S_TEMPLATES(name) \
DO_MBED_ATOMIC_OP_TEMPLATE(name, int8_t, s8) \
DO_MBED_ATOMIC_OP_TEMPLATE(name, int16_t, s16) \
DO_MBED_ATOMIC_OP_TEMPLATE(name, int32_t, s32) \
DO_MBED_ATOMIC_OP_TEMPLATE(name, int64_t, s64)

#define DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, T, fn_suffix, postname, OP) \
template<> \
inline T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept \
{ \
    return core_util_atomic_##postname##_##fn_suffix(valuePtr, arg) OP; \
} \
\
template<> \
inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \
        mbed_memory_order order) noexcept \
{ \
    return core_util_atomic_##postname##_explicit_##fn_suffix(valuePtr, arg, order) OP; \
}

DO_MBED_ATOMIC_OP_U_TEMPLATES(exchange)
DO_MBED_ATOMIC_OP_S_TEMPLATES(exchange)
DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_add)
DO_MBED_ATOMIC_OP_S_TEMPLATES(fetch_add)
DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_sub)
DO_MBED_ATOMIC_OP_S_TEMPLATES(fetch_sub)
DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_and)
DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_or)
DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_xor)

/* The pre-op templates return the post-operation value. On lock-free targets
 * native implementations exist for the small types and the 64-bit form is
 * derived from the fetch op; without exclusive access, every size is derived. */
#if MBED_EXCLUSIVE_ACCESS
#define DO_MBED_ATOMIC_PRE_OP_TEMPLATES(name, postname, OP) \
template<typename T> T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept; \
template<typename T> T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept; \
DO_MBED_ATOMIC_OP_TEMPLATE(name, uint8_t, u8) \
DO_MBED_ATOMIC_OP_TEMPLATE(name, uint16_t, u16) \
DO_MBED_ATOMIC_OP_TEMPLATE(name, uint32_t, u32) \
DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint64_t, u64, postname, OP)
#else
#define DO_MBED_ATOMIC_PRE_OP_TEMPLATES(name, postname, OP) \
template<typename T> T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept; \
template<typename T> T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept; \
DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint8_t, u8, postname, OP) \
DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint16_t, u16, postname, OP) \
DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint32_t, u32, postname, OP) \
DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint64_t, u64, postname, OP)
#endif

DO_MBED_ATOMIC_PRE_OP_TEMPLATES(incr, fetch_add, + arg)
DO_MBED_ATOMIC_PRE_OP_TEMPLATES(decr, fetch_sub, - arg)
DO_MBED_ATOMIC_PRE_OP_TEMPLATES(and_fetch, fetch_and, & arg)
DO_MBED_ATOMIC_PRE_OP_TEMPLATES(or_fetch, fetch_or, | arg)
DO_MBED_ATOMIC_PRE_OP_TEMPLATES(xor_fetch, fetch_xor, ^ arg)
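/* Illustrative usage (a sketch, not part of this header): the "fetch" forms
 * return the old value, while incr/decr/and_fetch/or_fetch/xor_fetch return
 * the new value.
 *
 *     volatile uint32_t flags;
 *     uint32_t before = core_util_atomic_fetch_or(&flags, 4u); // old value
 *     uint32_t after  = core_util_atomic_or_fetch(&flags, 4u); // new value
 */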
#endif // __cplusplus

#undef MBED_DOP_REG
#undef MBED_CMP_IMM
#undef MBED_SUB3_IMM
#undef DO_MBED_LOCKFREE_EXCHG_ASM
#undef DO_MBED_LOCKFREE_NEWVAL_2OP_ASM
#undef DO_MBED_LOCKFREE_OLDVAL_3OP_ASM
#undef DO_MBED_LOCKFREE_OLDVAL_2OP_ASM
#undef DO_MBED_LOCKFREE_CAS_WEAK_ASM
#undef DO_MBED_LOCKFREE_CAS_STRONG_ASM
#undef DO_MBED_LOCKFREE_LOADSTORE
#undef DO_MBED_LOCKFREE_EXCHG_OP
#undef DO_MBED_LOCKFREE_CAS_WEAK_OP
#undef DO_MBED_LOCKFREE_CAS_STRONG_OP
#undef DO_MBED_LOCKFREE_NEWVAL_2OP
#undef DO_MBED_LOCKFREE_OLDVAL_2OP
#undef DO_MBED_LOCKFREE_OLDVAL_3OP
#undef DO_MBED_LOCKFREE_EXCHG_OPS
#undef DO_MBED_LOCKFREE_NEWVAL_2OPS
#undef DO_MBED_LOCKFREE_OLDVAL_2OPS
#undef DO_MBED_LOCKFREE_OLDVAL_3OPS
#undef DO_MBED_LOCKFREE_CAS_WEAK_OPS
#undef DO_MBED_LOCKFREE_CAS_STRONG_OPS
#undef DO_MBED_SIGNED_CAS_OP
#undef DO_MBED_SIGNED_CAS_OPS
#undef DO_MBED_SIGNED_FETCH_OP
#undef DO_MBED_SIGNED_EXPLICIT_FETCH_OP
#undef DO_MBED_SIGNED_FETCH_OPS
#undef DO_MBED_SIGNED_EXPLICIT_FETCH_OPS
#undef DO_MBED_LOCKED_FETCH_OP_ORDERING
#undef DO_MBED_LOCKED_FETCH_OP_ORDERINGS
#undef DO_MBED_LOCKED_CAS_ORDERING
#undef DO_MBED_LOCKED_CAS_ORDERINGS
#undef MBED_ACQUIRE_BARRIER
#undef MBED_RELEASE_BARRIER
#undef MBED_SEQ_CST_BARRIER
#undef DO_MBED_ATOMIC_LOAD_TEMPLATE
#undef DO_MBED_ATOMIC_STORE_TEMPLATE
#undef DO_MBED_ATOMIC_EXCHANGE_TEMPLATE
#undef DO_MBED_ATOMIC_CAS_TEMPLATE
#undef DO_MBED_ATOMIC_CAS_TEMPLATES
#undef DO_MBED_ATOMIC_FETCH_TEMPLATE
#undef DO_MBED_ATOMIC_FETCH_U_TEMPLATES
#undef DO_MBED_ATOMIC_FETCH_S_TEMPLATES
#undef DO_MBED_ATOMIC_OP_TEMPLATE
#undef DO_MBED_ATOMIC_OP_U_TEMPLATES
#undef DO_MBED_ATOMIC_OP_S_TEMPLATES
#undef DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE
#undef DO_MBED_ATOMIC_PRE_OP_TEMPLATES
#endif // __MBED_ATOMIC_IMPL_H__