#ifndef __MBED_ATOMIC_IMPL_H__
#define __MBED_ATOMIC_IMPL_H__

#ifndef __MBED_UTIL_ATOMIC_H__
#error "mbed_atomic_impl.h is designed to be included only by mbed_atomic.h"
#endif

#include "platform/mbed_assert.h"
#include "platform/mbed_toolchain.h"

#ifdef __cplusplus
extern "C" {
#endif

#ifdef MBED_DEBUG
/* Plain loads must not have "release" or "acquire+release" order */
#define MBED_CHECK_LOAD_ORDER(order) MBED_ASSERT((order) != mbed_memory_order_release && (order) != mbed_memory_order_acq_rel)

/* Plain stores must not have "consume", "acquire" or "acquire+release" order */
#define MBED_CHECK_STORE_ORDER(order) MBED_ASSERT((order) != mbed_memory_order_consume && (order) != mbed_memory_order_acquire && (order) != mbed_memory_order_acq_rel)

/* Compare-exchange failure order must be no stronger than the success order,
 * and cannot be "release" or "acquire+release" */
#define MBED_CHECK_CAS_ORDER(success, failure) \
    MBED_ASSERT((failure) <= (success) && (failure) != mbed_memory_order_release && (failure) != mbed_memory_order_acq_rel)
#else
#define MBED_CHECK_LOAD_ORDER(order) (void)0
#define MBED_CHECK_STORE_ORDER(order) (void)0
#define MBED_CHECK_CAS_ORDER(success, failure) (void)0
#endif

#if UINTPTR_MAX == UINT32_MAX
#define MBED_ATOMIC_PTR_SIZE 32
#else
#define MBED_ATOMIC_PTR_SIZE 64
#endif

/* Barrier macros that act only when the requested memory order calls for it */
#define MBED_ACQUIRE_BARRIER(order) do { \
    if ((order) & (mbed_memory_order_consume|mbed_memory_order_acquire)) { \
        MBED_BARRIER(); \
    } } while (0)

#define MBED_RELEASE_BARRIER(order) do { \
    if ((order) & mbed_memory_order_release) { \
        MBED_BARRIER(); \
    } } while (0)

#define MBED_SEQ_CST_BARRIER(order) do { \
    if ((order) == mbed_memory_order_seq_cst) { \
        MBED_BARRIER(); \
    } } while (0)

#if MBED_EXCLUSIVE_ACCESS

/* Register and immediate constraints differ between Thumb-1 and wider ISAs */
#if MBED_EXCLUSIVE_ACCESS_THUMB1
#define MBED_DOP_REG "l" // Need low register to get 16-bit 3-op ADD/SUB
#define MBED_CMP_IMM "I" // CMP 8-bit immediate
#define MBED_SUB3_IMM "L" // -7 to +7
#else
#define MBED_DOP_REG "r" // Can use 32-bit 3-op ADD/SUB, so any registers
#define MBED_CMP_IMM "IL" // CMP or CMN, 12-bit immediate
#define MBED_SUB3_IMM "IL" // SUB or ADD, 12-bit immediate
#endif

#ifdef __CC_ARM
#pragma diag_suppress 3732
#define DO_MBED_LOCKFREE_EXCHG_ASM(M) \
    __asm { \
        LDREX##M    oldValue, [valuePtr] ; \
        STREX##M    fail, newValue, [valuePtr] \
    }
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_EXCHG_ASM(M) \
    __asm volatile ( \
        ".syntax unified\n\t" \
        "LDREX"#M "\t%[oldValue], %[value]\n\t" \
        "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \
        : [oldValue] "=&r" (oldValue), \
          [fail] "=&r" (fail), \
          [value] "+Q" (*valuePtr) \
        : [newValue] "r" (newValue) \
    )
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_EXCHG_ASM(M) \
    asm volatile ( \
        "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
        "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \
        : [oldValue] "=&r" (oldValue), \
          [fail] "=&r" (fail) \
        : [valuePtr] "r" (valuePtr), \
          [newValue] "r" (newValue) \
        : "memory" \
    )
#endif

#ifdef __CC_ARM
#define DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M) \
    __asm { \
        LDREX##M    oldValue, [valuePtr] ; \
        OP          newValue, oldValue, arg ; \
        STREX##M    fail, newValue, [valuePtr] \
    }
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M) \
    __asm volatile ( \
        ".syntax unified\n\t" \
        "LDREX"#M "\t%[oldValue], %[value]\n\t" \
        #OP       "\t%[newValue], %[oldValue], %[arg]\n\t" \
        "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \
        : [oldValue] "=&" MBED_DOP_REG (oldValue), \
          [newValue] "=&" MBED_DOP_REG (newValue), \
          [fail] "=&r" (fail), \
          [value] "+Q" (*valuePtr) \
        : [arg] Constants MBED_DOP_REG (arg) \
        : "cc" \
    )
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M) \
    asm volatile ( \
        "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
        #OP       "\t%[newValue], %[oldValue], %[arg]\n" \
        "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \
        : [oldValue] "=&r" (oldValue), \
          [newValue] "=&r" (newValue), \
          [fail] "=&r" (fail) \
        : [valuePtr] "r" (valuePtr), \
          [arg] "r" (arg) \
        : "memory", "cc" \
    )
#endif
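/*
 * Illustrative note (editor's sketch, not part of the original header):
 * expanded for uint32_t with M empty, the GCC/Clang exchange fragment above
 * is used inside a retry loop later in this file, because STREX reports
 * failure (fail != 0) whenever exclusivity is lost between the paired
 * LDREX/STREX:
 *
 *     uint32_t oldValue;
 *     uint32_t fail;
 *     do {
 *         __asm volatile (
 *             ".syntax unified\n\t"
 *             "LDREX\t%[oldValue], %[value]\n\t"
 *             "STREX\t%[fail], %[newValue], %[value]\n\t"
 *             : [oldValue] "=&r" (oldValue),
 *               [fail] "=&r" (fail),
 *               [value] "+Q" (*valuePtr)
 *             : [newValue] "r" (newValue));
 *     } while (fail);    // 0 = store succeeded; nonzero = lost exclusivity, retry
 */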
/* Bitwise operations in Thumb-1 only have 2-operand forms */
#ifdef __CC_ARM
#define DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M) \
    __asm { \
        LDREX##M    oldValue, [valuePtr] ; \
        MOV         newValue, oldValue ; \
        OP          newValue, arg ; \
        STREX##M    fail, newValue, [valuePtr] \
    }
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M) \
    __asm volatile ( \
        ".syntax unified\n\t" \
        "LDREX"#M "\t%[oldValue], %[value]\n\t" \
        "MOV"     "\t%[newValue], %[oldValue]\n\t" \
        #OP       "\t%[newValue], %[arg]\n\t" \
        "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \
        : [oldValue] "=&r" (oldValue), \
          [newValue] "=&l" (newValue), \
          [fail] "=&r" (fail), \
          [value] "+Q" (*valuePtr) \
        : [arg] Constants "l" (arg) \
        : "cc" \
    )
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M) \
    asm volatile ( \
        "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
        "MOV"     "\t%[newValue], %[oldValue]\n" \
        #OP       "\t%[newValue], %[arg]\n" \
        "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \
        : [oldValue] "=&r" (oldValue), \
          [newValue] "=&r" (newValue), \
          [fail] "=&r" (fail) \
        : [valuePtr] "r" (valuePtr), \
          [arg] "r" (arg) \
        : "memory", "cc" \
    )
#endif

/* In ARM state the store of a weak CAS can be made conditional (STREXEQ) */
#if MBED_EXCLUSIVE_ACCESS_ARM
#ifdef __CC_ARM
#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
    __asm { \
        LDREX##M    oldValue, [ptr] ; \
        SUBS        fail, oldValue, expectedValue ; \
        STREX##M##EQ fail, desiredValue, [ptr] \
    }
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
    __asm volatile ( \
        ".syntax unified\n\t" \
        "LDREX"#M "\t%[oldValue], %[value]\n\t" \
        "SUBS"    "\t%[fail], %[oldValue], %[expectedValue]\n\t" \
        "STREX"#M"EQ\t%[fail], %[desiredValue], %[value]\n\t" \
        : [oldValue] "=&r" (oldValue), \
          [fail] "=&r" (fail), \
          [value] "+Q" (*ptr) \
        : [desiredValue] "r" (desiredValue), \
          [expectedValue] "ILr" (expectedValue) \
        : "cc" \
    )
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
    asm volatile ( \
        "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
        "SUBS"    "\t%[fail], %[oldValue], %[expectedValue]\n" \
        "STREX"#M"EQ\t%[fail], %[desiredValue], [%[valuePtr]]\n" \
        : [oldValue] "=&r" (oldValue), \
          [fail] "=&r" (fail) \
        : [desiredValue] "r" (desiredValue), \
          [expectedValue] "r" (expectedValue), \
          [valuePtr] "r" (ptr) \
        : "memory", "cc" \
    )
#endif
#else // MBED_EXCLUSIVE_ACCESS_ARM
/* In Thumb state the store must instead be skipped with a branch */
#ifdef __CC_ARM
#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
    __asm { \
        LDREX##M    oldValue, [ptr] ; \
        SUBS        fail, oldValue, expectedValue ; \
        BNE         done ; \
        STREX##M    fail, desiredValue, [ptr] ; \
done: \
    }
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
    __asm volatile ( \
        ".syntax unified\n\t" \
        "LDREX"#M "\t%[oldValue], %[value]\n\t" \
        "SUBS"    "\t%[fail], %[oldValue], %[expectedValue]\n\t" \
        "BNE"     "\t%=f\n\t" \
        "STREX"#M "\t%[fail], %[desiredValue], %[value]\n" \
        "%=:" \
        : [oldValue] "=&" MBED_DOP_REG (oldValue), \
          [fail] "=&" MBED_DOP_REG (fail), \
          [value] "+Q" (*ptr) \
        : [desiredValue] "r" (desiredValue), \
          [expectedValue] MBED_SUB3_IMM MBED_DOP_REG (expectedValue) \
        : "cc" \
    )
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
    asm volatile ( \
        "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
        "SUBS"    "\t%[fail], %[oldValue], %[expectedValue]\n" \
        "BNE"     "\tdone\n" \
        "STREX"#M "\t%[fail], %[desiredValue], [%[valuePtr]]\n" \
        "done:" \
        : [oldValue] "=&r" (oldValue), \
          [fail] "=&r" (fail) \
        : [desiredValue] "r" (desiredValue), \
          [expectedValue] "r" (expectedValue), \
          [valuePtr] "r" (ptr) \
        : "memory", "cc" \
    )
#endif
#endif // MBED_EXCLUSIVE_ACCESS_ARM

/* Strong CAS must loop back when STREX fails spuriously, so it carries its
 * own labels in every form */
#ifdef __CC_ARM
#define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M) \
    __asm { \
    retry: \
        LDREX##M    oldValue, [ptr] ; \
        SUBS        fail, oldValue, expectedValue ; \
        BNE         done ; \
        STREX##M    fail, desiredValue, [ptr] ; \
        CMP         fail, 0 ; \
        BNE         retry ; \
    done: \
    }
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M) \
    __asm volatile ( \
        ".syntax unified\n" \
        "%=:\n\t" \
        "LDREX"#M "\t%[oldValue], %[value]\n\t" \
        "SUBS"    "\t%[fail], %[oldValue], %[expectedValue]\n\t" \
        "BNE"     "\t%=f\n\t" \
        "STREX"#M "\t%[fail], %[desiredValue], %[value]\n\t" \
        "CMP"     "\t%[fail], #0\n\t" \
        "BNE"     "\t%=b\n" \
        "%=:" \
        : [oldValue] "=&" MBED_DOP_REG (oldValue), \
          [fail] "=&" MBED_DOP_REG (fail), \
          [value] "+Q" (*ptr) \
        : [desiredValue] "r" (desiredValue), \
          [expectedValue] MBED_SUB3_IMM MBED_DOP_REG (expectedValue) \
        : "cc" \
    )
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M) \
    asm volatile ( \
        "retry:\n" \
        "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
        "SUBS"    "\t%[fail], %[oldValue], %[expectedValue]\n" \
        "BNE"     "\tdone\n" \
        "STREX"#M "\t%[fail], %[desiredValue], [%[valuePtr]]\n" \
        "CMP"     "\t%[fail], #0\n" \
        "BNE"     "\tretry\n" \
        "done:" \
        : [oldValue] "=&r" (oldValue), \
          [fail] "=&r" (fail) \
        : [desiredValue] "r" (desiredValue), \
          [expectedValue] "r" (expectedValue), \
          [valuePtr] "r" (ptr) \
        : "memory", "cc" \
    )
#endif

/* Wrap the assembler fragments in functions: retry loops where STREX can
 * fail spuriously, and full barriers for the sequentially-consistent forms */
#define DO_MBED_LOCKFREE_EXCHG_OP(T, fn_suffix, M) \
inline T core_util_atomic_exchange_##fn_suffix(volatile T *valuePtr, T newValue) \
{ \
    T oldValue; \
    uint32_t fail; \
    MBED_BARRIER(); \
    do { \
        DO_MBED_LOCKFREE_EXCHG_ASM(M); \
    } while (fail); \
    MBED_BARRIER(); \
    return oldValue; \
} \
 \
MBED_FORCEINLINE T core_util_atomic_exchange_explicit_##fn_suffix( \
    volatile T *valuePtr, T newValue, mbed_memory_order order) \
{ \
    T oldValue; \
    uint32_t fail; \
    MBED_RELEASE_BARRIER(order); \
    do { \
        DO_MBED_LOCKFREE_EXCHG_ASM(M); \
    } while (fail); \
    MBED_ACQUIRE_BARRIER(order); \
    return oldValue; \
}

#define DO_MBED_LOCKFREE_CAS_WEAK_OP(T, fn_suffix, M) \
inline bool core_util_atomic_compare_exchange_weak_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \
{ \
    MBED_BARRIER(); \
    T oldValue; \
    uint32_t fail, expectedValue = *expectedCurrentValue; \
    DO_MBED_LOCKFREE_CAS_WEAK_ASM(M); \
    if (fail) { \
        *expectedCurrentValue = oldValue; \
    } \
    MBED_BARRIER(); \
    return !fail; \
} \
 \
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) \
{ \
    MBED_CHECK_CAS_ORDER(success, failure); \
    MBED_RELEASE_BARRIER(success); \
    T oldValue; \
    uint32_t fail, expectedValue = *expectedCurrentValue; \
    DO_MBED_LOCKFREE_CAS_WEAK_ASM(M); \
    if (fail) { \
        *expectedCurrentValue = oldValue; \
    } \
    MBED_ACQUIRE_BARRIER(fail ? failure : success); \
    return !fail; \
}

#define DO_MBED_LOCKFREE_CAS_STRONG_OP(T, fn_suffix, M) \
inline bool core_util_atomic_cas_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \
{ \
    MBED_BARRIER(); \
    T oldValue; \
    uint32_t fail, expectedValue = *expectedCurrentValue; \
    DO_MBED_LOCKFREE_CAS_STRONG_ASM(M); \
    if (fail) { \
        *expectedCurrentValue = oldValue; \
    } \
    MBED_BARRIER(); \
    return !fail; \
} \
 \
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) \
{ \
    MBED_CHECK_CAS_ORDER(success, failure); \
    MBED_RELEASE_BARRIER(success); \
    T oldValue; \
    uint32_t fail, expectedValue = *expectedCurrentValue; \
    DO_MBED_LOCKFREE_CAS_STRONG_ASM(M); \
    if (fail) { \
        *expectedCurrentValue = oldValue; \
    } \
    MBED_ACQUIRE_BARRIER(fail ? failure : success); \
    return !fail; \
}

#define DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, T, fn_suffix, M) \
inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \
{ \
    T oldValue; \
    uint32_t fail, newValue; \
    MBED_BARRIER(); \
    do { \
        DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M); \
    } while (fail); \
    MBED_BARRIER(); \
    return (T) retValue; \
} \
 \
MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
    volatile T *valuePtr, T arg, mbed_memory_order order) \
{ \
    T oldValue; \
    uint32_t fail, newValue; \
    MBED_RELEASE_BARRIER(order); \
    do { \
        DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M); \
    } while (fail); \
    MBED_ACQUIRE_BARRIER(order); \
    return (T) retValue; \
}

#define DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, T, fn_suffix, M) \
inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \
{ \
    T oldValue; \
    uint32_t fail, newValue; \
    MBED_BARRIER(); \
    do { \
        DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M); \
    } while (fail); \
    MBED_BARRIER(); \
    return (T) retValue; \
} \
 \
MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
    volatile T *valuePtr, T arg, mbed_memory_order order) \
{ \
    T oldValue; \
    uint32_t fail, newValue; \
    MBED_RELEASE_BARRIER(order); \
    do { \
        DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M); \
    } while (fail); \
    MBED_ACQUIRE_BARRIER(order); \
    return (T) retValue; \
}

inline bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr)
{
    MBED_BARRIER();
    bool oldValue, newValue = true;
    uint32_t fail;
    volatile uint8_t *valuePtr = &flagPtr->_flag;
    do {
        DO_MBED_LOCKFREE_EXCHG_ASM(B);
    } while (fail);
    MBED_BARRIER();
    return oldValue;
}

MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *flagPtr, mbed_memory_order order)
{
    MBED_RELEASE_BARRIER(order);
    bool oldValue, newValue = true;
    uint32_t fail;
    volatile uint8_t *valuePtr = &flagPtr->_flag;
    do {
        DO_MBED_LOCKFREE_EXCHG_ASM(B);
    } while (fail);
    MBED_ACQUIRE_BARRIER(order);
    return oldValue;
}
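/*
 * Usage sketch (editor's illustration, not part of the original header):
 * the flag operations above are sufficient for a classic test-and-set
 * spinlock. CORE_UTIL_ATOMIC_FLAG_INIT comes from mbed_atomic.h:
 *
 *     static core_util_atomic_flag lock = CORE_UTIL_ATOMIC_FLAG_INIT;
 *
 *     void lock_acquire(void)
 *     {
 *         while (core_util_atomic_flag_test_and_set(&lock)) {
 *             // spin: the flag was already set by another context
 *         }
 *     }
 *
 *     void lock_release(void)
 *     {
 *         core_util_atomic_flag_clear(&lock);
 *     }
 */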
#define DO_MBED_LOCKFREE_EXCHG_OPS() \
    DO_MBED_LOCKFREE_EXCHG_OP(uint8_t,  u8,  B) \
    DO_MBED_LOCKFREE_EXCHG_OP(uint16_t, u16, H) \
    DO_MBED_LOCKFREE_EXCHG_OP(uint32_t, u32, )

#define DO_MBED_LOCKFREE_3OPS(name, OP, Constants, retValue) \
    DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, uint8_t,  u8,  B) \
    DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, uint16_t, u16, H) \
    DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, uint32_t, u32, )

#define DO_MBED_LOCKFREE_2OPS(name, OP, Constants, retValue) \
    DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, uint8_t,  u8,  B) \
    DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, uint16_t, u16, H) \
    DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, uint32_t, u32, )

#define DO_MBED_LOCKFREE_CAS_STRONG_OPS() \
    DO_MBED_LOCKFREE_CAS_STRONG_OP(uint8_t,  u8,  B) \
    DO_MBED_LOCKFREE_CAS_STRONG_OP(uint16_t, u16, H) \
    DO_MBED_LOCKFREE_CAS_STRONG_OP(uint32_t, u32, )

#define DO_MBED_LOCKFREE_CAS_WEAK_OPS() \
    DO_MBED_LOCKFREE_CAS_WEAK_OP(uint8_t,  u8,  B) \
    DO_MBED_LOCKFREE_CAS_WEAK_OP(uint16_t, u16, H) \
    DO_MBED_LOCKFREE_CAS_WEAK_OP(uint32_t, u32, )

#if !MBED_EXCLUSIVE_ACCESS_THUMB1
// "IL" permits a 12-bit modified immediate or its negation
// (relying on the assembler to swap ADD/SUB)
DO_MBED_LOCKFREE_3OPS(incr, ADDS, "IL", newValue)
DO_MBED_LOCKFREE_3OPS(decr, SUBS, "IL", newValue)

DO_MBED_LOCKFREE_3OPS(fetch_add, ADDS, "IL", oldValue)
DO_MBED_LOCKFREE_3OPS(fetch_sub, SUBS, "IL", oldValue)
// "K" permits an inverted immediate (the assembler substitutes BIC for AND)
DO_MBED_LOCKFREE_3OPS(fetch_and, ANDS, "IK", oldValue)
#if MBED_EXCLUSIVE_ACCESS_ARM
// ARM state has no ORN, so only plain immediates
DO_MBED_LOCKFREE_3OPS(fetch_or, ORRS, "I", oldValue)
#else
// Thumb-2 has ORN (the assembler substitutes ORN for ORR with "K" immediates)
DO_MBED_LOCKFREE_3OPS(fetch_or, ORRS, "IK", oldValue)
#endif
DO_MBED_LOCKFREE_3OPS(fetch_xor, EORS, "I", oldValue)
#else // MBED_EXCLUSIVE_ACCESS_THUMB1
// Thumb-1: 3-op ADD/SUB take only "L" immediates (-7 to +7), and the
// bitwise operations have no immediate forms at all
DO_MBED_LOCKFREE_3OPS(incr, ADDS, "L", newValue)
DO_MBED_LOCKFREE_3OPS(decr, SUBS, "L", newValue)
DO_MBED_LOCKFREE_3OPS(fetch_add, ADDS, "L", oldValue)
DO_MBED_LOCKFREE_3OPS(fetch_sub, SUBS, "L", oldValue)
DO_MBED_LOCKFREE_2OPS(fetch_and, ANDS, "", oldValue)
DO_MBED_LOCKFREE_2OPS(fetch_or, ORRS, "", oldValue)
DO_MBED_LOCKFREE_2OPS(fetch_xor, EORS, "", oldValue)
#endif

DO_MBED_LOCKFREE_EXCHG_OPS()
DO_MBED_LOCKFREE_CAS_STRONG_OPS()
DO_MBED_LOCKFREE_CAS_WEAK_OPS()
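/*
 * Usage sketch (editor's illustration, not part of the original header):
 * a typical read-modify-CAS loop over the strong CAS generated above, here
 * a hypothetical saturating increment:
 *
 *     void counter_incr_saturating(volatile uint32_t *counter)
 *     {
 *         uint32_t expected = *counter;
 *         uint32_t desired;
 *         do {
 *             desired = (expected == UINT32_MAX) ? expected : expected + 1;
 *         } while (!core_util_atomic_cas_u32(counter, &expected, desired));
 *     }
 *
 * On failure the CAS writes the observed value back into `expected`, so the
 * loop does not need to re-read explicitly.
 */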
#define DO_MBED_LOCKED_FETCH_OP_ORDERINGS(name) \
    DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint64_t, u64)
#define DO_MBED_LOCKED_CAS_ORDERINGS(name) \
    DO_MBED_LOCKED_CAS_ORDERING(name, uint64_t, u64)
#else // MBED_EXCLUSIVE_ACCESS

/* No exclusive-access instructions: every size rides on the locked versions */
#define DO_MBED_LOCKED_FETCH_OP_ORDERINGS(name) \
    DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint8_t,  u8)  \
    DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint16_t, u16) \
    DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint32_t, u32) \
    DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint64_t, u64)
#define DO_MBED_LOCKED_CAS_ORDERINGS(name) \
    DO_MBED_LOCKED_CAS_ORDERING(name, uint8_t,  u8)  \
    DO_MBED_LOCKED_CAS_ORDERING(name, uint16_t, u16) \
    DO_MBED_LOCKED_CAS_ORDERING(name, uint32_t, u32) \
    DO_MBED_LOCKED_CAS_ORDERING(name, uint64_t, u64)

#endif // MBED_EXCLUSIVE_ACCESS

/* Lock-free loads and stores need no assembler - just aligned accesses with
 * the appropriate barriers. The odd `T const V` parameter ordering is
 * because T can itself be a pointer type. */
#define DO_MBED_LOCKFREE_LOADSTORE(T, V, fn_suffix) \
MBED_FORCEINLINE T core_util_atomic_load_##fn_suffix(T const V *valuePtr) \
{ \
    T value = *valuePtr; \
    MBED_BARRIER(); \
    return value; \
} \
 \
MBED_FORCEINLINE T core_util_atomic_load_explicit_##fn_suffix(T const V *valuePtr, mbed_memory_order order) \
{ \
    MBED_CHECK_LOAD_ORDER(order); \
    T value = *valuePtr; \
    MBED_ACQUIRE_BARRIER(order); \
    return value; \
} \
 \
MBED_FORCEINLINE void core_util_atomic_store_##fn_suffix(T V *valuePtr, T value) \
{ \
    MBED_BARRIER(); \
    *valuePtr = value; \
    MBED_BARRIER(); \
} \
 \
MBED_FORCEINLINE void core_util_atomic_store_explicit_##fn_suffix(T V *valuePtr, T value, mbed_memory_order order) \
{ \
    MBED_CHECK_STORE_ORDER(order); \
    MBED_RELEASE_BARRIER(order); \
    *valuePtr = value; \
    MBED_SEQ_CST_BARRIER(order); \
}

MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr)
{
    MBED_BARRIER();
    flagPtr->_flag = false;
    MBED_BARRIER();
}

MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(volatile core_util_atomic_flag *flagPtr, mbed_memory_order order)
{
    MBED_CHECK_STORE_ORDER(order);
    MBED_RELEASE_BARRIER(order);
    flagPtr->_flag = false;
    MBED_SEQ_CST_BARRIER(order);
}

#ifdef __cplusplus
// Temporarily turn off extern "C", so we can provide non-volatile load/store
// overloads for internal use
} // extern "C"

MBED_FORCEINLINE void core_util_atomic_flag_clear(core_util_atomic_flag *flagPtr)
{
    MBED_BARRIER();
    flagPtr->_flag = false;
    MBED_BARRIER();
}

MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(core_util_atomic_flag *flagPtr, mbed_memory_order order)
{
    MBED_CHECK_STORE_ORDER(order);
    MBED_RELEASE_BARRIER(order);
    flagPtr->_flag = false;
    MBED_SEQ_CST_BARRIER(order);
}

DO_MBED_LOCKFREE_LOADSTORE(uint8_t,, u8)
DO_MBED_LOCKFREE_LOADSTORE(uint16_t,, u16)
DO_MBED_LOCKFREE_LOADSTORE(uint32_t,, u32)
DO_MBED_LOCKFREE_LOADSTORE(int8_t,, s8)
DO_MBED_LOCKFREE_LOADSTORE(int16_t,, s16)
DO_MBED_LOCKFREE_LOADSTORE(int32_t,, s32)
DO_MBED_LOCKFREE_LOADSTORE(bool,, bool)
DO_MBED_LOCKFREE_LOADSTORE(void *,, ptr)

// Restore extern "C" for the standard volatile versions
extern "C" {
#endif

DO_MBED_LOCKFREE_LOADSTORE(uint8_t, volatile, u8)
DO_MBED_LOCKFREE_LOADSTORE(uint16_t, volatile, u16)
DO_MBED_LOCKFREE_LOADSTORE(uint32_t, volatile, u32)
DO_MBED_LOCKFREE_LOADSTORE(int8_t, volatile, s8)
DO_MBED_LOCKFREE_LOADSTORE(int16_t, volatile, s16)
DO_MBED_LOCKFREE_LOADSTORE(int32_t, volatile, s32)
DO_MBED_LOCKFREE_LOADSTORE(bool, volatile, bool)
DO_MBED_LOCKFREE_LOADSTORE(void *, volatile, ptr)
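/*
 * Usage sketch (editor's illustration, not part of the original header):
 * release/acquire message passing with the explicit variants generated
 * above. `data` and `ready` are hypothetical:
 *
 *     static uint32_t data;
 *     static uint8_t ready;
 *
 *     void producer(uint32_t value)
 *     {
 *         data = value;   // plain write, ordered by the release store below
 *         core_util_atomic_store_explicit_u8(&ready, 1, mbed_memory_order_release);
 *     }
 *
 *     bool consumer(uint32_t *out)
 *     {
 *         if (core_util_atomic_load_explicit_u8(&ready, mbed_memory_order_acquire)) {
 *             *out = data;    // guaranteed to observe the producer's write
 *             return true;
 *         }
 *         return false;
 *     }
 */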
#define DO_MBED_SIGNED_CAS_OP(name, T, fn_suffix) \
MBED_FORCEINLINE bool core_util_atomic_##name##_s##fn_suffix(volatile T *ptr, \
        T *expectedCurrentValue, T desiredValue) \
{ \
    return core_util_atomic_##name##_u##fn_suffix((volatile u##T *)ptr, \
            (u##T *)expectedCurrentValue, (u##T)desiredValue); \
} \
 \
MBED_FORCEINLINE bool core_util_atomic_##name##_explicit_s##fn_suffix(volatile T *ptr, \
        T *expectedCurrentValue, T desiredValue, \
        mbed_memory_order success, mbed_memory_order failure) \
{ \
    return core_util_atomic_##name##_explicit_u##fn_suffix((volatile u##T *)ptr, \
            (u##T *)expectedCurrentValue, (u##T)desiredValue, success, failure); \
}

#define DO_MBED_SIGNED_CAS_OPS(name) \
    DO_MBED_SIGNED_CAS_OP(name, int8_t,  8)  \
    DO_MBED_SIGNED_CAS_OP(name, int16_t, 16) \
    DO_MBED_SIGNED_CAS_OP(name, int32_t, 32) \
    DO_MBED_SIGNED_CAS_OP(name, int64_t, 64)

DO_MBED_SIGNED_CAS_OPS(cas)
DO_MBED_SIGNED_CAS_OPS(compare_exchange_weak)
MBED_FORCEINLINE bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return core_util_atomic_cas_u32(
               (volatile uint32_t *)ptr,
               (uint32_t *)expectedCurrentValue,
               (uint32_t)desiredValue);
#else
    return core_util_atomic_cas_u64(
               (volatile uint64_t *)ptr,
               (uint64_t *)expectedCurrentValue,
               (uint64_t)desiredValue);
#endif
}

MBED_FORCEINLINE bool core_util_atomic_cas_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return core_util_atomic_cas_explicit_u32(
               (volatile uint32_t *)ptr,
               (uint32_t *)expectedCurrentValue,
               (uint32_t)desiredValue,
               success, failure);
#else
    return core_util_atomic_cas_explicit_u64(
               (volatile uint64_t *)ptr,
               (uint64_t *)expectedCurrentValue,
               (uint64_t)desiredValue,
               success, failure);
#endif
}

MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return core_util_atomic_compare_exchange_weak_u32(
               (volatile uint32_t *)ptr,
               (uint32_t *)expectedCurrentValue,
               (uint32_t)desiredValue);
#else
    return core_util_atomic_compare_exchange_weak_u64(
               (volatile uint64_t *)ptr,
               (uint64_t *)expectedCurrentValue,
               (uint64_t)desiredValue);
#endif
}

MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return core_util_atomic_compare_exchange_weak_explicit_u32(
               (volatile uint32_t *)ptr,
               (uint32_t *)expectedCurrentValue,
               (uint32_t)desiredValue,
               success, failure);
#else
    return core_util_atomic_compare_exchange_weak_explicit_u64(
               (volatile uint64_t *)ptr,
               (uint64_t *)expectedCurrentValue,
               (uint64_t)desiredValue,
               success, failure);
#endif
}
#define DO_MBED_SIGNED_FETCH_OP(name, T, fn_suffix) \
MBED_FORCEINLINE T core_util_atomic_##name##_s##fn_suffix(volatile T *valuePtr, T arg) \
{ \
    return (T)core_util_atomic_##name##_u##fn_suffix((volatile u##T *)valuePtr, (u##T)arg); \
}

#define DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, T, fn_suffix) \
MBED_FORCEINLINE T core_util_atomic_##name##_explicit_s##fn_suffix(volatile T *valuePtr, T arg, mbed_memory_order order) \
{ \
    return (T)core_util_atomic_##name##_explicit_u##fn_suffix((volatile u##T *)valuePtr, (u##T)arg, order); \
}

#define DO_MBED_SIGNED_FETCH_OPS(name) \
    DO_MBED_SIGNED_FETCH_OP(name, int8_t,  8)  \
    DO_MBED_SIGNED_FETCH_OP(name, int16_t, 16) \
    DO_MBED_SIGNED_FETCH_OP(name, int32_t, 32) \
    DO_MBED_SIGNED_FETCH_OP(name, int64_t, 64)

#define DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(name) \
    DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int8_t,  8)  \
    DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int16_t, 16) \
    DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int32_t, 32) \
    DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int64_t, 64)

DO_MBED_SIGNED_FETCH_OPS(exchange)
DO_MBED_SIGNED_FETCH_OPS(incr)
DO_MBED_SIGNED_FETCH_OPS(decr)
DO_MBED_SIGNED_FETCH_OPS(fetch_add)
DO_MBED_SIGNED_FETCH_OPS(fetch_sub)

DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(exchange)
DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(fetch_add)
DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(fetch_sub)
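/*
 * Usage sketch (editor's illustration, not part of the original header):
 * the signed wrappers just reinterpret the bit pattern, relying on the
 * two's-complement wrapping of the unsigned core operations:
 *
 *     volatile int32_t balance;                                   // hypothetical
 *     int32_t old = core_util_atomic_fetch_sub_s32(&balance, 5);  // returns prior value
 */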
MBED_FORCEINLINE void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_exchange_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue);
#else
    return (void *)core_util_atomic_exchange_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue);
#endif
}

MBED_FORCEINLINE void *core_util_atomic_exchange_explicit_ptr(void *volatile *valuePtr, void *desiredValue, mbed_memory_order order)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_exchange_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue, order);
#else
    return (void *)core_util_atomic_exchange_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue, order);
#endif
}

MBED_FORCEINLINE void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_incr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta);
#else
    return (void *)core_util_atomic_incr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta);
#endif
}

MBED_FORCEINLINE void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_decr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta);
#else
    return (void *)core_util_atomic_decr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta);
#endif
}

MBED_FORCEINLINE void *core_util_atomic_fetch_add_ptr(void *volatile *valuePtr, ptrdiff_t arg)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_fetch_add_u32((volatile uint32_t *)valuePtr, (uint32_t)arg);
#else
    return (void *)core_util_atomic_fetch_add_u64((volatile uint64_t *)valuePtr, (uint64_t)arg);
#endif
}

MBED_FORCEINLINE void *core_util_atomic_fetch_add_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_fetch_add_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)arg, order);
#else
    return (void *)core_util_atomic_fetch_add_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)arg, order);
#endif
}

MBED_FORCEINLINE void *core_util_atomic_fetch_sub_ptr(void *volatile *valuePtr, ptrdiff_t arg)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_fetch_sub_u32((volatile uint32_t *)valuePtr, (uint32_t)arg);
#else
    return (void *)core_util_atomic_fetch_sub_u64((volatile uint64_t *)valuePtr, (uint64_t)arg);
#endif
}

MBED_FORCEINLINE void *core_util_atomic_fetch_sub_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_fetch_sub_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)arg, order);
#else
    return (void *)core_util_atomic_fetch_sub_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)arg, order);
#endif
}

MBED_FORCEINLINE uint64_t core_util_atomic_load_explicit_u64(const volatile uint64_t *valuePtr, MBED_UNUSED mbed_memory_order order)
{
    MBED_CHECK_LOAD_ORDER(order);
    return core_util_atomic_load_u64(valuePtr);
}

MBED_FORCEINLINE int64_t core_util_atomic_load_explicit_s64(const volatile int64_t *valuePtr, MBED_UNUSED mbed_memory_order order)
{
    MBED_CHECK_LOAD_ORDER(order);
    return core_util_atomic_load_s64(valuePtr);
}

MBED_FORCEINLINE void core_util_atomic_store_explicit_u64(volatile uint64_t *valuePtr, uint64_t desiredValue, MBED_UNUSED mbed_memory_order order)
{
    MBED_CHECK_STORE_ORDER(order);
    core_util_atomic_store_u64(valuePtr, desiredValue);
}

MBED_FORCEINLINE void core_util_atomic_store_explicit_s64(volatile int64_t *valuePtr, int64_t desiredValue, MBED_UNUSED mbed_memory_order order)
{
    MBED_CHECK_STORE_ORDER(order);
    core_util_atomic_store_s64(valuePtr, desiredValue);
}
#define DO_MBED_LOCKED_FETCH_OP_ORDERING(name, T, fn_suffix) \
MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
    volatile T *valuePtr, T arg, MBED_UNUSED mbed_memory_order order) \
{ \
    return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \
}

#define DO_MBED_LOCKED_CAS_ORDERING(name, T, fn_suffix) \
MBED_FORCEINLINE bool core_util_atomic_##name##_explicit_##fn_suffix( \
    volatile T *ptr, T *expectedCurrentValue, T desiredValue, \
    MBED_UNUSED mbed_memory_order success, \
    MBED_UNUSED mbed_memory_order failure) \
{ \
    MBED_CHECK_CAS_ORDER(success, failure); \
    return core_util_atomic_##name##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \
}

DO_MBED_LOCKED_FETCH_OP_ORDERINGS(exchange)
DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_add)
DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_sub)
DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_and)
DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_or)
DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_xor)
DO_MBED_LOCKED_CAS_ORDERINGS(cas)
DO_MBED_LOCKED_CAS_ORDERINGS(compare_exchange_weak)
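/*
 * Usage sketch (editor's illustration, not part of the original header):
 * the explicit CAS forms take separate success/failure orders; debug builds
 * assert (via MBED_CHECK_CAS_ORDER) that the failure order is no stronger
 * than the success order and is not release/acq_rel:
 *
 *     volatile uint32_t owner;        // hypothetical ownership word
 *     uint32_t expected = 0;
 *     bool won = core_util_atomic_cas_explicit_u32(&owner, &expected, my_id,
 *                        mbed_memory_order_acq_rel,   // order on success
 *                        mbed_memory_order_acquire);  // order on failure
 */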
#ifdef __cplusplus
} // extern "C"

/* Templates for the C++ overloads; each operation gets a volatile and a
 * non-volatile form */
#define DO_MBED_ATOMIC_LOAD_TEMPLATE(T, fn_suffix) \
template<> \
inline T core_util_atomic_load(const volatile T *valuePtr) \
{ \
    return core_util_atomic_load_##fn_suffix(valuePtr); \
} \
 \
template<> \
inline T core_util_atomic_load(const T *valuePtr) \
{ \
    return core_util_atomic_load_##fn_suffix(valuePtr); \
} \
 \
template<> \
inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) \
{ \
    return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
} \
 \
template<> \
inline T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) \
{ \
    return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
}

template<typename T>
inline T *core_util_atomic_load(T *const volatile *valuePtr)
{
    return (T *) core_util_atomic_load_ptr((void *const volatile *) valuePtr);
}

template<typename T>
inline T *core_util_atomic_load(T *const *valuePtr)
{
    return (T *) core_util_atomic_load_ptr((void *const *) valuePtr);
}

template<typename T>
inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order)
{
    return (T *) core_util_atomic_load_explicit_ptr((void *const volatile *) valuePtr, order);
}

template<typename T>
inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order)
{
    return (T *) core_util_atomic_load_explicit_ptr((void *const *) valuePtr, order);
}

DO_MBED_ATOMIC_LOAD_TEMPLATE(uint8_t, u8)
DO_MBED_ATOMIC_LOAD_TEMPLATE(uint16_t, u16)
DO_MBED_ATOMIC_LOAD_TEMPLATE(uint32_t, u32)
DO_MBED_ATOMIC_LOAD_TEMPLATE(uint64_t, u64)
DO_MBED_ATOMIC_LOAD_TEMPLATE(int8_t, s8)
DO_MBED_ATOMIC_LOAD_TEMPLATE(int16_t, s16)
DO_MBED_ATOMIC_LOAD_TEMPLATE(int32_t, s32)
DO_MBED_ATOMIC_LOAD_TEMPLATE(int64_t, s64)
DO_MBED_ATOMIC_LOAD_TEMPLATE(bool, bool)
#define DO_MBED_ATOMIC_STORE_TEMPLATE(T, fn_suffix) \
template<> \
inline void core_util_atomic_store(volatile T *valuePtr, T val) \
{ \
    core_util_atomic_store_##fn_suffix(valuePtr, val); \
} \
 \
template<> \
inline void core_util_atomic_store(T *valuePtr, T val) \
{ \
    core_util_atomic_store_##fn_suffix(valuePtr, val); \
} \
 \
template<> \
inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) \
{ \
    core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
} \
 \
template<> \
inline void core_util_atomic_store_explicit(T *valuePtr, T val, mbed_memory_order order) \
{ \
    core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
}

template<typename T>
inline void core_util_atomic_store(T *volatile *valuePtr, T *val)
{
    core_util_atomic_store_ptr((void *volatile *) valuePtr, val);
}

template<typename T>
inline void core_util_atomic_store(T **valuePtr, T *val)
{
    core_util_atomic_store_ptr((void **) valuePtr, val);
}

template<typename T>
inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *val, mbed_memory_order order)
{
    core_util_atomic_store_explicit_ptr((void *volatile *) valuePtr, val, order);
}

template<typename T>
inline void core_util_atomic_store_explicit(T **valuePtr, T *val, mbed_memory_order order)
{
    core_util_atomic_store_explicit_ptr((void **) valuePtr, val, order);
}

DO_MBED_ATOMIC_STORE_TEMPLATE(uint8_t, u8)
DO_MBED_ATOMIC_STORE_TEMPLATE(uint16_t, u16)
DO_MBED_ATOMIC_STORE_TEMPLATE(uint32_t, u32)
DO_MBED_ATOMIC_STORE_TEMPLATE(uint64_t, u64)
DO_MBED_ATOMIC_STORE_TEMPLATE(int8_t, s8)
DO_MBED_ATOMIC_STORE_TEMPLATE(int16_t, s16)
DO_MBED_ATOMIC_STORE_TEMPLATE(int32_t, s32)
DO_MBED_ATOMIC_STORE_TEMPLATE(int64_t, s64)
DO_MBED_ATOMIC_STORE_TEMPLATE(bool, bool)
#define DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, T, fn_suffix) \
template<> \
inline bool core_util_atomic_##tname(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \
{ \
    return core_util_atomic_##fname##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \
}

template<typename T>
inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue)
{
    return core_util_atomic_cas_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue);
}

template<typename T>
inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue)
{
    return core_util_atomic_compare_exchange_weak_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue);
}

#define DO_MBED_ATOMIC_CAS_TEMPLATES(tname, fname) \
    DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint8_t, u8) \
    DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint16_t, u16) \
    DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint32_t, u32) \
    DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint64_t, u64) \
    DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int8_t, s8) \
    DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int16_t, s16) \
    DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int32_t, s32) \
    DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int64_t, s64) \
    DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, bool, bool)

DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_strong, cas)
DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_weak, compare_exchange_weak)
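/*
 * Usage sketch (editor's illustration, not part of the original header):
 * the C++ templates let one spelling cover integral and pointer types:
 *
 *     volatile uint16_t counter = 0;                 // hypothetical
 *     uint16_t expected = 0;
 *     bool ok = core_util_atomic_compare_exchange_strong(&counter, &expected, uint16_t{1});
 *
 *     struct Node;                                   // hypothetical
 *     extern Node *newNode;
 *     Node *volatile head = nullptr;
 *     Node *expectedHead = nullptr;
 *     bool pushed = core_util_atomic_compare_exchange_strong(&head, &expectedHead, newNode);
 */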
#define DO_MBED_ATOMIC_OP_TEMPLATE(name, T, fn_suffix) \
template<> \
inline T core_util_atomic_##name(volatile T *valuePtr, T arg) \
{ \
    return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \
} \
 \
template<> \
inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \
                                            mbed_memory_order order) \
{ \
    return core_util_atomic_##name##_explicit_##fn_suffix(valuePtr, arg, order); \
}

template<>
inline bool core_util_atomic_exchange(volatile bool *valuePtr, bool arg)
{
    return core_util_atomic_exchange_bool(valuePtr, arg);
}

template<>
inline bool core_util_atomic_exchange_explicit(volatile bool *valuePtr, bool arg, mbed_memory_order order)
{
    return core_util_atomic_exchange_explicit_bool(valuePtr, arg, order);
}

template<typename T>
inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *arg)
{
    return (T *) core_util_atomic_exchange_ptr((void *volatile *) valuePtr, arg);
}

template<typename T>
inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *arg, mbed_memory_order order)
{
    return (T *) core_util_atomic_exchange_explicit_ptr((void *volatile *) valuePtr, arg, order);
}

template<typename T>
inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg)
{
    return (T *) core_util_atomic_fetch_add_ptr((void *volatile *) valuePtr, arg * sizeof(T));
}

template<typename T>
inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
{
    return (T *) core_util_atomic_fetch_add_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order);
}

template<typename T>
inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg)
{
    return (T *) core_util_atomic_fetch_sub_ptr((void *volatile *) valuePtr, arg * sizeof(T));
}

template<typename T>
inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
{
    return (T *) core_util_atomic_fetch_sub_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order);
}
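/*
 * Usage sketch (editor's illustration, not part of the original header):
 * the pointer templates scale the ptrdiff_t argument by sizeof(T), so they
 * behave like ordinary pointer arithmetic:
 *
 *     uint32_t buffer[16];                           // hypothetical
 *     uint32_t *volatile cursor = buffer;
 *     uint32_t *slot = core_util_atomic_fetch_add(&cursor, 1);
 *     // cursor advanced by sizeof(uint32_t) bytes; slot points at the claimed entry
 */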
#define DO_MBED_ATOMIC_OP_U_TEMPLATES(name) \
    DO_MBED_ATOMIC_OP_TEMPLATE(name, uint8_t, u8) \
    DO_MBED_ATOMIC_OP_TEMPLATE(name, uint16_t, u16) \
    DO_MBED_ATOMIC_OP_TEMPLATE(name, uint32_t, u32) \
    DO_MBED_ATOMIC_OP_TEMPLATE(name, uint64_t, u64)

#define DO_MBED_ATOMIC_OP_S_TEMPLATES(name) \
    DO_MBED_ATOMIC_OP_TEMPLATE(name, int8_t, s8) \
    DO_MBED_ATOMIC_OP_TEMPLATE(name, int16_t, s16) \
    DO_MBED_ATOMIC_OP_TEMPLATE(name, int32_t, s32) \
    DO_MBED_ATOMIC_OP_TEMPLATE(name, int64_t, s64)

DO_MBED_ATOMIC_OP_U_TEMPLATES(exchange)
DO_MBED_ATOMIC_OP_S_TEMPLATES(exchange)
DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_add)
DO_MBED_ATOMIC_OP_S_TEMPLATES(fetch_add)
DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_sub)
DO_MBED_ATOMIC_OP_S_TEMPLATES(fetch_sub)
DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_and)
DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_or)
DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_xor)
#endif // __cplusplus

#undef MBED_DOP_REG
#undef MBED_CMP_IMM
#undef MBED_SUB3_IMM
#undef DO_MBED_LOCKFREE_EXCHG_ASM
#undef DO_MBED_LOCKFREE_3OP_ASM
#undef DO_MBED_LOCKFREE_2OP_ASM
#undef DO_MBED_LOCKFREE_CAS_WEAK_ASM
#undef DO_MBED_LOCKFREE_CAS_STRONG_ASM
#undef DO_MBED_LOCKFREE_LOADSTORE
#undef DO_MBED_LOCKFREE_EXCHG_OP
#undef DO_MBED_LOCKFREE_CAS_WEAK_OP
#undef DO_MBED_LOCKFREE_CAS_STRONG_OP
#undef DO_MBED_LOCKFREE_2OP
#undef DO_MBED_LOCKFREE_3OP
#undef DO_MBED_LOCKFREE_EXCHG_OPS
#undef DO_MBED_LOCKFREE_2OPS
#undef DO_MBED_LOCKFREE_3OPS
#undef DO_MBED_LOCKFREE_CAS_WEAK_OPS
#undef DO_MBED_LOCKFREE_CAS_STRONG_OPS
#undef DO_MBED_SIGNED_CAS_OP
#undef DO_MBED_SIGNED_CAS_OPS
#undef DO_MBED_SIGNED_FETCH_OP
#undef DO_MBED_SIGNED_EXPLICIT_FETCH_OP
#undef DO_MBED_SIGNED_FETCH_OPS
#undef DO_MBED_SIGNED_EXPLICIT_FETCH_OPS
#undef DO_MBED_LOCKED_FETCH_OP_ORDERINGS
#undef DO_MBED_LOCKED_CAS_ORDERINGS
#undef MBED_ACQUIRE_BARRIER
#undef MBED_RELEASE_BARRIER
#undef MBED_SEQ_CST_BARRIER
#undef DO_MBED_ATOMIC_LOAD_TEMPLATE
#undef DO_MBED_ATOMIC_STORE_TEMPLATE
#undef DO_MBED_ATOMIC_EXCHANGE_TEMPLATE
#undef DO_MBED_ATOMIC_CAS_TEMPLATE
#undef DO_MBED_ATOMIC_CAS_TEMPLATES
#undef DO_MBED_ATOMIC_FETCH_TEMPLATE
#undef DO_MBED_ATOMIC_FETCH_U_TEMPLATES
#undef DO_MBED_ATOMIC_FETCH_S_TEMPLATES
/* Also clean up the template macros actually defined above */
#undef DO_MBED_ATOMIC_OP_TEMPLATE
#undef DO_MBED_ATOMIC_OP_U_TEMPLATES
#undef DO_MBED_ATOMIC_OP_S_TEMPLATES

#endif // __MBED_ATOMIC_IMPL_H__