mbed_atomic_impl.h

/*
 * Copyright (c) 2015-2016, ARM Limited, All Rights Reserved
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __MBED_ATOMIC_IMPL_H__
#define __MBED_ATOMIC_IMPL_H__

#ifndef __MBED_UTIL_ATOMIC_H__
#error "mbed_atomic_impl.h is designed to be included only by mbed_atomic.h"
#endif

#include <stdint.h>
#include "cmsis.h"
#include "platform/mbed_assert.h"
#include "platform/mbed_toolchain.h"

#ifdef __cplusplus
extern "C" {
#endif

#ifdef MBED_DEBUG
/* Plain loads must not have "release" or "acquire+release" order */
#define MBED_CHECK_LOAD_ORDER(order) MBED_ASSERT((order) != mbed_memory_order_release && (order) != mbed_memory_order_acq_rel)

/* Plain stores must not have "consume", "acquire" or "acquire+release" order */
#define MBED_CHECK_STORE_ORDER(order) MBED_ASSERT((order) != mbed_memory_order_consume && (order) != mbed_memory_order_acquire && (order) != mbed_memory_order_acq_rel)

/* Compare exchange needs failure order no stronger than success, and failure can't be "release" or "acquire+release" */
#define MBED_CHECK_CAS_ORDER(success, failure) \
    MBED_ASSERT((failure) <= (success) && (failure) != mbed_memory_order_release && (failure) != mbed_memory_order_acq_rel)
#else
#define MBED_CHECK_LOAD_ORDER(order) (void)0
#define MBED_CHECK_STORE_ORDER(order) (void)0
#define MBED_CHECK_CAS_ORDER(success, failure) (void)0
#endif
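
/* For orientation (an illustrative sketch of the intended debug behaviour,
 * not code from this header): in an MBED_DEBUG build the checks above turn
 * an invalid ordering request into an assertion failure, e.g.
 *
 *     uint32_t v;
 *     // Asserts: a plain load cannot be a release operation
 *     core_util_atomic_load_explicit_u32(&v, mbed_memory_order_release);
 *
 * In non-debug builds the checks compile away to (void)0.
 */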

/* This is currently just to silence unit tests, so no better test required */
#ifdef __MBED__
#define MBED_ATOMIC_PTR_SIZE 32
#else
#define MBED_ATOMIC_PTR_SIZE 64
#endif

/* Place barrier after a load or read-modify-write if a consume or acquire operation */
#define MBED_ACQUIRE_BARRIER(order) do { \
    if ((order) & (mbed_memory_order_consume|mbed_memory_order_acquire)) { \
        MBED_BARRIER(); \
    } } while (0)

/* Place barrier before a store or read-modify-write if a release operation */
#define MBED_RELEASE_BARRIER(order) do { \
    if ((order) & mbed_memory_order_release) { \
        MBED_BARRIER(); \
    } } while (0)

/* Place barrier after a plain store if sequentially consistent */
#define MBED_SEQ_CST_BARRIER(order) do { \
    if ((order) == mbed_memory_order_seq_cst) { \
        MBED_BARRIER(); \
    } } while (0)
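
/* A sketch of how these compose (assumed expansion, for orientation only):
 * a load-acquire is a plain load followed by a barrier, and a store-release
 * is a barrier followed by a plain store:
 *
 *     T v = *ptr;                      // load
 *     MBED_ACQUIRE_BARRIER(order);     // barrier after, if acquire/consume
 *
 *     MBED_RELEASE_BARRIER(order);     // barrier before, if release
 *     *ptr = v;                        // store
 *     MBED_SEQ_CST_BARRIER(order);     // extra barrier only for seq_cst
 */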


#if MBED_EXCLUSIVE_ACCESS

/* This header file provides C inline definitions for atomic functions. */
/* For C99 inline semantic compatibility, mbed_atomic_impl.c has out-of-line definitions. */

/****************************** ASSEMBLER **********************************/

// Fiddle about with constraints. These work for GCC and clang, but
// IAR appears to be restricted to having only a single constraint,
// so we can't do immediates.
#if MBED_EXCLUSIVE_ACCESS_THUMB1
#define MBED_DOP_REG "l" // Need low register to get 16-bit 3-op ADD/SUB
#define MBED_CMP_IMM "I" // CMP 8-bit immediate
#define MBED_SUB3_IMM "L" // -7 to +7
#else
#define MBED_DOP_REG "r" // Can use 32-bit 3-op ADD/SUB, so any registers
#define MBED_CMP_IMM "IL" // CMP or CMN, 12-bit immediate
#define MBED_SUB3_IMM "IL" // SUB or ADD, 12-bit immediate
#endif

// ARM C 5 inline assembler recommends against using LDREX/STREX
// for same reason as intrinsics, but there's no other way to get
// inlining. ARM C 5 is being retired anyway.

#ifdef __CC_ARM
#pragma diag_suppress 3732
#define DO_MBED_LOCKFREE_EXCHG_ASM(M)                           \
    __asm {                                                     \
        LDREX##M    oldValue, [valuePtr]                      ; \
        STREX##M    fail, newValue, [valuePtr]                  \
    }
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_EXCHG_ASM(M)                           \
    __asm volatile (                                            \
        ".syntax unified\n\t"                                   \
        "LDREX"#M "\t%[oldValue], %[value]\n\t"                 \
        "STREX"#M "\t%[fail], %[newValue], %[value]\n\t"        \
      : [oldValue] "=&r" (oldValue),                            \
        [fail] "=&r" (fail),                                    \
        [value] "+Q" (*valuePtr)                                \
      : [newValue] "r" (newValue)                               \
      :                                                         \
    )
#elif defined __ICCARM__
/* In IAR "r" means low register if Thumbv1 (there's no way to specify any register...) */
#define DO_MBED_LOCKFREE_EXCHG_ASM(M)                           \
    asm volatile (                                              \
        "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n"              \
        "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n"     \
      : [oldValue] "=&r" (oldValue),                            \
        [fail] "=&r" (fail)                                     \
      : [valuePtr] "r" (valuePtr),                              \
        [newValue] "r" (newValue)                               \
      : "memory"                                                \
    )
#endif
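
/* For orientation (assumed expansion, not code in this header): with the
 * GCC/clang variant, DO_MBED_LOCKFREE_EXCHG_ASM(B) emits roughly
 *
 *     LDREXB  oldValue, [valuePtr]        ; exclusive load of a byte
 *     STREXB  fail, newValue, [valuePtr]  ; fail = 0 on success, 1 if the
 *                                         ; exclusive monitor was lost
 *
 * M selects the access size: B = byte, H = halfword, empty = word.
 */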

#ifdef __CC_ARM
#define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M)       \
    __asm {                                                     \
        LDREX##M    newValue, [valuePtr]                      ; \
        OP          newValue, arg                             ; \
        STREX##M    fail, newValue, [valuePtr]                  \
    }
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M)       \
    __asm volatile (                                            \
        ".syntax unified\n\t"                                   \
        "LDREX"#M "\t%[newValue], %[value]\n\t"                 \
        #OP       "\t%[newValue], %[arg]\n\t"                   \
        "STREX"#M "\t%[fail], %[newValue], %[value]\n\t"        \
      : [newValue] "=&" MBED_DOP_REG (newValue),                \
        [fail] "=&r" (fail),                                    \
        [value] "+Q" (*valuePtr)                                \
      : [arg] Constants MBED_DOP_REG (arg)                      \
      : "cc"                                                    \
    )
#elif defined __ICCARM__
/* In IAR "r" means low register if Thumbv1 (there's no way to specify any register...) */
/* IAR does not support "ADDS reg, reg", so write as 3-operand */
#define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M)       \
    asm volatile (                                              \
        "LDREX"#M "\t%[newValue], [%[valuePtr]]\n"              \
        #OP       "\t%[newValue], %[newValue], %[arg]\n"        \
        "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n"     \
      : [newValue] "=&r" (newValue),                            \
        [fail] "=&r" (fail)                                     \
      : [valuePtr] "r" (valuePtr),                              \
        [arg] "r" (arg)                                         \
      : "memory", "cc"                                          \
    )
#endif

#ifdef __CC_ARM
#define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M)       \
    __asm {                                                     \
        LDREX##M    oldValue, [valuePtr]                      ; \
        OP          newValue, oldValue, arg                   ; \
        STREX##M    fail, newValue, [valuePtr]                  \
    }
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M)       \
    __asm volatile (                                            \
        ".syntax unified\n\t"                                   \
        "LDREX"#M "\t%[oldValue], %[value]\n\t"                 \
        #OP       "\t%[newValue], %[oldValue], %[arg]\n\t"      \
        "STREX"#M "\t%[fail], %[newValue], %[value]\n\t"        \
      : [oldValue] "=&" MBED_DOP_REG (oldValue),                \
        [newValue] "=&" MBED_DOP_REG (newValue),                \
        [fail] "=&r" (fail),                                    \
        [value] "+Q" (*valuePtr)                                \
      : [arg] Constants MBED_DOP_REG (arg)                      \
      : "cc"                                                    \
    )
#elif defined __ICCARM__
/* In IAR "r" means low register if Thumbv1 (there's no way to specify any register...) */
#define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M)       \
    asm volatile (                                              \
        "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n"              \
        #OP       "\t%[newValue], %[oldValue], %[arg]\n"        \
        "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n"     \
      : [oldValue] "=&r" (oldValue),                            \
        [newValue] "=&r" (newValue),                            \
        [fail] "=&r" (fail)                                     \
      : [valuePtr] "r" (valuePtr),                              \
        [arg] "r" (arg)                                         \
      : "memory", "cc"                                          \
    )
#endif

/* Bitwise operations are harder to do in ARMv8-M baseline - there
 * are only 2-operand versions of the instructions.
 */
#ifdef __CC_ARM
#define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M)       \
    __asm {                                                     \
        LDREX##M    oldValue, [valuePtr]                      ; \
        MOV         newValue, oldValue                        ; \
        OP          newValue, arg                             ; \
        STREX##M    fail, newValue, [valuePtr]                  \
    }
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M)       \
    __asm volatile (                                            \
        ".syntax unified\n\t"                                   \
        "LDREX"#M "\t%[oldValue], %[value]\n\t"                 \
        "MOV"     "\t%[newValue], %[oldValue]\n\t"              \
        #OP       "\t%[newValue], %[arg]\n\t"                   \
        "STREX"#M "\t%[fail], %[newValue], %[value]\n\t"        \
      : [oldValue] "=&r" (oldValue),                            \
        [newValue] "=&l" (newValue),                            \
        [fail] "=&r" (fail),                                    \
        [value] "+Q" (*valuePtr)                                \
      : [arg] Constants "l" (arg)                               \
      : "cc"                                                    \
    )
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M)       \
    asm volatile (                                              \
        "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n"              \
        "MOV"     "\t%[newValue], %[oldValue]\n"                \
        #OP       "\t%[newValue], %[arg]\n"                     \
        "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n"     \
      : [oldValue] "=&r" (oldValue),                            \
        [newValue] "=&r" (newValue),                            \
        [fail] "=&r" (fail)                                     \
      : [valuePtr] "r" (valuePtr),                              \
        [arg] "r" (arg)                                         \
      : "memory", "cc"                                          \
    )
#endif

/* Note that we split ARM and Thumb implementations for CAS, as
 * the key distinction is the handling of conditions. Thumb-2 IT is
 * partially deprecated, so avoid it, making Thumb-1 and Thumb-2
 * implementations the same.
 */
#if MBED_EXCLUSIVE_ACCESS_ARM
#ifdef __CC_ARM
#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M)                        \
    __asm {                                                     \
        LDREX##M     oldValue, [ptr]                          ; \
        SUBS         fail, oldValue, expectedValue            ; \
        STREX##M##EQ fail, desiredValue, [ptr]                  \
    }
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M)                        \
    __asm volatile (                                            \
        ".syntax unified\n\t"                                   \
        "LDREX"#M  "\t%[oldValue], %[value]\n\t"                \
        "SUBS"     "\t%[fail], %[oldValue], %[expectedValue]\n\t"\
        "STREX"#M"EQ\t%[fail], %[desiredValue], %[value]\n\t"   \
      : [oldValue] "=&r" (oldValue),                            \
        [fail] "=&r" (fail),                                    \
        [value] "+Q" (*ptr)                                     \
      : [desiredValue] "r" (desiredValue),                      \
        [expectedValue] "ILr" (expectedValue)                   \
      : "cc"                                                    \
    )
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M)                        \
    asm volatile (                                              \
        "LDREX"#M  "\t%[oldValue], [%[valuePtr]]\n"             \
        "SUBS"     "\t%[fail], %[oldValue], %[expectedValue]\n" \
        "STREX"#M"EQ\t%[fail], %[desiredValue], [%[valuePtr]]\n"\
      : [oldValue] "=&r" (oldValue),                            \
        [fail] "=&r" (fail)                                     \
      : [desiredValue] "r" (desiredValue),                      \
        [expectedValue] "r" (expectedValue),                    \
        [valuePtr] "r" (ptr)                                    \
      : "memory", "cc"                                          \
    )
#endif
#else // MBED_EXCLUSIVE_ACCESS_ARM
#ifdef __CC_ARM
#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M)                        \
    __asm {                                                     \
        LDREX##M     oldValue, [ptr]                          ; \
        SUBS         fail, oldValue, expectedValue            ; \
        BNE          done                                     ; \
        STREX##M     fail, desiredValue, [ptr]                ; \
done:                                                           \
    }
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M)                        \
    __asm volatile (                                            \
        ".syntax unified\n\t"                                   \
        "LDREX"#M  "\t%[oldValue], %[value]\n\t"                \
        "SUBS"     "\t%[fail], %[oldValue], %[expectedValue]\n\t"\
        "BNE"      "\t%=f\n\t"                                  \
        "STREX"#M  "\t%[fail], %[desiredValue], %[value]\n"     \
        "%=:"                                                   \
      : [oldValue] "=&" MBED_DOP_REG (oldValue),                \
        [fail] "=&" MBED_DOP_REG (fail),                        \
        [value] "+Q" (*ptr)                                     \
      : [desiredValue] "r" (desiredValue),                      \
        [expectedValue] MBED_SUB3_IMM MBED_DOP_REG (expectedValue) \
      : "cc"                                                    \
    )
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M)                        \
    asm volatile (                                              \
        "LDREX"#M  "\t%[oldValue], [%[valuePtr]]\n"             \
        "SUBS"     "\t%[fail], %[oldValue], %[expectedValue]\n" \
        "BNE"      "\tdone\n\t"                                 \
        "STREX"#M  "\t%[fail], %[desiredValue], [%[valuePtr]]\n"\
        "done:"                                                 \
      : [oldValue] "=&r" (oldValue),                            \
        [fail] "=&r" (fail)                                     \
      : [desiredValue] "r" (desiredValue),                      \
        [expectedValue] "r" (expectedValue),                    \
        [valuePtr] "r" (ptr)                                    \
      : "memory", "cc"                                          \
    )
#endif
#endif // MBED_EXCLUSIVE_ACCESS_ARM

/* For strong CAS, conditional execution is complex enough to
 * not be worthwhile, so all implementations look like Thumb-1.
 * (This is the operation for which STREX returning 0 for success
 * is beneficial.)
 */
#ifdef __CC_ARM
#define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M)                      \
    __asm {                                                     \
    retry:                                                    ; \
        LDREX##M     oldValue, [ptr]                          ; \
        SUBS         fail, oldValue, expectedValue            ; \
        BNE          done                                     ; \
        STREX##M     fail, desiredValue, [ptr]                ; \
        CMP          fail, 0                                  ; \
        BNE          retry                                    ; \
    done:                                                       \
    }
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M)                      \
    __asm volatile (                                            \
        ".syntax unified\n\t"                                   \
        "\n%=:\n\t"                                             \
        "LDREX"#M  "\t%[oldValue], %[value]\n\t"                \
        "SUBS"     "\t%[fail], %[oldValue], %[expectedValue]\n\t"\
        "BNE"      "\t%=f\n"                                    \
        "STREX"#M  "\t%[fail], %[desiredValue], %[value]\n\t"   \
        "CMP"      "\t%[fail], #0\n\t"                          \
        "BNE"      "\t%=b\n"                                    \
        "%=:"                                                   \
      : [oldValue] "=&" MBED_DOP_REG (oldValue),                \
        [fail] "=&" MBED_DOP_REG (fail),                        \
        [value] "+Q" (*ptr)                                     \
      : [desiredValue] "r" (desiredValue),                      \
        [expectedValue] MBED_SUB3_IMM MBED_DOP_REG (expectedValue) \
      : "cc"                                                    \
    )
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M)                      \
    asm volatile (                                              \
        "retry:\n"                                              \
        "LDREX"#M  "\t%[oldValue], [%[valuePtr]]\n"             \
        "SUBS"     "\t%[fail], %[oldValue], %[expectedValue]\n" \
        "BNE"      "\tdone\n"                                   \
        "STREX"#M  "\t%[fail], %[desiredValue], [%[valuePtr]]\n"\
        "CMP"      "\t%[fail], #0\n"                            \
        "BNE"      "\tretry\n"                                  \
        "done:"                                                 \
      : [oldValue] "=&r" (oldValue),                            \
        [fail] "=&r" (fail)                                     \
      : [desiredValue] "r" (desiredValue),                      \
        [expectedValue] "r" (expectedValue),                    \
        [valuePtr] "r" (ptr)                                    \
      : "memory", "cc"                                          \
    )
#endif

/********************* LOCK-FREE IMPLEMENTATION MACROS ****************/

/* Note the care taken with types here. Values that the assembler outputs
 * correctly narrowed, or takes as inputs without caring about width, are
 * marked as type T. Other values are uint32_t. It's not clear from the
 * documentation whether the assembler assumes anything about widths, but we
 * try to signal widths correctly, to get necessary narrowing without
 * unnecessary narrowing.
 * Tests show that GCC in particular will pass in unnarrowed values - e.g.
 * passing "uint8_t arg = -1" to the assembler as 0xFFFFFFFF. This is fine
 * for, e.g., add_u8, but wouldn't be for compare_and_exchange_u8.
 * On the other hand, it seems to be impossible to stop GCC inserting
 * narrowing instructions for the output - it will always put in a UXTB for
 * the oldValue of an operation.
 */
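
/* To illustrate the hazard (a hypothetical sketch, not code in this file):
 *
 *     uint8_t arg = -1;            // value 0xFF
 *     // GCC may hand the asm 0xFFFFFFFF rather than 0xFF. Adding that to a
 *     // byte location still produces the right low 8 bits, so fetch_add_u8
 *     // is safe; but a SUBS against an unnarrowed register would make a
 *     // byte CAS compare 0xFF against 0xFFFFFFFF and always fail.
 *
 * Hence the CAS macros below keep expectedValue as a uint32_t assigned from
 * an already-narrowed T value, rather than trusting a register's upper bits.
 */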
#define DO_MBED_LOCKFREE_EXCHG_OP(T, fn_suffix, M)                              \
inline T core_util_atomic_exchange_##fn_suffix(volatile T *valuePtr, T newValue) \
{                                                                               \
    T oldValue;                                                                 \
    uint32_t fail;                                                              \
    MBED_BARRIER();                                                             \
    do {                                                                        \
        DO_MBED_LOCKFREE_EXCHG_ASM(M);                                          \
    } while (fail);                                                             \
    MBED_BARRIER();                                                             \
    return oldValue;                                                            \
}                                                                               \
                                                                                \
MBED_FORCEINLINE T core_util_atomic_exchange_explicit_##fn_suffix(              \
        volatile T *valuePtr, T newValue, mbed_memory_order order)              \
{                                                                               \
    T oldValue;                                                                 \
    uint32_t fail;                                                              \
    MBED_RELEASE_BARRIER(order);                                                \
    do {                                                                        \
        DO_MBED_LOCKFREE_EXCHG_ASM(M);                                          \
    } while (fail);                                                             \
    MBED_ACQUIRE_BARRIER(order);                                                \
    return oldValue;                                                            \
}
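
/* For example, DO_MBED_LOCKFREE_EXCHG_OP(uint8_t, u8, B) below generates
 * (sketch of the expansion, shown only for orientation):
 *
 *     inline uint8_t core_util_atomic_exchange_u8(volatile uint8_t *valuePtr,
 *                                                 uint8_t newValue);
 *
 * which swaps in newValue with sequentially consistent ordering and returns
 * the previous contents, retrying the LDREX/STREX pair until the store
 * succeeds. The _explicit form differs only in the surrounding barriers.
 */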

#define DO_MBED_LOCKFREE_CAS_WEAK_OP(T, fn_suffix, M)                           \
inline bool core_util_atomic_compare_exchange_weak_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \
{                                                                               \
    MBED_BARRIER();                                                             \
    T oldValue;                                                                 \
    uint32_t fail, expectedValue = *expectedCurrentValue;                       \
    DO_MBED_LOCKFREE_CAS_WEAK_ASM(M);                                           \
    if (fail) {                                                                 \
        *expectedCurrentValue = oldValue;                                       \
    }                                                                           \
    MBED_BARRIER();                                                             \
    return !fail;                                                               \
}                                                                               \
                                                                                \
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) \
{                                                                               \
    MBED_CHECK_CAS_ORDER(success, failure);                                     \
    MBED_RELEASE_BARRIER(success);                                              \
    T oldValue;                                                                 \
    uint32_t fail, expectedValue = *expectedCurrentValue;                       \
    DO_MBED_LOCKFREE_CAS_WEAK_ASM(M);                                           \
    if (fail) {                                                                 \
        *expectedCurrentValue = oldValue;                                       \
    }                                                                           \
    MBED_ACQUIRE_BARRIER(fail ? failure : success);                             \
    return !fail;                                                               \
}
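
/* Weak CAS may fail spuriously (a lost exclusive monitor reports failure
 * even when the value matched), so it is normally used in a loop. A typical
 * caller-side pattern (illustrative sketch, not part of this header):
 *
 *     static volatile uint32_t max_seen;
 *
 *     void note_value(uint32_t v)
 *     {
 *         uint32_t cur = core_util_atomic_load_u32(&max_seen);
 *         // On failure, cur is refreshed with the currently observed value.
 *         while (v > cur &&
 *                !core_util_atomic_compare_exchange_weak_u32(&max_seen, &cur, v)) {
 *         }
 *     }
 */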

#define DO_MBED_LOCKFREE_CAS_STRONG_OP(T, fn_suffix, M)                         \
inline bool core_util_atomic_cas_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \
{                                                                               \
    MBED_BARRIER();                                                             \
    T oldValue;                                                                 \
    uint32_t fail, expectedValue = *expectedCurrentValue;                       \
    DO_MBED_LOCKFREE_CAS_STRONG_ASM(M);                                         \
    if (fail) {                                                                 \
        *expectedCurrentValue = oldValue;                                       \
    }                                                                           \
    MBED_BARRIER();                                                             \
    return !fail;                                                               \
}                                                                               \
                                                                                \
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) \
{                                                                               \
    MBED_CHECK_CAS_ORDER(success, failure);                                     \
    MBED_RELEASE_BARRIER(success);                                              \
    T oldValue;                                                                 \
    uint32_t fail, expectedValue = *expectedCurrentValue;                       \
    DO_MBED_LOCKFREE_CAS_STRONG_ASM(M);                                         \
    if (fail) {                                                                 \
        *expectedCurrentValue = oldValue;                                       \
    }                                                                           \
    MBED_ACQUIRE_BARRIER(fail ? failure : success);                             \
    return !fail;                                                               \
}
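
/* The strong form retries the STREX internally, so it only reports failure
 * on a genuine value mismatch. That makes it usable without a caller loop
 * when a single attempt is all that's wanted (illustrative sketch):
 *
 *     static volatile uint8_t owner;
 *
 *     bool try_claim(uint8_t id)
 *     {
 *         uint8_t expected = 0;        // 0 means "unowned"
 *         return core_util_atomic_cas_u8(&owner, &expected, id);
 *     }
 */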


#define DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, T, fn_suffix, M)       \
inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg)     \
{                                                                               \
    uint32_t fail, newValue;                                                    \
    MBED_BARRIER();                                                             \
    do {                                                                        \
        DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M);                      \
    } while (fail);                                                             \
    MBED_BARRIER();                                                             \
    return (T) newValue;                                                        \
}                                                                               \
                                                                                \
MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix(              \
        volatile T *valuePtr, T arg, mbed_memory_order order)                   \
{                                                                               \
    uint32_t fail, newValue;                                                    \
    MBED_RELEASE_BARRIER(order);                                                \
    do {                                                                        \
        DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M);                      \
    } while (fail);                                                             \
    MBED_ACQUIRE_BARRIER(order);                                                \
    return (T) newValue;                                                        \
}                                                                               \

#define DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, T, fn_suffix, M)       \
inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg)     \
{                                                                               \
    T oldValue;                                                                 \
    uint32_t fail, newValue;                                                    \
    MBED_BARRIER();                                                             \
    do {                                                                        \
        DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M);                      \
    } while (fail);                                                             \
    MBED_BARRIER();                                                             \
    return oldValue;                                                            \
}                                                                               \
                                                                                \
MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix(              \
        volatile T *valuePtr, T arg, mbed_memory_order order)                   \
{                                                                               \
    T oldValue;                                                                 \
    uint32_t fail, newValue;                                                    \
    MBED_RELEASE_BARRIER(order);                                                \
    do {                                                                        \
        DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M);                      \
    } while (fail);                                                             \
    MBED_ACQUIRE_BARRIER(order);                                                \
    return oldValue;                                                            \
}                                                                               \

#define DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, T, fn_suffix, M)       \
inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) {   \
    T oldValue;                                                                 \
    uint32_t fail, newValue;                                                    \
    MBED_BARRIER();                                                             \
    do {                                                                        \
        DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M);                      \
    } while (fail);                                                             \
    MBED_BARRIER();                                                             \
    return oldValue;                                                            \
}                                                                               \
                                                                                \
MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix(              \
        volatile T *valuePtr, T arg, mbed_memory_order order)                   \
{                                                                               \
    T oldValue;                                                                 \
    uint32_t fail, newValue;                                                    \
    MBED_RELEASE_BARRIER(order);                                                \
    do {                                                                        \
        DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M);                      \
    } while (fail);                                                             \
    MBED_ACQUIRE_BARRIER(order);                                                \
    return oldValue;                                                            \
}                                                                               \

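/* Naming convention (for orientation): the OLDVAL macros generate "fetch_OP"
 * style functions returning the value before the operation; the NEWVAL
 * macros generate "OP_fetch"/incr/decr style functions returning the value
 * after it. E.g. (illustrative):
 *
 *     volatile uint32_t n = 5;
 *     uint32_t a = core_util_atomic_fetch_add_u32(&n, 1);  // a == 5, n == 6
 *     uint32_t b = core_util_atomic_incr_u32(&n, 1);       // b == 7, n == 7
 */
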
inline bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *valuePtr)
{
    MBED_BARRIER();
    bool oldValue, newValue = true;
    uint32_t fail;
    do {
        DO_MBED_LOCKFREE_EXCHG_ASM(B);
    } while (fail);
    MBED_BARRIER();
    return oldValue;
}

MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *valuePtr, mbed_memory_order order)
{
    MBED_RELEASE_BARRIER(order);
    bool oldValue, newValue = true;
    uint32_t fail;
    do {
        DO_MBED_LOCKFREE_EXCHG_ASM(B);
    } while (fail);
    MBED_ACQUIRE_BARRIER(order);
    return oldValue;
}
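
/* core_util_atomic_flag supports the classic test-and-set spinlock. A
 * minimal sketch, assuming the flag API and CORE_UTIL_ATOMIC_FLAG_INIT
 * initializer declared in mbed_atomic.h:
 *
 *     static core_util_atomic_flag lock = CORE_UTIL_ATOMIC_FLAG_INIT;
 *
 *     void with_lock(void (*fn)(void))
 *     {
 *         while (core_util_atomic_flag_test_and_set(&lock)) {
 *             // spin until the previous owner clears the flag
 *         }
 *         fn();
 *         core_util_atomic_flag_clear(&lock);
 *     }
 */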

/********************* LOCK-FREE IMPLEMENTATION DEFINITIONS ****************/

#define DO_MBED_LOCKFREE_EXCHG_OPS() \
    DO_MBED_LOCKFREE_EXCHG_OP(uint8_t,  u8,  B) \
    DO_MBED_LOCKFREE_EXCHG_OP(uint16_t, u16, H) \
    DO_MBED_LOCKFREE_EXCHG_OP(uint32_t, u32,  )

#define DO_MBED_LOCKFREE_NEWVAL_2OPS(name, OP, Constants) \
    DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint8_t,  u8,  B) \
    DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint16_t, u16, H) \
    DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint32_t, u32,  )

#define DO_MBED_LOCKFREE_OLDVAL_3OPS(name, OP, Constants) \
    DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint8_t,  u8,  B) \
    DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint16_t, u16, H) \
    DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint32_t, u32,  )

#define DO_MBED_LOCKFREE_OLDVAL_2OPS(name, OP, Constants) \
    DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint8_t,  u8,  B) \
    DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint16_t, u16, H) \
    DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint32_t, u32,  )

#define DO_MBED_LOCKFREE_CAS_STRONG_OPS() \
    DO_MBED_LOCKFREE_CAS_STRONG_OP(uint8_t,  u8,  B) \
    DO_MBED_LOCKFREE_CAS_STRONG_OP(uint16_t, u16, H) \
    DO_MBED_LOCKFREE_CAS_STRONG_OP(uint32_t, u32,  )

#define DO_MBED_LOCKFREE_CAS_WEAK_OPS() \
    DO_MBED_LOCKFREE_CAS_WEAK_OP(uint8_t,  u8,  B) \
    DO_MBED_LOCKFREE_CAS_WEAK_OP(uint16_t, u16, H) \
    DO_MBED_LOCKFREE_CAS_WEAK_OP(uint32_t, u32,  )

// Note that these macros define a number of functions that are
// not in mbed_atomic.h, like core_util_atomic_and_fetch_u16.
// These are not documented via the doxygen in mbed_atomic.h, so
// for now should be regarded as internal only. They are used by the
// Atomic<T> template as an optimisation though.

// We always use the "S" form of operations - avoids yet another
// possible unneeded distinction between Thumbv1 and Thumbv2, and
// may reduce code size by allowing 16-bit instructions.
#if !MBED_EXCLUSIVE_ACCESS_THUMB1
// I constraint is 12-bit modified immediate constant
// L constraint is negated 12-bit modified immediate constant
// (relying on assembler to swap ADD/SUB)
// We could permit J (-4095 to +4095) if we used ADD/SUB
// instead of ADDS/SUBS, but then that would block generation
// of the 16-bit forms. Shame we can't specify "don't care"
// for the "S", or get the GNU multi-alternative to
// choose ADDS/ADD appropriately.

DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_add, ADDS, "IL")
DO_MBED_LOCKFREE_NEWVAL_2OPS(incr,      ADDS, "IL")
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_sub, SUBS, "IL")
DO_MBED_LOCKFREE_NEWVAL_2OPS(decr,      SUBS, "IL")
// K constraint is inverted 12-bit modified immediate constant
// (relying on assembler substituting BIC for AND)
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_and, ANDS, "IK")
DO_MBED_LOCKFREE_NEWVAL_2OPS(and_fetch, ANDS, "IK")
#if MBED_EXCLUSIVE_ACCESS_ARM
// ARM does not have ORN instruction, so take plain immediates.
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_or,  ORRS, "I")
DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch,  ORRS, "I")
#else
// Thumb-2 has ORN instruction, and assembler substitutes ORN for ORR.
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_or,  ORRS, "IK")
DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch,  ORRS, "IK")
#endif
// I constraint is 12-bit modified immediate operand
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_xor, EORS, "I")
DO_MBED_LOCKFREE_NEWVAL_2OPS(xor_fetch, EORS, "I")
#else // MBED_EXCLUSIVE_ACCESS_THUMB1
// I constraint is 0-255; J is -255 to -1, suitable for
// 2-op ADD/SUB (relying on assembler to swap ADD/SUB)
// L constraint is -7 to +7, suitable for 3-op ADD/SUB
// (relying on assembler to swap ADD/SUB)
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_add, ADDS, "L")
DO_MBED_LOCKFREE_NEWVAL_2OPS(incr,      ADDS, "IJ")
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_sub, SUBS, "L")
DO_MBED_LOCKFREE_NEWVAL_2OPS(decr,      SUBS, "IJ")
DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_and, ANDS, "")
DO_MBED_LOCKFREE_NEWVAL_2OPS(and_fetch, ANDS, "")
DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_or,  ORRS, "")
DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch,  ORRS, "")
DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_xor, EORS, "")
DO_MBED_LOCKFREE_NEWVAL_2OPS(xor_fetch, EORS, "")
#endif

DO_MBED_LOCKFREE_EXCHG_OPS()
DO_MBED_LOCKFREE_CAS_STRONG_OPS()
DO_MBED_LOCKFREE_CAS_WEAK_OPS()

#define DO_MBED_LOCKED_FETCH_OP_ORDERINGS(name) \
    DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint64_t, u64)
#define DO_MBED_LOCKED_CAS_ORDERINGS(name) \
    DO_MBED_LOCKED_CAS_ORDERING(name, uint64_t, u64)
#else // MBED_EXCLUSIVE_ACCESS
/* All the operations are locked, so need no ordering parameters */
#define DO_MBED_LOCKED_FETCH_OP_ORDERINGS(name) \
    DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint8_t,  u8) \
    DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint16_t, u16) \
    DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint32_t, u32) \
    DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint64_t, u64)
#define DO_MBED_LOCKED_CAS_ORDERINGS(name) \
    DO_MBED_LOCKED_CAS_ORDERING(name, uint8_t,  u8) \
    DO_MBED_LOCKED_CAS_ORDERING(name, uint16_t, u16) \
    DO_MBED_LOCKED_CAS_ORDERING(name, uint32_t, u32) \
    DO_MBED_LOCKED_CAS_ORDERING(name, uint64_t, u64)

MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *valuePtr, mbed_memory_order order)
{
    return core_util_atomic_flag_test_and_set(valuePtr);
}
#endif // MBED_EXCLUSIVE_ACCESS

/********************* OPERATIONS THAT ARE ALWAYS LOCK-FREE  ****************/

/* Lock-free loads and stores don't need assembler - just aligned accesses */
/* Silly ordering of `T volatile` is because T can be `void *` */
#define DO_MBED_LOCKFREE_LOADSTORE(T, V, fn_suffix)                             \
MBED_FORCEINLINE T core_util_atomic_load_##fn_suffix(T const V *valuePtr)       \
{                                                                               \
    T value = *valuePtr;                                                        \
    MBED_BARRIER();                                                             \
    return value;                                                               \
}                                                                               \
                                                                                \
MBED_FORCEINLINE T core_util_atomic_load_explicit_##fn_suffix(T const V *valuePtr, mbed_memory_order order) \
{                                                                               \
    MBED_CHECK_LOAD_ORDER(order);                                               \
    T value = *valuePtr;                                                        \
    MBED_ACQUIRE_BARRIER(order);                                                \
    return value;                                                               \
}                                                                               \
                                                                                \
MBED_FORCEINLINE void core_util_atomic_store_##fn_suffix(T V *valuePtr, T value) \
{                                                                               \
    MBED_BARRIER();                                                             \
    *valuePtr = value;                                                          \
    MBED_BARRIER();                                                             \
}                                                                               \
                                                                                \
MBED_FORCEINLINE void core_util_atomic_store_explicit_##fn_suffix(T V *valuePtr, T value, mbed_memory_order order) \
{                                                                               \
    MBED_CHECK_STORE_ORDER(order);                                              \
    MBED_RELEASE_BARRIER(order);                                                \
    *valuePtr = value;                                                          \
    MBED_SEQ_CST_BARRIER(order);                                                \
}
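
/* Typical use of the explicit load/store forms for a producer/consumer
 * handshake (illustrative sketch; `data`, `ready` and `use` are
 * hypothetical):
 *
 *     static uint32_t data;
 *     static bool ready;
 *
 *     // Producer: publish data, then release-store the flag.
 *     data = 42;
 *     core_util_atomic_store_explicit_bool(&ready, true, mbed_memory_order_release);
 *
 *     // Consumer: acquire-load the flag before reading data.
 *     if (core_util_atomic_load_explicit_bool(&ready, mbed_memory_order_acquire)) {
 *         use(data);
 *     }
 */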

MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr)
{
    MBED_BARRIER();
    flagPtr->_flag = false;
    MBED_BARRIER();
}

MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(volatile core_util_atomic_flag *flagPtr, mbed_memory_order order)
{
    MBED_CHECK_STORE_ORDER(order);
    MBED_RELEASE_BARRIER(order);
    flagPtr->_flag = false;
    MBED_SEQ_CST_BARRIER(order);
}

#ifdef __cplusplus
// Temporarily turn off extern "C", so we can provide non-volatile load/store
// overloads for efficiency. All these functions are static inline, so this
// has no effect on linkage; it just permits the overloads.
} // extern "C"

// For efficiency it's worth having non-volatile overloads
MBED_FORCEINLINE void core_util_atomic_flag_clear(core_util_atomic_flag *flagPtr)
{
    MBED_BARRIER();
    flagPtr->_flag = false;
    MBED_BARRIER();
}

MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(core_util_atomic_flag *flagPtr, mbed_memory_order order)
{
    MBED_RELEASE_BARRIER(order);
    flagPtr->_flag = false;
    MBED_SEQ_CST_BARRIER(order);
}

DO_MBED_LOCKFREE_LOADSTORE(uint8_t,, u8)
DO_MBED_LOCKFREE_LOADSTORE(uint16_t,, u16)
DO_MBED_LOCKFREE_LOADSTORE(uint32_t,, u32)
DO_MBED_LOCKFREE_LOADSTORE(int8_t,, s8)
DO_MBED_LOCKFREE_LOADSTORE(int16_t,, s16)
DO_MBED_LOCKFREE_LOADSTORE(int32_t,, s32)
DO_MBED_LOCKFREE_LOADSTORE(bool,, bool)
DO_MBED_LOCKFREE_LOADSTORE(void *,, ptr)

#endif

DO_MBED_LOCKFREE_LOADSTORE(uint8_t, volatile, u8)
DO_MBED_LOCKFREE_LOADSTORE(uint16_t, volatile, u16)
DO_MBED_LOCKFREE_LOADSTORE(uint32_t, volatile, u32)
DO_MBED_LOCKFREE_LOADSTORE(int8_t, volatile, s8)
DO_MBED_LOCKFREE_LOADSTORE(int16_t, volatile, s16)
DO_MBED_LOCKFREE_LOADSTORE(int32_t, volatile, s32)
DO_MBED_LOCKFREE_LOADSTORE(bool, volatile, bool)
DO_MBED_LOCKFREE_LOADSTORE(void *, volatile, ptr)

#ifdef __cplusplus
extern "C" {
#endif

/********************* GENERIC VARIANTS - SIGNED, BOOL, POINTERS  ****************/

MBED_FORCEINLINE int64_t core_util_atomic_load_s64 (const volatile int64_t *valuePtr)
{
    return (int64_t)core_util_atomic_load_u64 ((const volatile uint64_t *)valuePtr);
}

MBED_FORCEINLINE void core_util_atomic_store_s64 (volatile int64_t *valuePtr, int64_t desiredValue)
{
    core_util_atomic_store_u64 ((volatile uint64_t *)valuePtr, (uint64_t)desiredValue);
}

#define DO_MBED_SIGNED_CAS_OP(name, T, fn_suffix)                                           \
MBED_FORCEINLINE bool core_util_atomic_##name##_s##fn_suffix(volatile T *ptr,               \
        T *expectedCurrentValue, T desiredValue)                                            \
{                                                                                           \
    return core_util_atomic_##name##_u##fn_suffix((volatile u##T *)ptr,                     \
                   (u##T *)expectedCurrentValue, (u##T)desiredValue);                       \
}                                                                                           \
                                                                                            \
MBED_FORCEINLINE bool core_util_atomic_##name##_explicit_s##fn_suffix(volatile T *ptr,      \
        T *expectedCurrentValue, T desiredValue,                                            \
        mbed_memory_order success, mbed_memory_order failure)                               \
{                                                                                           \
    return core_util_atomic_##name##_explicit_u##fn_suffix((volatile u##T *)ptr,            \
                   (u##T *)expectedCurrentValue, (u##T)desiredValue, success, failure);     \
}

#define DO_MBED_SIGNED_CAS_OPS(name) \
        DO_MBED_SIGNED_CAS_OP(name, int8_t, 8) \
        DO_MBED_SIGNED_CAS_OP(name, int16_t, 16) \
        DO_MBED_SIGNED_CAS_OP(name, int32_t, 32) \
        DO_MBED_SIGNED_CAS_OP(name, int64_t, 64)

DO_MBED_SIGNED_CAS_OPS(cas)
DO_MBED_SIGNED_CAS_OPS(compare_exchange_weak)

MBED_FORCEINLINE bool core_util_atomic_cas_bool (volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue)
{
    return core_util_atomic_cas_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue);
}

MBED_FORCEINLINE bool core_util_atomic_cas_explicit_bool (volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure)
{
    return core_util_atomic_cas_explicit_u8 ((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue, success, failure);
}

inline bool core_util_atomic_cas_ptr (void *volatile *ptr, void **expectedCurrentValue, void *desiredValue)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return core_util_atomic_cas_u32 (
               (volatile uint32_t *)ptr,
               (uint32_t *)expectedCurrentValue,
               (uint32_t)desiredValue);
#else
    return core_util_atomic_cas_u64 (
               (volatile uint64_t *)ptr,
               (uint64_t *)expectedCurrentValue,
               (uint64_t)desiredValue);
#endif
}

MBED_FORCEINLINE bool core_util_atomic_cas_explicit_ptr (void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return core_util_atomic_cas_explicit_u32 (
               (volatile uint32_t *)ptr,
               (uint32_t *)expectedCurrentValue,
               (uint32_t)desiredValue,
               success, failure);
#else
    return core_util_atomic_cas_explicit_u64 (
               (volatile uint64_t *)ptr,
               (uint64_t *)expectedCurrentValue,
               (uint64_t)desiredValue,
               success, failure);
#endif
}

MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_bool (volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue)
{
    return core_util_atomic_compare_exchange_weak_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue);
}

MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_bool (volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure)
{
    return core_util_atomic_compare_exchange_weak_explicit_u8 ((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue, success, failure);
}

MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_ptr (void *volatile *ptr, void **expectedCurrentValue, void *desiredValue)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return core_util_atomic_compare_exchange_weak_u32 (
               (volatile uint32_t *)ptr,
               (uint32_t *)expectedCurrentValue,
               (uint32_t)desiredValue);
#else
    return core_util_atomic_compare_exchange_weak_u64 (
               (volatile uint64_t *)ptr,
               (uint64_t *)expectedCurrentValue,
               (uint64_t)desiredValue);
#endif
}

MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_ptr (void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return core_util_atomic_compare_exchange_weak_explicit_u32 (
               (volatile uint32_t *)ptr,
               (uint32_t *)expectedCurrentValue,
               (uint32_t)desiredValue,
               success, failure);
#else
    return core_util_atomic_compare_exchange_weak_explicit_u64 (
               (volatile uint64_t *)ptr,
               (uint64_t *)expectedCurrentValue,
               (uint64_t)desiredValue,
               success, failure);
#endif
}
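
/* The pointer CAS variants support classic lock-free structures such as a
 * Treiber stack. A minimal push sketch (the node type is hypothetical):
 *
 *     struct node { struct node *next; int payload; };
 *     static struct node *volatile top;
 *
 *     void push(struct node *n)
 *     {
 *         void *old = core_util_atomic_load_ptr((void *volatile *)&top);
 *         do {
 *             n->next = (struct node *)old;
 *         } while (!core_util_atomic_compare_exchange_weak_ptr(
 *                      (void *volatile *)&top, &old, n));
 *     }
 */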

#define DO_MBED_SIGNED_FETCH_OP(name, T, fn_suffix)                                         \
MBED_FORCEINLINE T core_util_atomic_##name##_s##fn_suffix(volatile T *valuePtr, T arg)      \
{                                                                                           \
    return (T)core_util_atomic_##name##_u##fn_suffix((volatile u##T *)valuePtr, (u##T)arg); \
}

#define DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, T, fn_suffix)                                \
MBED_FORCEINLINE T core_util_atomic_##name##_explicit_s##fn_suffix(volatile T *valuePtr, T arg, mbed_memory_order order) \
{                                                                                           \
    return (T)core_util_atomic_##name##_explicit_u##fn_suffix((volatile u##T *)valuePtr, (u##T)arg, order); \
}

#define DO_MBED_SIGNED_FETCH_OPS(name) \
    DO_MBED_SIGNED_FETCH_OP(name, int8_t, 8) \
    DO_MBED_SIGNED_FETCH_OP(name, int16_t, 16) \
    DO_MBED_SIGNED_FETCH_OP(name, int32_t, 32) \
    DO_MBED_SIGNED_FETCH_OP(name, int64_t, 64)

#define DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(name) \
        DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int8_t, 8) \
        DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int16_t, 16) \
        DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int32_t, 32) \
        DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int64_t, 64)

DO_MBED_SIGNED_FETCH_OPS(exchange)
DO_MBED_SIGNED_FETCH_OPS(incr)
DO_MBED_SIGNED_FETCH_OPS(decr)
DO_MBED_SIGNED_FETCH_OPS(fetch_add)
DO_MBED_SIGNED_FETCH_OPS(fetch_sub)

DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(exchange)
DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(fetch_add)
DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(fetch_sub)

MBED_FORCEINLINE bool core_util_atomic_exchange_bool (volatile bool *valuePtr, bool desiredValue)
{
    return (bool)core_util_atomic_exchange_u8((volatile uint8_t *)valuePtr, desiredValue);
}

MBED_FORCEINLINE bool core_util_atomic_exchange_explicit_bool (volatile bool *valuePtr, bool desiredValue, mbed_memory_order order)
{
    return (bool)core_util_atomic_exchange_explicit_u8 ((volatile uint8_t *)valuePtr, desiredValue, order);
}

inline void *core_util_atomic_exchange_ptr (void *volatile *valuePtr, void *desiredValue)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_exchange_u32 ((volatile uint32_t *)valuePtr, (uint32_t)desiredValue);
#else
    return (void *)core_util_atomic_exchange_u64 ((volatile uint64_t *)valuePtr, (uint64_t)desiredValue);
#endif
}

MBED_FORCEINLINE void *core_util_atomic_exchange_explicit_ptr (void *volatile *valuePtr, void *desiredValue, mbed_memory_order order)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_exchange_explicit_u32 ((volatile uint32_t *)valuePtr, (uint32_t)desiredValue, order);
#else
    return (void *)core_util_atomic_exchange_explicit_u64 ((volatile uint64_t *)valuePtr, (uint64_t)desiredValue, order);
#endif
}

inline void *core_util_atomic_incr_ptr (void *volatile *valuePtr, ptrdiff_t delta)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_incr_u32 ((volatile uint32_t *)valuePtr, (uint32_t)delta);
#else
    return (void *)core_util_atomic_incr_u64 ((volatile uint64_t *)valuePtr, (uint64_t)delta);
#endif
}

inline void *core_util_atomic_decr_ptr (void *volatile *valuePtr, ptrdiff_t delta)
{
#if MBED_ATOMIC_PTR_SIZE == 32
    return (void *)core_util_atomic_decr_u32 ((volatile uint32_t *)valuePtr, (uint32_t)delta);
#else
    return (void *)core_util_atomic_decr_u64 ((volatile uint64_t *)valuePtr, (uint64_t)delta);
#endif
}
00992 
00993 MBED_FORCEINLINE void *core_util_atomic_fetch_add_ptr (void *volatile *valuePtr, ptrdiff_t arg)
00994 {
00995 #if MBED_ATOMIC_PTR_SIZE == 32
00996     return (void *)core_util_atomic_fetch_add_u32 ((volatile uint32_t *)valuePtr, (uint32_t)arg);
00997 #else
00998     return (void *)core_util_atomic_fetch_add_u64 ((volatile uint64_t *)valuePtr, (uint64_t)arg);
00999 #endif
01000 }
01001 
01002 MBED_FORCEINLINE void *core_util_atomic_fetch_add_explicit_ptr (void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
01003 {
01004 #if MBED_ATOMIC_PTR_SIZE == 32
01005     return (void *)core_util_atomic_fetch_add_explicit_u32 ((volatile uint32_t *)valuePtr, (uint32_t)arg, order);
01006 #else
01007     return (void *)core_util_atomic_fetch_add_explicit_u64 ((volatile uint64_t *)valuePtr, (uint64_t)arg, order);
01008 #endif
01009 }
01010 
01011 MBED_FORCEINLINE void *core_util_atomic_fetch_sub_ptr (void *volatile *valuePtr, ptrdiff_t arg)
01012 {
01013 #if MBED_ATOMIC_PTR_SIZE == 32
01014     return (void *)core_util_atomic_fetch_sub_u32 ((volatile uint32_t *)valuePtr, (uint32_t)arg);
01015 #else
01016     return (void *)core_util_atomic_fetch_sub_u64 ((volatile uint64_t *)valuePtr, (uint64_t)arg);
01017 #endif
01018 }
01019 
01020 MBED_FORCEINLINE void *core_util_atomic_fetch_sub_explicit_ptr (void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
01021 {
01022 #if MBED_ATOMIC_PTR_SIZE == 32
01023     return (void *)core_util_atomic_fetch_sub_explicit_u32 ((volatile uint32_t *)valuePtr, (uint32_t)arg, order);
01024 #else
01025     return (void *)core_util_atomic_fetch_sub_explicit_u64 ((volatile uint64_t *)valuePtr, (uint64_t)arg, order);
01026 #endif
01027 }
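
/* Note that for these void * forms (incr, decr, fetch_add and fetch_sub on
 * void *) the delta is a raw byte offset: the pointer is adjusted with plain
 * integer arithmetic and no sizeof scaling, so for example
 * core_util_atomic_incr_ptr(&p, 4) moves p forward by exactly 4 bytes.
 * (The typed T * templates later in this file scale by sizeof(T) instead,
 * matching ordinary pointer arithmetic.)
 */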
01028 
01029 /***************** DUMMY EXPLICIT ORDERING FOR LOCKED OPS  *****************/
01030 
01031 /* Need to throw away the ordering information for all locked operations */
01032 MBED_FORCEINLINE uint64_t core_util_atomic_load_explicit_u64 (const volatile uint64_t *valuePtr, MBED_UNUSED mbed_memory_order order)
01033 {
01034     MBED_CHECK_LOAD_ORDER(order);
01035     return core_util_atomic_load_u64 (valuePtr);
01036 }
01037 
01038 MBED_FORCEINLINE int64_t core_util_atomic_load_explicit_s64 (const volatile int64_t *valuePtr, MBED_UNUSED mbed_memory_order order)
01039 {
01040     MBED_CHECK_LOAD_ORDER(order);
01041     return core_util_atomic_load_s64 (valuePtr);
01042 }
01043 
01044 MBED_FORCEINLINE void core_util_atomic_store_explicit_u64 (volatile uint64_t *valuePtr, uint64_t desiredValue, MBED_UNUSED mbed_memory_order order)
01045 {
01046     MBED_CHECK_STORE_ORDER(order);
01047     core_util_atomic_store_u64 (valuePtr, desiredValue);
01048 }
01049 
01050 MBED_FORCEINLINE void core_util_atomic_store_explicit_s64 (volatile int64_t *valuePtr, int64_t desiredValue, MBED_UNUSED mbed_memory_order order)
01051 {
01052     MBED_CHECK_STORE_ORDER(order);
01053     core_util_atomic_store_s64 (valuePtr, desiredValue);
01054 }
01055 
01056 #define DO_MBED_LOCKED_FETCH_OP_ORDERING(name, T, fn_suffix)                    \
01057 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix(              \
01058         volatile T *valuePtr, T arg, MBED_UNUSED mbed_memory_order order)       \
01059 {                                                                               \
01060     return core_util_atomic_##name##_##fn_suffix(valuePtr, arg);                \
01061 }
01062 
01063 #define DO_MBED_LOCKED_CAS_ORDERING(name, T, fn_suffix)                         \
01064 MBED_FORCEINLINE bool core_util_atomic_##name##_explicit_##fn_suffix(           \
01065         volatile T *ptr, T *expectedCurrentValue, T desiredValue,               \
01066         MBED_UNUSED mbed_memory_order success,                                  \
01067         MBED_UNUSED mbed_memory_order failure)                                  \
01068 {                                                                               \
01069     MBED_CHECK_CAS_ORDER(success, failure);                                     \
01070     return core_util_atomic_##name##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \
01071 }
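
/* For example, DO_MBED_LOCKED_FETCH_OP_ORDERING(fetch_add, uint64_t, u64)
 * expands to a shim that simply drops the ordering argument:
 *
 *     MBED_FORCEINLINE uint64_t core_util_atomic_fetch_add_explicit_u64(
 *             volatile uint64_t *valuePtr, uint64_t arg, MBED_UNUSED mbed_memory_order order)
 *     {
 *         return core_util_atomic_fetch_add_u64(valuePtr, arg);
 *     }
 *
 * Dropping the order is sound because the locked implementations run in a
 * critical section, which is at least as strong as any requested ordering.
 */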
01072 
01073 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(exchange)
01074 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_add)
01075 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_sub)
01076 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_and)
01077 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_or)
01078 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_xor)
01079 DO_MBED_LOCKED_CAS_ORDERINGS(cas)
01080 DO_MBED_LOCKED_CAS_ORDERINGS(compare_exchange_weak)
01081 
01082 #ifdef __cplusplus
01083 } // extern "C"
01084 
01085 /***************** TEMPLATE IMPLEMENTATIONS  *****************/
01086 
01087 /* Each of these groups provides specialisations of the T template for each of
01088  * the small types (there is no base implementation), plus the base
01089  * implementation of the T * template.
01090  */
01091 #define DO_MBED_ATOMIC_LOAD_TEMPLATE(T, fn_suffix)                              \
01092 template<>                                                                      \
01093 inline T core_util_atomic_load(const volatile T *valuePtr) noexcept             \
01094 {                                                                               \
01095     return core_util_atomic_load_##fn_suffix(valuePtr);                         \
01096 }                                                                               \
01097                                                                                 \
01098 template<>                                                                      \
01099 inline T core_util_atomic_load(const T *valuePtr) noexcept                      \
01100 {                                                                               \
01101     return core_util_atomic_load_##fn_suffix(valuePtr);                         \
01102 }                                                                               \
01103                                                                                 \
01104 template<>                                                                      \
01105 inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order)  noexcept \
01106 {                                                                               \
01107     return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order);         \
01108 }                                                                               \
01109                                                                                 \
01110 template<>                                                                      \
01111 inline T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) noexcept \
01112 {                                                                               \
01113     return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order);         \
01114 }
01115 
01116 template<typename T>
01117 inline T *core_util_atomic_load (T *const volatile *valuePtr) noexcept
01118 {
01119     return (T *) core_util_atomic_load_ptr ((void *const volatile *) valuePtr);
01120 }
01121 
01122 template<typename T>
01123 inline T *core_util_atomic_load (T *const *valuePtr) noexcept
01124 {
01125     return (T *) core_util_atomic_load_ptr ((void *const *) valuePtr);
01126 }
01127 
01128 template<typename T>
01129 inline T *core_util_atomic_load_explicit (T *const volatile *valuePtr, mbed_memory_order order) noexcept
01130 {
01131     return (T *) core_util_atomic_load_explicit_ptr ((void *const volatile *) valuePtr, order);
01132 }
01133 
01134 template<typename T>
01135 inline T *core_util_atomic_load_explicit (T *const *valuePtr, mbed_memory_order order) noexcept
01136 {
01137     return (T *) core_util_atomic_load_explicit_ptr ((void *const *) valuePtr, order);
01138 }
01139 
01140 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint8_t,  u8)
01141 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint16_t, u16)
01142 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint32_t, u32)
01143 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint64_t, u64)
01144 DO_MBED_ATOMIC_LOAD_TEMPLATE(int8_t,  s8)
01145 DO_MBED_ATOMIC_LOAD_TEMPLATE(int16_t, s16)
01146 DO_MBED_ATOMIC_LOAD_TEMPLATE(int32_t, s32)
01147 DO_MBED_ATOMIC_LOAD_TEMPLATE(int64_t, s64)
01148 DO_MBED_ATOMIC_LOAD_TEMPLATE(bool, bool)
01149 
01150 #define DO_MBED_ATOMIC_STORE_TEMPLATE(T, fn_suffix)                             \
01151 template<>                                                                      \
01152 inline void core_util_atomic_store(volatile T *valuePtr, T val) noexcept        \
01153 {                                                                               \
01154     core_util_atomic_store_##fn_suffix(valuePtr, val);                          \
01155 }                                                                               \
01156                                                                                 \
01157 template<>                                                                      \
01158 inline void core_util_atomic_store(T *valuePtr, T val) noexcept                 \
01159 {                                                                               \
01160     core_util_atomic_store_##fn_suffix(valuePtr, val);                          \
01161 }                                                                               \
01162                                                                                 \
01163 template<>                                                                      \
01164 inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) noexcept \
01165 {                                                                               \
01166     core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order);          \
01167 }                                                                               \
01168                                                                                 \
01169 template<>                                                                      \
01170 inline void core_util_atomic_store_explicit(T *valuePtr, T val, mbed_memory_order order) noexcept \
01171 {                                                                               \
01172     core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order);          \
01173 }
01174 
01175 template<typename T>
01176 inline void core_util_atomic_store (T *volatile *valuePtr, T *val) noexcept
01177 {
01178     core_util_atomic_store_ptr ((void *volatile *) valuePtr, val);
01179 }
01180 
01181 template<typename T>
01182 inline void core_util_atomic_store (T **valuePtr, T *val) noexcept
01183 {
01184     core_util_atomic_store_ptr ((void **) valuePtr, val);
01185 }
01186 
01187 template<typename T>
01188 inline void core_util_atomic_store_explicit (T *volatile *valuePtr, T *val, mbed_memory_order order) noexcept
01189 {
01190     core_util_atomic_store_explicit_ptr ((void *volatile *) valuePtr, val, order);
01191 }
01192 
01193 template<typename T>
01194 inline void core_util_atomic_store_explicit (T **valuePtr, T *val, mbed_memory_order order) noexcept
01195 {
01196     core_util_atomic_store_explicit_ptr ((void **) valuePtr, val, order);
01197 }
01198 
01199 DO_MBED_ATOMIC_STORE_TEMPLATE(uint8_t,  u8)
01200 DO_MBED_ATOMIC_STORE_TEMPLATE(uint16_t, u16)
01201 DO_MBED_ATOMIC_STORE_TEMPLATE(uint32_t, u32)
01202 DO_MBED_ATOMIC_STORE_TEMPLATE(uint64_t, u64)
01203 DO_MBED_ATOMIC_STORE_TEMPLATE(int8_t,  s8)
01204 DO_MBED_ATOMIC_STORE_TEMPLATE(int16_t, s16)
01205 DO_MBED_ATOMIC_STORE_TEMPLATE(int32_t, s32)
01206 DO_MBED_ATOMIC_STORE_TEMPLATE(int64_t, s64)
01207 DO_MBED_ATOMIC_STORE_TEMPLATE(bool, bool)
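
/* Illustrative use of the C++ template forms (a sketch; flags, head and
 * node_t are hypothetical names, not part of this header):
 *
 *     uint32_t flags;
 *     node_t *head;
 *
 *     uint32_t f = core_util_atomic_load(&flags);   // resolves to the u32 form
 *     core_util_atomic_store(&flags, f | 1u);       // resolves to the u32 form
 *     node_t *n = core_util_atomic_load(&head);     // resolves via the T * overload
 *
 * Scalar types dispatch through the explicit specialisations above; pointer
 * types dispatch through the T * overloads, which funnel into the void *
 * functions.
 */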
01208 
01209 #define DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, T, fn_suffix)                             \
01210 template<> inline                                                                           \
01211 bool core_util_atomic_##tname(volatile T *ptr, T *expectedCurrentValue, T desiredValue) noexcept \
01212 {                                                                                           \
01213     return core_util_atomic_##fname##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \
01214 }
01215 
01216 template<typename T>
01217 inline bool core_util_atomic_compare_exchange_strong (T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept
01218 {
01219     return core_util_atomic_cas_ptr ((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue);
01220 }
01221 
01222 template<typename T>
01223 inline bool core_util_atomic_compare_exchange_weak (T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept
01224 {
01225     return core_util_atomic_compare_exchange_weak_ptr ((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue);
01226 }
01227 
01228 #define DO_MBED_ATOMIC_CAS_TEMPLATES(tname, fname) \
01229         DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint8_t,  u8) \
01230         DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint16_t, u16) \
01231         DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint32_t, u32) \
01232         DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint64_t, u64) \
01233         DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int8_t,   s8) \
01234         DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int16_t,  s16) \
01235         DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int32_t,  s32) \
01236         DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int64_t,  s64) \
01237         DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, bool, bool)
01238 
01239 DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_strong, cas)
01240 DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_weak, compare_exchange_weak)
01241 
01242 #define DO_MBED_ATOMIC_OP_TEMPLATE(name, T, fn_suffix)                          \
01243 template<>                                                                      \
01244 inline T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept          \
01245 {                                                                               \
01246     return core_util_atomic_##name##_##fn_suffix(valuePtr, arg);                \
01247 }                                                                               \
01248                                                                                 \
01249 template<>                                                                      \
01250 inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg,        \
01251         mbed_memory_order order) noexcept                                       \
01252 {                                                                               \
01253     return core_util_atomic_##name##_explicit_##fn_suffix(valuePtr, arg, order); \
01254 }
01255 
01256 
01257 template<>
01258 inline bool core_util_atomic_exchange (volatile bool *valuePtr, bool arg) noexcept
01259 {
01260     return core_util_atomic_exchange_bool (valuePtr, arg);
01261 }
01262 
01263 template<>
01264 inline bool core_util_atomic_exchange_explicit (volatile bool *valuePtr, bool arg, mbed_memory_order order) noexcept
01265 {
01266     return core_util_atomic_exchange_explicit_bool (valuePtr, arg, order);
01267 }
01268 
01269 template<typename T>
01270 inline T *core_util_atomic_exchange (T *volatile *valuePtr, T *arg) noexcept
01271 {
01272     return (T *) core_util_atomic_exchange_ptr ((void *volatile *) valuePtr, arg);
01273 }
01274 
01275 template<typename T>
01276 inline T *core_util_atomic_exchange_explicit (T *volatile *valuePtr, T *arg, mbed_memory_order order) noexcept
01277 {
01278     return (T *) core_util_atomic_exchange_explicit_ptr ((void *volatile *) valuePtr, arg, order);
01279 }
01280 
01281 template<typename T>
01282 inline T *core_util_atomic_fetch_add (T *volatile *valuePtr, ptrdiff_t arg) noexcept
01283 {
01284     return (T *) core_util_atomic_fetch_add_ptr ((void *volatile *) valuePtr, arg * sizeof(T));
01285 }
01286 
01287 template<typename T>
01288 inline T *core_util_atomic_fetch_add_explicit (T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept
01289 {
01290     return (T *) core_util_atomic_fetch_add_explicit_ptr ((void *volatile *) valuePtr, arg * sizeof(T), order);
01291 }
01292 
01293 template<typename T>
01294 inline T *core_util_atomic_fetch_sub (T *volatile *valuePtr, ptrdiff_t arg) noexcept
01295 {
01296     return (T *) core_util_atomic_fetch_sub_ptr ((void *volatile *) valuePtr, arg * sizeof(T));
01297 }
01298 
01299 template<typename T>
01300 inline T *core_util_atomic_fetch_sub_explicit (T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept
01301 {
01302     return (T *) core_util_atomic_fetch_sub_explicit_ptr ((void *volatile *) valuePtr, arg * sizeof(T), order);
01303 }
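
/* Unlike the raw void * helpers earlier, these T * forms scale arg by
 * sizeof(T), so they follow ordinary pointer arithmetic. A sketch (buf and
 * cursor are hypothetical):
 *
 *     uint32_t buf[8];
 *     uint32_t *volatile cursor = buf;
 *
 *     // returns the old cursor, then advances it by 2 elements (8 bytes)
 *     uint32_t *claimed = core_util_atomic_fetch_add(&cursor, 2);
 */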
01304 
01305 
01306 #define DO_MBED_ATOMIC_OP_U_TEMPLATES(name) \
01307         DO_MBED_ATOMIC_OP_TEMPLATE(name, uint8_t,  u8) \
01308         DO_MBED_ATOMIC_OP_TEMPLATE(name, uint16_t, u16) \
01309         DO_MBED_ATOMIC_OP_TEMPLATE(name, uint32_t, u32) \
01310         DO_MBED_ATOMIC_OP_TEMPLATE(name, uint64_t, u64)
01311 
01312 #define DO_MBED_ATOMIC_OP_S_TEMPLATES(name) \
01313         DO_MBED_ATOMIC_OP_TEMPLATE(name, int8_t,   s8) \
01314         DO_MBED_ATOMIC_OP_TEMPLATE(name, int16_t,  s16) \
01315         DO_MBED_ATOMIC_OP_TEMPLATE(name, int32_t,  s32) \
01316         DO_MBED_ATOMIC_OP_TEMPLATE(name, int64_t,  s64)
01317 
01318 #define DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, T, fn_suffix, postname, OP) \
01319 template<>                                                                      \
01320 inline T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept          \
01321 {                                                                               \
01322     return core_util_atomic_##postname##_##fn_suffix(valuePtr, arg) OP;         \
01323 }                                                                               \
01324                                                                                 \
01325 template<>                                                                      \
01326 inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg,        \
01327         mbed_memory_order order) noexcept                                       \
01328 {                                                                               \
01329     return core_util_atomic_##postname##_explicit_##fn_suffix(valuePtr, arg, order) OP; \
01330 }
01331 
01332 DO_MBED_ATOMIC_OP_U_TEMPLATES(exchange)
01333 DO_MBED_ATOMIC_OP_S_TEMPLATES(exchange)
01334 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_add)
01335 DO_MBED_ATOMIC_OP_S_TEMPLATES(fetch_add)
01336 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_sub)
01337 DO_MBED_ATOMIC_OP_S_TEMPLATES(fetch_sub)
01338 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_and)
01339 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_or)
01340 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_xor)
01341 
01342 namespace mstd {
01343 namespace impl {
01344 
01345 // Use custom assembler forms for pre-ops where available, else construct from post-ops
01346 #if MBED_EXCLUSIVE_ACCESS
01347 #define DO_MBED_ATOMIC_PRE_OP_TEMPLATES(name, postname, OP) \
01348         template<typename T> T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept; \
01349         template<typename T> T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept; \
01350         DO_MBED_ATOMIC_OP_TEMPLATE(name, uint8_t, u8) \
01351         DO_MBED_ATOMIC_OP_TEMPLATE(name, uint16_t, u16) \
01352         DO_MBED_ATOMIC_OP_TEMPLATE(name, uint32_t, u32) \
01353         DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint64_t, u64, postname, OP)
01354 #else
01355 #define DO_MBED_ATOMIC_PRE_OP_TEMPLATES(name, postname, OP) \
01356         template<typename T> T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept; \
01357         template<typename T> T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept; \
01358         DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint8_t, u8, postname, OP) \
01359         DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint16_t, u16, postname, OP) \
01360         DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint32_t, u32, postname, OP) \
01361         DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint64_t, u64, postname, OP)
01362 #endif
01363 
01364 // *INDENT-OFF*
01365 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(incr,      fetch_add, + arg)
01366 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(decr,      fetch_sub, - arg)
01367 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(and_fetch, fetch_and, & arg)
01368 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(or_fetch,  fetch_or,  | arg)
01369 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(xor_fetch, fetch_xor, ^ arg)
01370 // *INDENT-ON*
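
/* For example, on the non-exclusive-access path,
 * DO_MBED_ATOMIC_PRE_OP_TEMPLATES(incr, fetch_add, + arg) produces, for u32:
 *
 *     template<>
 *     inline uint32_t core_util_atomic_incr(volatile uint32_t *valuePtr, uint32_t arg) noexcept
 *     {
 *         return core_util_atomic_fetch_add_u32(valuePtr, arg) + arg;
 *     }
 *
 * i.e. the "new value" result is reconstructed by reapplying the operation
 * to the "old value" returned by the post-op form.
 */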
01371 
01372 } // namespace impl
01373 } // namespace mstd
01374 
01375 #endif // __cplusplus
01376 
01377 #undef MBED_DOP_REG
01378 #undef MBED_CMP_IMM
01379 #undef MBED_SUB3_IMM
01380 #undef DO_MBED_LOCKFREE_EXCHG_ASM
01381 #undef DO_MBED_LOCKFREE_NEWVAL_2OP_ASM
01382 #undef DO_MBED_LOCKFREE_OLDVAL_3OP_ASM
01383 #undef DO_MBED_LOCKFREE_OLDVAL_2OP_ASM
01384 #undef DO_MBED_LOCKFREE_CAS_WEAK_ASM
01385 #undef DO_MBED_LOCKFREE_CAS_STRONG_ASM
01386 #undef DO_MBED_LOCKFREE_LOADSTORE
01387 #undef DO_MBED_LOCKFREE_EXCHG_OP
01388 #undef DO_MBED_LOCKFREE_CAS_WEAK_OP
01389 #undef DO_MBED_LOCKFREE_CAS_STRONG_OP
01390 #undef DO_MBED_LOCKFREE_NEWVAL_2OP
01391 #undef DO_MBED_LOCKFREE_OLDVAL_2OP
01392 #undef DO_MBED_LOCKFREE_OLDVAL_3OP
01393 #undef DO_MBED_LOCKFREE_EXCHG_OPS
01394 #undef DO_MBED_LOCKFREE_NEWVAL_2OPS
01395 #undef DO_MBED_LOCKFREE_OLDVAL_2OPS
01396 #undef DO_MBED_LOCKFREE_OLDVAL_3OPS
01397 #undef DO_MBED_LOCKFREE_CAS_WEAK_OPS
01398 #undef DO_MBED_LOCKFREE_CAS_STRONG_OPS
01399 #undef DO_MBED_SIGNED_CAS_OP
01400 #undef DO_MBED_SIGNED_CAS_OPS
01401 #undef DO_MBED_SIGNED_FETCH_OP
01402 #undef DO_MBED_SIGNED_EXPLICIT_FETCH_OP
01403 #undef DO_MBED_SIGNED_FETCH_OPS
01404 #undef DO_MBED_SIGNED_EXPLICIT_FETCH_OPS
01405 #undef DO_MBED_LOCKED_FETCH_OP_ORDERINGS
01406 #undef DO_MBED_LOCKED_CAS_ORDERINGS
01407 #undef MBED_ACQUIRE_BARRIER
01408 #undef MBED_RELEASE_BARRIER
01409 #undef MBED_SEQ_CST_BARRIER
01410 #undef DO_MBED_ATOMIC_LOAD_TEMPLATE
01411 #undef DO_MBED_ATOMIC_STORE_TEMPLATE
01412 #undef DO_MBED_ATOMIC_OP_TEMPLATE
01413 #undef DO_MBED_ATOMIC_CAS_TEMPLATE
01414 #undef DO_MBED_ATOMIC_CAS_TEMPLATES
01415 #undef DO_MBED_ATOMIC_OP_U_TEMPLATES
01416 #undef DO_MBED_ATOMIC_OP_S_TEMPLATES
01417 #undef DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE
01418 #undef DO_MBED_ATOMIC_PRE_OP_TEMPLATES
01419 
01420 #endif // __MBED_ATOMIC_IMPL_H__