mbed_atomic_impl.h
1 /*
2  * Copyright (c) 2015-2016, ARM Limited, All Rights Reserved
3  * SPDX-License-Identifier: Apache-2.0
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License"); you may
6  * not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 
18 #ifndef __MBED_ATOMIC_IMPL_H__
19 #define __MBED_ATOMIC_IMPL_H__
20 
21 #ifndef __MBED_UTIL_ATOMIC_H__
22 #error "mbed_atomic_impl.h is designed to be included only by mbed_atomic.h"
23 #endif
24 
25 #include <stdint.h>
26 #include "cmsis.h"
27 #include "platform/mbed_assert.h"
28 #include "platform/mbed_toolchain.h"
29 
30 #ifdef __cplusplus
31 extern "C" {
32 #endif
33 
34 #ifdef MBED_DEBUG
35 /* Plain loads must not have "release" or "acquire+release" order */
36 #define MBED_CHECK_LOAD_ORDER(order) MBED_ASSERT((order) != mbed_memory_order_release && (order) != mbed_memory_order_acq_rel)
37 
38 /* Plain stores must not have "consume", "acquire" or "acquire+release" order */
39 #define MBED_CHECK_STORE_ORDER(order) MBED_ASSERT((order) != mbed_memory_order_consume && (order) != mbed_memory_order_acquire && (order) != mbed_memory_order_acq_rel)
40 
41 /* Compare exchange needs failure order no stronger than success, and failure can't be "release" or "acquire+release" */
42 #define MBED_CHECK_CAS_ORDER(success, failure) \
43  MBED_ASSERT((failure) <= (success) && (failure) != mbed_memory_order_release && (failure) != mbed_memory_order_acq_rel)
44 #else
45 #define MBED_CHECK_LOAD_ORDER(order) (void)0
46 #define MBED_CHECK_STORE_ORDER(order) (void)0
47 #define MBED_CHECK_CAS_ORDER(success, failure) (void)0
48 #endif
49 
50 /* This is currently just to silence unit tests, so no better test required */
51 #ifdef __MBED__
52 #define MBED_ATOMIC_PTR_SIZE 32
53 #else
54 #define MBED_ATOMIC_PTR_SIZE 64
55 #endif
56 
57 /* Place barrier after a load or read-modify-write if a consume or acquire operation */
58 #define MBED_ACQUIRE_BARRIER(order) do { \
59  if ((order) & (mbed_memory_order_consume|mbed_memory_order_acquire)) { \
60  MBED_BARRIER(); \
61  } } while (0)
62 
63 /* Place barrier before a store or read-modify-write if a release operation */
64 #define MBED_RELEASE_BARRIER(order) do { \
65  if ((order) & mbed_memory_order_release) { \
66  MBED_BARRIER(); \
67  } } while (0)
68 
69 /* Place barrier after a plain store if a sequentially consistent operation */
70 #define MBED_SEQ_CST_BARRIER(order) do { \
71  if ((order) == mbed_memory_order_seq_cst) { \
72  MBED_BARRIER(); \
73  } } while (0)
74 
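/* Illustrative sketch (not part of the original header): how the three barrier
 * macros above combine around an exclusive-access operation.  An exchange
 * requested with mbed_memory_order_acquire expands roughly to
 *
 *     MBED_RELEASE_BARRIER(order);   // no-op: acquire does not include the release bit
 *     ... LDREX/STREX exchange ...
 *     MBED_ACQUIRE_BARRIER(order);   // emits MBED_BARRIER()
 *
 * mbed_memory_order_relaxed emits no barrier at all, while
 * mbed_memory_order_seq_cst includes both the acquire and release bits, so a
 * barrier lands on each side of the access.
 */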
75 
76 
77 #if MBED_EXCLUSIVE_ACCESS
78 
79 /* This header file provides C inline definitions for atomic functions. */
80 /* For C99 inline semantic compatibility, mbed_atomic_impl.c has out-of-line definitions. */
81 
82 /****************************** ASSEMBLER **********************************/
83 
84 // Fiddle about with constraints. These work for GCC and clang, but
85 // IAR appears to be restricted to having only a single constraint,
86 // so we can't do immediates.
87 #if MBED_EXCLUSIVE_ACCESS_THUMB1
88 #define MBED_DOP_REG "l" // Need low register to get 16-bit 3-op ADD/SUB
89 #define MBED_CMP_IMM "I" // CMP 8-bit immediate
90 #define MBED_SUB3_IMM "L" // -7 to +7
91 #else
92 #define MBED_DOP_REG "r" // Can use 32-bit 3-op ADD/SUB, so any registers
93 #define MBED_CMP_IMM "IL" // CMP or CMN, 12-bit immediate
94 #define MBED_SUB3_IMM "IL" // SUB or ADD, 12-bit immediate
95 #endif
96 
97 // ARM C 5 inline assembler recommends against using LDREX/STREX
98 // for the same reason as the intrinsics, but there's no other way to get
99 // inlining. ARM C 5 is being retired anyway.
100 
101 #ifdef __CC_ARM
102 #pragma diag_suppress 3732
103 #define DO_MBED_LOCKFREE_EXCHG_ASM(M) \
104  __asm { \
105  LDREX##M oldValue, [valuePtr] ; \
106  STREX##M fail, newValue, [valuePtr] \
107  }
108 #elif defined __clang__ || defined __GNUC__
109 #define DO_MBED_LOCKFREE_EXCHG_ASM(M) \
110  __asm volatile ( \
111  ".syntax unified\n\t" \
112  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
113  "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \
114  : [oldValue] "=&r" (oldValue), \
115  [fail] "=&r" (fail), \
116  [value] "+Q" (*valuePtr) \
117  : [newValue] "r" (newValue) \
118  : \
119  )
120 #elif defined __ICCARM__
121 /* In IAR "r" means low register if Thumbv1 (there's no way to specify any register...) */
122 #define DO_MBED_LOCKFREE_EXCHG_ASM(M) \
123  asm volatile ( \
124  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
125  "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \
126  : [oldValue] "=&r" (oldValue), \
127  [fail] "=&r" (fail) \
128  : [valuePtr] "r" (valuePtr), \
129  [newValue] "r" (newValue) \
130  : "memory" \
131  )
132 #endif
133 
134 #ifdef __CC_ARM
135 #define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M) \
136  __asm { \
137  LDREX##M newValue, [valuePtr] ; \
138  OP newValue, arg ; \
139  STREX##M fail, newValue, [valuePtr] \
140  }
141 #elif defined __clang__ || defined __GNUC__
142 #define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M) \
143  __asm volatile ( \
144  ".syntax unified\n\t" \
145  "LDREX"#M "\t%[newValue], %[value]\n\t" \
146  #OP "\t%[newValue], %[arg]\n\t" \
147  "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \
148  : [newValue] "=&" MBED_DOP_REG (newValue), \
149  [fail] "=&r" (fail), \
150  [value] "+Q" (*valuePtr) \
151  : [arg] Constants MBED_DOP_REG (arg) \
152  : "cc" \
153  )
154 #elif defined __ICCARM__
155 /* In IAR "r" means low register if Thumbv1 (there's no way to specify any register...) */
156 /* IAR does not support "ADDS reg, reg", so write as 3-operand */
157 #define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M) \
158  asm volatile ( \
159  "LDREX"#M "\t%[newValue], [%[valuePtr]]\n" \
160  #OP "\t%[newValue], %[newValue], %[arg]\n" \
161  "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \
162  : [newValue] "=&r" (newValue), \
163  [fail] "=&r" (fail) \
164  : [valuePtr] "r" (valuePtr), \
165  [arg] "r" (arg) \
166  : "memory", "cc" \
167  )
168 #endif
169 
170 #ifdef __CC_ARM
171 #define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M) \
172  __asm { \
173  LDREX##M oldValue, [valuePtr] ; \
174  OP newValue, oldValue, arg ; \
175  STREX##M fail, newValue, [valuePtr] \
176  }
177 #elif defined __clang__ || defined __GNUC__
178 #define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M) \
179  __asm volatile ( \
180  ".syntax unified\n\t" \
181  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
182  #OP "\t%[newValue], %[oldValue], %[arg]\n\t" \
183  "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \
184  : [oldValue] "=&" MBED_DOP_REG (oldValue), \
185  [newValue] "=&" MBED_DOP_REG (newValue), \
186  [fail] "=&r" (fail), \
187  [value] "+Q" (*valuePtr) \
188  : [arg] Constants MBED_DOP_REG (arg) \
189  : "cc" \
190  )
191 #elif defined __ICCARM__
192 /* In IAR "r" means low register if Thumbv1 (there's no way to specify any register...) */
193 #define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M) \
194  asm volatile ( \
195  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
196  #OP "\t%[newValue], %[oldValue], %[arg]\n" \
197  "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \
198  : [oldValue] "=&r" (oldValue), \
199  [newValue] "=&r" (newValue), \
200  [fail] "=&r" (fail) \
201  : [valuePtr] "r" (valuePtr), \
202  [arg] "r" (arg) \
203  : "memory", "cc" \
204  )
205 #endif
206 
207 /* Bitwise operations are harder to do in ARMv8-M baseline - there
208  * are only 2-operand versions of the instructions.
209  */
210 #ifdef __CC_ARM
211 #define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M) \
212  __asm { \
213  LDREX##M oldValue, [valuePtr] ; \
214  MOV newValue, oldValue ; \
215  OP newValue, arg ; \
216  STREX##M fail, newValue, [valuePtr] \
217  }
218 #elif defined __clang__ || defined __GNUC__
219 #define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M) \
220  __asm volatile ( \
221  ".syntax unified\n\t" \
222  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
223  "MOV" "\t%[newValue], %[oldValue]\n\t" \
224  #OP "\t%[newValue], %[arg]\n\t" \
225  "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \
226  : [oldValue] "=&r" (oldValue), \
227  [newValue] "=&l" (newValue), \
228  [fail] "=&r" (fail), \
229  [value] "+Q" (*valuePtr) \
230  : [arg] Constants "l" (arg) \
231  : "cc" \
232  )
233 #elif defined __ICCARM__
234 #define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M) \
235  asm volatile ( \
236  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
237  "MOV" "\t%[newValue], %[oldValue]\n" \
238  #OP "\t%[newValue], %[arg]\n" \
239  "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \
240  : [oldValue] "=&r" (oldValue), \
241  [newValue] "=&r" (newValue), \
242  [fail] "=&r" (fail) \
243  : [valuePtr] "r" (valuePtr), \
244  [arg] "r" (arg) \
245  : "memory", "cc" \
246  )
247 #endif
248 
249 /* Note that we split ARM and Thumb implementations for CAS, as
250  * the key distinction is the handling of conditions. Thumb-2 IT is
251  * partially deprecated, so avoid it, making Thumb-1 and Thumb-2
252  * implementations the same.
253  */
254 #if MBED_EXCLUSIVE_ACCESS_ARM
255 #ifdef __CC_ARM
256 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
257  __asm { \
258  LDREX##M oldValue, [ptr] ; \
259  SUBS fail, oldValue, expectedValue ; \
260  STREX##M##EQ fail, desiredValue, [ptr] \
261  }
262 #elif defined __clang__ || defined __GNUC__
263 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
264  __asm volatile ( \
265  ".syntax unified\n\t" \
266  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
267  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n\t"\
268  "STREX"#M"EQ\t%[fail], %[desiredValue], %[value]\n\t" \
269  : [oldValue] "=&r" (oldValue), \
270  [fail] "=&r" (fail), \
271  [value] "+Q" (*ptr) \
272  : [desiredValue] "r" (desiredValue), \
273  [expectedValue] "ILr" (expectedValue) \
274  : "cc" \
275  )
276 #elif defined __ICCARM__
277 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
278  asm volatile ( \
279  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
280  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n" \
281  "STREX"#M"EQ\t%[fail], %[desiredValue], [%[valuePtr]]\n"\
282  : [oldValue] "=&r" (oldValue), \
283  [fail] "=&r" (fail) \
284  : [desiredValue] "r" (desiredValue), \
285  [expectedValue] "r" (expectedValue), \
286  [valuePtr] "r" (ptr) \
287  : "memory", "cc" \
288  )
289 #endif
290 #else // MBED_EXCLUSIVE_ACCESS_ARM
291 #ifdef __CC_ARM
292 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
293  __asm { \
294  LDREX##M oldValue, [ptr] ; \
295  SUBS fail, oldValue, expectedValue ; \
296  BNE done ; \
297  STREX##M fail, desiredValue, [ptr] ; \
298 done: \
299  }
300 #elif defined __clang__ || defined __GNUC__
301 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
302  __asm volatile ( \
303  ".syntax unified\n\t" \
304  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
305  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n\t"\
306  "BNE" "\t%=f\n\t" \
307  "STREX"#M "\t%[fail], %[desiredValue], %[value]\n" \
308  "%=:" \
309  : [oldValue] "=&" MBED_DOP_REG (oldValue), \
310  [fail] "=&" MBED_DOP_REG (fail), \
311  [value] "+Q" (*ptr) \
312  : [desiredValue] "r" (desiredValue), \
313  [expectedValue] MBED_SUB3_IMM MBED_DOP_REG (expectedValue) \
314  : "cc" \
315  )
316 #elif defined __ICCARM__
317 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
318  asm volatile ( \
319  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
320  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n" \
321  "BNE" "\tdone\n\t" \
322  "STREX"#M "\t%[fail], %[desiredValue], [%[valuePtr]]\n"\
323  "done:" \
324  : [oldValue] "=&r" (oldValue), \
325  [fail] "=&r" (fail) \
326  : [desiredValue] "r" (desiredValue), \
327  [expectedValue] "r" (expectedValue), \
328  [valuePtr] "r" (ptr) \
329  : "memory", "cc" \
330  )
331 #endif
332 #endif // MBED_EXCLUSIVE_ACCESS_ARM
333 
334 /* For strong CAS, conditional execution is complex enough to
335  * not be worthwhile, so all implementations look like Thumb-1.
336  * (This is the operation for which STREX returning 0 for success
337  * is beneficial.)
338  */
339 #ifdef __CC_ARM
340 #define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M) \
341  __asm { \
342  retry: ; \
343  LDREX##M oldValue, [ptr] ; \
344  SUBS fail, oldValue, expectedValue ; \
345  BNE done ; \
346  STREX##M fail, desiredValue, [ptr] ; \
347  CMP fail, 0 ; \
348  BNE retry ; \
349  done: \
350  }
351 #elif defined __clang__ || defined __GNUC__
352 #define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M) \
353  __asm volatile ( \
354  ".syntax unified\n\t" \
355  "\n%=:\n\t" \
356  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
357  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n\t"\
358  "BNE" "\t%=f\n" \
359  "STREX"#M "\t%[fail], %[desiredValue], %[value]\n\t" \
360  "CMP" "\t%[fail], #0\n\t" \
361  "BNE" "\t%=b\n" \
362  "%=:" \
363  : [oldValue] "=&" MBED_DOP_REG (oldValue), \
364  [fail] "=&" MBED_DOP_REG (fail), \
365  [value] "+Q" (*ptr) \
366  : [desiredValue] "r" (desiredValue), \
367  [expectedValue] MBED_SUB3_IMM MBED_DOP_REG (expectedValue) \
368  : "cc" \
369  )
370 #elif defined __ICCARM__
371 #define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M) \
372  asm volatile ( \
373  "retry:\n" \
374  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
375  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n" \
376  "BNE" "\tdone\n" \
377  "STREX"#M "\t%[fail], %[desiredValue], [%[valuePtr]]\n"\
378  "CMP" "\t%[fail], #0\n" \
379  "BNE" "\tretry\n" \
380  "done:" \
381  : [oldValue] "=&r" (oldValue), \
382  [fail] "=&r" (fail) \
383  : [desiredValue] "r" (desiredValue), \
384  [expectedValue] "r" (expectedValue), \
385  [valuePtr] "r" (ptr) \
386  : "memory", "cc" \
387  )
388 #endif
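/* Rough C-level sketch of what the strong CAS assembler above does (the real
 * implementation is the inline assembly; oldValue/fail/ptr name the operands):
 *
 *     do {
 *         oldValue = LDREX(ptr);
 *         fail = oldValue - expectedValue;
 *         if (fail) {
 *             break;                           // genuine value mismatch
 *         }
 *         fail = STREX(ptr, desiredValue);     // 0 on success, non-zero if the
 *     } while (fail);                          // exclusive reservation was lost
 *
 * The weak form omits the internal retry, so it can fail spuriously when the
 * reservation is lost; callers are expected to provide their own loop.
 */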
389 
390 /********************* LOCK-FREE IMPLEMENTATION MACROS ****************/
391 
392 /* Note care taken with types here. Values which the assembler outputs correctly
393  * narrowed, or inputs without caring about width, are marked as type T. Other
394  * values are uint32_t. It's not clear from documentation whether assembler
395  * assumes anything about widths, but try to signal correctly to get necessary
396  * narrowing, and avoid unnecessary.
397  * Tests show that GCC in particular will pass in unnarrowed values - eg passing
398  * "uint8_t arg = -1" to the assembler as 0xFFFFFFFF. This is fine for, eg, add_u8,
399  * but wouldn't be for compare_and_exchange_u8.
400  * On the other hand, it seems to be impossible to stop GCC inserting narrowing
401  * instructions for the output - it will always put in UXTB for the oldValue of
402  * an operation.
403  */
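/* Illustration of the narrowing point above (sketch): given
 *
 *     uint8_t arg = -1;    // 0xFF
 *
 * GCC may present the assembler with the whole register as 0xFFFFFFFF.  For
 * fetch_add_u8 that is harmless because STREXB stores only the low byte back,
 * but a full-width SUBS comparison would be confused, which is why the CAS
 * macros below copy the expected value out of the narrowed T object into a
 * fresh uint32_t before handing it to the assembler.
 */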
404 #define DO_MBED_LOCKFREE_EXCHG_OP(T, fn_suffix, M) \
405 inline T core_util_atomic_exchange_##fn_suffix(volatile T *valuePtr, T newValue) \
406 { \
407  T oldValue; \
408  uint32_t fail; \
409  MBED_BARRIER(); \
410  DO_MBED_LOCKFREE_EXCHG_ASM(M); \
411  MBED_BARRIER(); \
412  return oldValue; \
413 } \
414  \
415 MBED_FORCEINLINE T core_util_atomic_exchange_explicit_##fn_suffix( \
416  volatile T *valuePtr, T newValue, mbed_memory_order order) \
417 { \
418  T oldValue; \
419  uint32_t fail; \
420  MBED_RELEASE_BARRIER(order); \
421  DO_MBED_LOCKFREE_EXCHG_ASM(M); \
422  MBED_ACQUIRE_BARRIER(order); \
423  return oldValue; \
424 }
425 
426 #define DO_MBED_LOCKFREE_CAS_WEAK_OP(T, fn_suffix, M) \
427 inline bool core_util_atomic_compare_exchange_weak_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \
428 { \
429  MBED_BARRIER(); \
430  T oldValue; \
431  uint32_t fail, expectedValue = *expectedCurrentValue; \
432  DO_MBED_LOCKFREE_CAS_WEAK_ASM(M); \
433  if (fail) { \
434  *expectedCurrentValue = oldValue; \
435  } \
436  MBED_BARRIER(); \
437  return !fail; \
438 } \
439  \
440 MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) \
441 { \
442  MBED_CHECK_CAS_ORDER(success, failure); \
443  MBED_RELEASE_BARRIER(success); \
444  T oldValue; \
445  uint32_t fail, expectedValue = *expectedCurrentValue; \
446  DO_MBED_LOCKFREE_CAS_WEAK_ASM(M); \
447  if (fail) { \
448  *expectedCurrentValue = oldValue; \
449  } \
450  MBED_ACQUIRE_BARRIER(fail ? failure : success); \
451  return !fail; \
452 }
453 
454 #define DO_MBED_LOCKFREE_CAS_STRONG_OP(T, fn_suffix, M) \
455 inline bool core_util_atomic_cas_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \
456 { \
457  MBED_BARRIER(); \
458  T oldValue; \
459  uint32_t fail, expectedValue = *expectedCurrentValue; \
460  DO_MBED_LOCKFREE_CAS_STRONG_ASM(M); \
461  if (fail) { \
462  *expectedCurrentValue = oldValue; \
463  } \
464  MBED_BARRIER(); \
465  return !fail; \
466 } \
467  \
468 MBED_FORCEINLINE bool core_util_atomic_cas_explicit_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) \
469 { \
470  MBED_CHECK_CAS_ORDER(success, failure); \
471  MBED_RELEASE_BARRIER(success); \
472  T oldValue; \
473  uint32_t fail, expectedValue = *expectedCurrentValue; \
474  DO_MBED_LOCKFREE_CAS_STRONG_ASM(M); \
475  if (fail) { \
476  *expectedCurrentValue = oldValue; \
477  } \
478  MBED_ACQUIRE_BARRIER(fail ? failure : success); \
479  return !fail; \
480 }
481 
482 
483 #define DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, T, fn_suffix, M) \
484 inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \
485 { \
486  uint32_t fail, newValue; \
487  MBED_BARRIER(); \
488  do { \
489  DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M); \
490  } while (fail); \
491  MBED_BARRIER(); \
492  return (T) newValue; \
493 } \
494  \
495 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
496  volatile T *valuePtr, T arg, mbed_memory_order order) \
497 { \
498  uint32_t fail, newValue; \
499  MBED_RELEASE_BARRIER(order); \
500  do { \
501  DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M); \
502  } while (fail); \
503  MBED_ACQUIRE_BARRIER(order); \
504  return (T) newValue; \
505 } \
506 
507 #define DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, T, fn_suffix, M) \
508 inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \
509 { \
510  T oldValue; \
511  uint32_t fail, newValue; \
512  MBED_BARRIER(); \
513  do { \
514  DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M); \
515  } while (fail); \
516  MBED_BARRIER(); \
517  return oldValue; \
518 } \
519  \
520 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
521  volatile T *valuePtr, T arg, mbed_memory_order order) \
522 { \
523  T oldValue; \
524  uint32_t fail, newValue; \
525  MBED_RELEASE_BARRIER(order); \
526  do { \
527  DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M); \
528  } while (fail); \
529  MBED_ACQUIRE_BARRIER(order); \
530  return oldValue; \
531 } \
532 
533 #define DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, T, fn_suffix, M) \
534 inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) { \
535  T oldValue; \
536  uint32_t fail, newValue; \
537  MBED_BARRIER(); \
538  do { \
539  DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M); \
540  } while (fail); \
541  MBED_BARRIER(); \
542  return oldValue; \
543 } \
544  \
545 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
546  volatile T *valuePtr, T arg, mbed_memory_order order) \
547 { \
548  T oldValue; \
549  uint32_t fail, newValue; \
550  MBED_RELEASE_BARRIER(order); \
551  do { \
552  DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M); \
553  } while (fail); \
554  MBED_ACQUIRE_BARRIER(order); \
555  return oldValue; \
556 } \
557 
558 inline bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *valuePtr)
559 {
560  MBED_BARRIER();
561  bool oldValue, newValue = true;
562  uint32_t fail;
563  do {
564  DO_MBED_LOCKFREE_EXCHG_ASM(B);
565  } while (fail);
566  MBED_BARRIER();
567  return oldValue;
568 }
569 
570 MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *valuePtr, mbed_memory_order order)
571 {
572  MBED_RELEASE_BARRIER(order);
573  bool oldValue, newValue = true;
574  uint32_t fail;
575  do {
576  DO_MBED_LOCKFREE_EXCHG_ASM(B);
577  } while (fail);
578  MBED_ACQUIRE_BARRIER(order);
579  return oldValue;
580 }
581 
582 /********************* LOCK-FREE IMPLEMENTATION DEFINITIONS ****************/
583 
584 #define DO_MBED_LOCKFREE_EXCHG_OPS() \
585  DO_MBED_LOCKFREE_EXCHG_OP(uint8_t, u8, B) \
586  DO_MBED_LOCKFREE_EXCHG_OP(uint16_t, u16, H) \
587  DO_MBED_LOCKFREE_EXCHG_OP(uint32_t, u32, )
588 
589 #define DO_MBED_LOCKFREE_NEWVAL_2OPS(name, OP, Constants) \
590  DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint8_t, u8, B) \
591  DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint16_t, u16, H) \
592  DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint32_t, u32, )
593 
594 #define DO_MBED_LOCKFREE_OLDVAL_3OPS(name, OP, Constants) \
595  DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint8_t, u8, B) \
596  DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint16_t, u16, H) \
597  DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint32_t, u32, )
598 
599 #define DO_MBED_LOCKFREE_OLDVAL_2OPS(name, OP, Constants) \
600  DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint8_t, u8, B) \
601  DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint16_t, u16, H) \
602  DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint32_t, u32, )
603 
604 #define DO_MBED_LOCKFREE_CAS_STRONG_OPS() \
605  DO_MBED_LOCKFREE_CAS_STRONG_OP(uint8_t, u8, B) \
606  DO_MBED_LOCKFREE_CAS_STRONG_OP(uint16_t, u16, H) \
607  DO_MBED_LOCKFREE_CAS_STRONG_OP(uint32_t, u32, )
608 
609 #define DO_MBED_LOCKFREE_CAS_WEAK_OPS() \
610  DO_MBED_LOCKFREE_CAS_WEAK_OP(uint8_t, u8, B) \
611  DO_MBED_LOCKFREE_CAS_WEAK_OP(uint16_t, u16, H) \
612  DO_MBED_LOCKFREE_CAS_WEAK_OP(uint32_t, u32, )
613 
614 // Note that these macros define a number of functions that are
615 // not in mbed_atomic.h, like core_util_atomic_and_fetch_u16.
616 // These are not documented via the doxygen in mbed_atomic.h, so
617 // for now should be regarded as internal only. They are used by the
618 // Atomic<T> template as an optimisation though.
619 
620 // We always use the "S" form of operations - avoids yet another
621 // possible unneeded distinction between Thumbv1 and Thumbv2, and
622 // may reduce code size by allowing 16-bit instructions.
623 #if !MBED_EXCLUSIVE_ACCESS_THUMB1
624 // I constraint is 12-bit modified immediate constant
625 // L constraint is negated 12-bit modified immediate constant
626 // (relying on assembler to swap ADD/SUB)
627 // We could permit J (-4095 to +4095) if we used ADD/SUB
628 // instead of ADDS/SUBS, but then that would block generation
629 // of the 16-bit forms. Shame we can't specify "don't care"
630 // for the "S", or get the GNU multi-alternative to
631 // choose ADDS/ADD appropriately.
632 
633 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_add, ADDS, "IL")
634 DO_MBED_LOCKFREE_NEWVAL_2OPS(incr, ADDS, "IL")
635 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_sub, SUBS, "IL")
636 DO_MBED_LOCKFREE_NEWVAL_2OPS(decr, SUBS, "IL")
637 // K constraint is inverted 12-bit modified immediate constant
638 // (relying on assembler substituting BIC for AND)
639 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_and, ANDS, "IK")
640 DO_MBED_LOCKFREE_NEWVAL_2OPS(and_fetch, ANDS, "IK")
641 #if MBED_EXCLUSIVE_ACCESS_ARM
642 // ARM does not have ORN instruction, so take plain immediates.
643 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_or, ORRS, "I")
644 DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch, ORRS, "I")
645 #else
646 // Thumb-2 has ORN instruction, and assembler substitutes ORN for ORR.
647 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_or, ORRS, "IK")
648 DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch, ORRS, "IK")
649 #endif
650 // I constraint is 12-bit modified immediate operand
651 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_xor, EORS, "I")
652 DO_MBED_LOCKFREE_NEWVAL_2OPS(xor_fetch, EORS, "I")
653 #else // MBED_EXCLUSIVE_ACCESS_THUMB1
654 // I constraint is 0-255; J is -255 to -1, suitable for
655 // 2-op ADD/SUB (relying on assembler to swap ADD/SUB)
656 // L constraint is -7 to +7, suitable for 3-op ADD/SUB
657 // (relying on assembler to swap ADD/SUB)
658 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_add, ADDS, "L")
659 DO_MBED_LOCKFREE_NEWVAL_2OPS(incr, ADDS, "IJ")
660 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_sub, SUBS, "L")
661 DO_MBED_LOCKFREE_NEWVAL_2OPS(decr, SUBS, "IJ")
662 DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_and, ANDS, "")
663 DO_MBED_LOCKFREE_NEWVAL_2OPS(and_fetch, ANDS, "")
664 DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_or, ORRS, "")
665 DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch, ORRS, "")
666 DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_xor, EORS, "")
667 DO_MBED_LOCKFREE_NEWVAL_2OPS(xor_fetch, EORS, "")
668 #endif
669 
670 DO_MBED_LOCKFREE_EXCHG_OPS()
671 DO_MBED_LOCKFREE_CAS_STRONG_OPS()
672 DO_MBED_LOCKFREE_CAS_WEAK_OPS()
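/* Usage sketch for the functions generated above (illustrative only; `counter`
 * is assumed to be a volatile uint32_t defined elsewhere):
 *
 *     uint32_t before = core_util_atomic_fetch_add_u32(&counter, 1); // value prior to the add
 *     uint32_t after  = core_util_atomic_incr_u32(&counter, 1);      // value after the add
 *
 * The fetch_* forms (OLDVAL macros) return the value seen before the operation;
 * incr/decr and the *_fetch bitwise forms (NEWVAL macros) return the result.
 */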
673 
674 #define DO_MBED_LOCKED_FETCH_OP_ORDERINGS(name) \
675  DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint64_t, u64)
676 #define DO_MBED_LOCKED_CAS_ORDERINGS(name) \
677  DO_MBED_LOCKED_CAS_ORDERING(name, uint64_t, u64)
678 #else // MBED_EXCLUSIVE_ACCESS
679 /* All the operations are locked, so need no ordering parameters */
680 #define DO_MBED_LOCKED_FETCH_OP_ORDERINGS(name) \
681  DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint8_t, u8) \
682  DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint16_t, u16) \
683  DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint32_t, u32) \
684  DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint64_t, u64)
685 #define DO_MBED_LOCKED_CAS_ORDERINGS(name) \
686  DO_MBED_LOCKED_CAS_ORDERING(name, uint8_t, u8) \
687  DO_MBED_LOCKED_CAS_ORDERING(name, uint16_t, u16) \
688  DO_MBED_LOCKED_CAS_ORDERING(name, uint32_t, u32) \
689  DO_MBED_LOCKED_CAS_ORDERING(name, uint64_t, u64)
690 
691 MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *valuePtr, MBED_UNUSED mbed_memory_order order)
692 {
693  return core_util_atomic_flag_test_and_set(valuePtr);
694 }
695 #endif // MBED_EXCLUSIVE_ACCESS
696 
697 /********************* OPERATIONS THAT ARE ALWAYS LOCK-FREE ****************/
698 
699 /* Lock-free loads and stores don't need assembler - just aligned accesses */
700 /* Silly ordering of `T volatile` is because T can be `void *` */
701 #define DO_MBED_LOCKFREE_LOADSTORE(T, V, fn_suffix) \
702 MBED_FORCEINLINE T core_util_atomic_load_##fn_suffix(T const V *valuePtr) \
703 { \
704  T value = *valuePtr; \
705  MBED_BARRIER(); \
706  return value; \
707 } \
708  \
709 MBED_FORCEINLINE T core_util_atomic_load_explicit_##fn_suffix(T const V *valuePtr, mbed_memory_order order) \
710 { \
711  MBED_CHECK_LOAD_ORDER(order); \
712  T value = *valuePtr; \
713  MBED_ACQUIRE_BARRIER(order); \
714  return value; \
715 } \
716  \
717 MBED_FORCEINLINE void core_util_atomic_store_##fn_suffix(T V *valuePtr, T value) \
718 { \
719  MBED_BARRIER(); \
720  *valuePtr = value; \
721  MBED_BARRIER(); \
722 } \
723  \
724 MBED_FORCEINLINE void core_util_atomic_store_explicit_##fn_suffix(T V *valuePtr, T value, mbed_memory_order order) \
725 { \
726  MBED_CHECK_STORE_ORDER(order); \
727  MBED_RELEASE_BARRIER(order); \
728  *valuePtr = value; \
729  MBED_SEQ_CST_BARRIER(order); \
730 }
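/* Sketch of what the macro above produces for uint32_t (illustrative):
 *
 *     uint32_t core_util_atomic_load_u32(const volatile uint32_t *valuePtr)
 *     {
 *         uint32_t value = *valuePtr;   // a single aligned access is naturally atomic
 *         MBED_BARRIER();               // gives the default load its acquire/seq_cst ordering
 *         return value;
 *     }
 *
 * The _explicit variants consult `order` instead: a relaxed load or store emits
 * no barrier, an acquire load keeps the trailing barrier, and a seq_cst store
 * keeps the MBED_SEQ_CST_BARRIER() after the assignment.
 */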
731 
732 MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr)
733 {
734  MBED_BARRIER();
735  flagPtr->_flag = false;
736  MBED_BARRIER();
737 }
738 
739 MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(volatile core_util_atomic_flag *flagPtr, mbed_memory_order order)
740 {
741  MBED_CHECK_STORE_ORDER(order);
742  MBED_RELEASE_BARRIER(order);
743  flagPtr->_flag = false;
744  MBED_SEQ_CST_BARRIER(order);
745 }
746 
747 #ifdef __cplusplus
748 // Temporarily turn off extern "C", so we can provide non-volatile load/store
749 // overloads for efficiency. All these functions are static inline, so this has
750 // no linkage effect exactly, it just permits the overloads.
751 } // extern "C"
752 
753 // For efficiency it's worth having non-volatile overloads
754 MBED_FORCEINLINE void core_util_atomic_flag_clear(core_util_atomic_flag *flagPtr)
755 {
756  MBED_BARRIER();
757  flagPtr->_flag = false;
758  MBED_BARRIER();
759 }
760 
761 MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(core_util_atomic_flag *flagPtr, mbed_memory_order order)
762 {
763  MBED_RELEASE_BARRIER(order);
764  flagPtr->_flag = false;
765  MBED_SEQ_CST_BARRIER(order);
766 }
767 
768 DO_MBED_LOCKFREE_LOADSTORE(uint8_t,, u8)
769 DO_MBED_LOCKFREE_LOADSTORE(uint16_t,, u16)
770 DO_MBED_LOCKFREE_LOADSTORE(uint32_t,, u32)
771 DO_MBED_LOCKFREE_LOADSTORE(int8_t,, s8)
772 DO_MBED_LOCKFREE_LOADSTORE(int16_t,, s16)
773 DO_MBED_LOCKFREE_LOADSTORE(int32_t,, s32)
774 DO_MBED_LOCKFREE_LOADSTORE(bool,, bool)
775 DO_MBED_LOCKFREE_LOADSTORE(void *,, ptr)
776 
777 #endif
778 
779 DO_MBED_LOCKFREE_LOADSTORE(uint8_t, volatile, u8)
780 DO_MBED_LOCKFREE_LOADSTORE(uint16_t, volatile, u16)
781 DO_MBED_LOCKFREE_LOADSTORE(uint32_t, volatile, u32)
782 DO_MBED_LOCKFREE_LOADSTORE(int8_t, volatile, s8)
783 DO_MBED_LOCKFREE_LOADSTORE(int16_t, volatile, s16)
784 DO_MBED_LOCKFREE_LOADSTORE(int32_t, volatile, s32)
785 DO_MBED_LOCKFREE_LOADSTORE(bool, volatile, bool)
786 DO_MBED_LOCKFREE_LOADSTORE(void *, volatile, ptr)
787 
788 #ifdef __cplusplus
789 extern "C" {
790 #endif
791 
792 /********************* GENERIC VARIANTS - SIGNED, BOOL, POINTERS ****************/
793 
794 MBED_FORCEINLINE int64_t core_util_atomic_load_s64(const volatile int64_t *valuePtr)
795 {
796  return (int64_t)core_util_atomic_load_u64((const volatile uint64_t *)valuePtr);
797 }
798 
799 MBED_FORCEINLINE void core_util_atomic_store_s64(volatile int64_t *valuePtr, int64_t desiredValue)
800 {
801  core_util_atomic_store_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue);
802 }
803 
804 #define DO_MBED_SIGNED_CAS_OP(name, T, fn_suffix) \
805 MBED_FORCEINLINE bool core_util_atomic_##name##_s##fn_suffix(volatile T *ptr, \
806  T *expectedCurrentValue, T desiredValue) \
807 { \
808  return core_util_atomic_##name##_u##fn_suffix((volatile u##T *)ptr, \
809  (u##T *)expectedCurrentValue, (u##T)desiredValue); \
810 } \
811  \
812 MBED_FORCEINLINE bool core_util_atomic_##name##_explicit_s##fn_suffix(volatile T *ptr, \
813  T *expectedCurrentValue, T desiredValue, \
814  mbed_memory_order success, mbed_memory_order failure) \
815 { \
816  return core_util_atomic_##name##_explicit_u##fn_suffix((volatile u##T *)ptr, \
817  (u##T *)expectedCurrentValue, (u##T)desiredValue, success, failure); \
818 }
819 
820 #define DO_MBED_SIGNED_CAS_OPS(name) \
821  DO_MBED_SIGNED_CAS_OP(name, int8_t, 8) \
822  DO_MBED_SIGNED_CAS_OP(name, int16_t, 16) \
823  DO_MBED_SIGNED_CAS_OP(name, int32_t, 32) \
824  DO_MBED_SIGNED_CAS_OP(name, int64_t, 64)
825 
826 DO_MBED_SIGNED_CAS_OPS(cas)
827 DO_MBED_SIGNED_CAS_OPS(compare_exchange_weak)
828 
829 MBED_FORCEINLINE bool core_util_atomic_cas_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue)
830 {
831  return core_util_atomic_cas_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue);
832 }
833 
834 MBED_FORCEINLINE bool core_util_atomic_cas_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure)
835 {
836  return core_util_atomic_cas_explicit_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue, success, failure);
837 }
838 
839 inline bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue)
840 {
841 #if MBED_ATOMIC_PTR_SIZE == 32
842  return core_util_atomic_cas_u32(
843  (volatile uint32_t *)ptr,
844  (uint32_t *)expectedCurrentValue,
845  (uint32_t)desiredValue);
846 #else
847  return core_util_atomic_cas_u64(
848  (volatile uint64_t *)ptr,
849  (uint64_t *)expectedCurrentValue,
850  (uint64_t)desiredValue);
851 #endif
852 }
853 
854 MBED_FORCEINLINE bool core_util_atomic_cas_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure)
855 {
856 #if MBED_ATOMIC_PTR_SIZE == 32
857  return core_util_atomic_cas_explicit_u32(
858  (volatile uint32_t *)ptr,
859  (uint32_t *)expectedCurrentValue,
860  (uint32_t)desiredValue,
861  success, failure);
862 #else
863  return core_util_atomic_cas_explicit_u64(
864  (volatile uint64_t *)ptr,
865  (uint64_t *)expectedCurrentValue,
866  (uint64_t)desiredValue,
867  success, failure);
868 #endif
869 }
870 
871 MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue)
872 {
873  return core_util_atomic_compare_exchange_weak_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue);
874 }
875 
876 MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure)
877 {
878  return core_util_atomic_compare_exchange_weak_explicit_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue, success, failure);
879 }
880 
881 MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue)
882 {
883 #if MBED_ATOMIC_PTR_SIZE == 32
884  return core_util_atomic_compare_exchange_weak_u32(
885  (volatile uint32_t *)ptr,
886  (uint32_t *)expectedCurrentValue,
887  (uint32_t)desiredValue);
888 #else
889  return core_util_atomic_compare_exchange_weak_u64(
890  (volatile uint64_t *)ptr,
891  (uint64_t *)expectedCurrentValue,
892  (uint64_t)desiredValue);
893 #endif
894 }
895 
896 MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure)
897 {
898 #if MBED_ATOMIC_PTR_SIZE == 32
899  return core_util_atomic_compare_exchange_weak_explicit_u32(
900  (volatile uint32_t *)ptr,
901  (uint32_t *)expectedCurrentValue,
902  (uint32_t)desiredValue,
903  success, failure);
904 #else
905  return core_util_atomic_compare_exchange_weak_explicit_u64(
906  (volatile uint64_t *)ptr,
907  (uint64_t *)expectedCurrentValue,
908  (uint64_t)desiredValue,
909  success, failure);
910 #endif
911 }
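/* Typical retry-loop usage of the weak CAS wrappers above (sketch; `head` and
 * `node` are hypothetical, not part of this header - e.g. a lock-free stack
 * with `static void *volatile head;` and a node whose first member is `void *next;`):
 *
 *     void *expected = core_util_atomic_load_ptr(&head);
 *     do {
 *         node->next = expected;
 *     } while (!core_util_atomic_compare_exchange_weak_ptr(&head, &expected, node));
 *
 * On failure the wrapper stores the value it actually observed back into
 * `expected`, so each retry starts from fresh data, and any spurious failure of
 * the weak form is absorbed by the same loop.
 */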
912 
913 #define DO_MBED_SIGNED_FETCH_OP(name, T, fn_suffix) \
914 MBED_FORCEINLINE T core_util_atomic_##name##_s##fn_suffix(volatile T *valuePtr, T arg) \
915 { \
916  return (T)core_util_atomic_##name##_u##fn_suffix((volatile u##T *)valuePtr, (u##T)arg); \
917 }
918 
919 #define DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, T, fn_suffix) \
920 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_s##fn_suffix(volatile T *valuePtr, T arg, mbed_memory_order order) \
921 { \
922  return (T)core_util_atomic_##name##_explicit_u##fn_suffix((volatile u##T *)valuePtr, (u##T)arg, order); \
923 }
924 
925 #define DO_MBED_SIGNED_FETCH_OPS(name) \
926  DO_MBED_SIGNED_FETCH_OP(name, int8_t, 8) \
927  DO_MBED_SIGNED_FETCH_OP(name, int16_t, 16) \
928  DO_MBED_SIGNED_FETCH_OP(name, int32_t, 32) \
929  DO_MBED_SIGNED_FETCH_OP(name, int64_t, 64)
930 
931 #define DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(name) \
932  DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int8_t, 8) \
933  DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int16_t, 16) \
934  DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int32_t, 32) \
935  DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int64_t, 64)
936 
937 DO_MBED_SIGNED_FETCH_OPS(exchange)
938 DO_MBED_SIGNED_FETCH_OPS(incr)
939 DO_MBED_SIGNED_FETCH_OPS(decr)
940 DO_MBED_SIGNED_FETCH_OPS(fetch_add)
941 DO_MBED_SIGNED_FETCH_OPS(fetch_sub)
942 
943 DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(exchange)
944 DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(fetch_add)
945 DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(fetch_sub)
946 
947 MBED_FORCEINLINE bool core_util_atomic_exchange_bool(volatile bool *valuePtr, bool desiredValue)
948 {
949  return (bool)core_util_atomic_exchange_u8((volatile uint8_t *)valuePtr, desiredValue);
950 }
951 
952 MBED_FORCEINLINE bool core_util_atomic_exchange_explicit_bool(volatile bool *valuePtr, bool desiredValue, mbed_memory_order order)
953 {
954  return (bool)core_util_atomic_exchange_explicit_u8((volatile uint8_t *)valuePtr, desiredValue, order);
955 }
956 
957 inline void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue)
958 {
959 #if MBED_ATOMIC_PTR_SIZE == 32
960  return (void *)core_util_atomic_exchange_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue);
961 #else
962  return (void *)core_util_atomic_exchange_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue);
963 #endif
964 }
965 
966 MBED_FORCEINLINE void *core_util_atomic_exchange_explicit_ptr(void *volatile *valuePtr, void *desiredValue, mbed_memory_order order)
967 {
968 #if MBED_ATOMIC_PTR_SIZE == 32
969  return (void *)core_util_atomic_exchange_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue, order);
970 #else
971  return (void *)core_util_atomic_exchange_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue, order);
972 #endif
973 }
974 
975 inline void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta)
976 {
977 #if MBED_ATOMIC_PTR_SIZE == 32
978  return (void *)core_util_atomic_incr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta);
979 #else
980  return (void *)core_util_atomic_incr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta);
981 #endif
982 }
983 
984 inline void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta)
985 {
986 #if MBED_ATOMIC_PTR_SIZE == 32
987  return (void *)core_util_atomic_decr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta);
988 #else
989  return (void *)core_util_atomic_decr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta);
990 #endif
991 }
992 
993 MBED_FORCEINLINE void *core_util_atomic_fetch_add_ptr(void *volatile *valuePtr, ptrdiff_t arg)
994 {
995 #if MBED_ATOMIC_PTR_SIZE == 32
996  return (void *)core_util_atomic_fetch_add_u32((volatile uint32_t *)valuePtr, (uint32_t)arg);
997 #else
998  return (void *)core_util_atomic_fetch_add_u64((volatile uint64_t *)valuePtr, (uint64_t)arg);
999 #endif
1000 }
1001 
1002 MBED_FORCEINLINE void *core_util_atomic_fetch_add_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
1003 {
1004 #if MBED_ATOMIC_PTR_SIZE == 32
1005  return (void *)core_util_atomic_fetch_add_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)arg, order);
1006 #else
1007  return (void *)core_util_atomic_fetch_add_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)arg, order);
1008 #endif
1009 }
1010 
1011 MBED_FORCEINLINE void *core_util_atomic_fetch_sub_ptr(void *volatile *valuePtr, ptrdiff_t arg)
1012 {
1013 #if MBED_ATOMIC_PTR_SIZE == 32
1014  return (void *)core_util_atomic_fetch_sub_u32((volatile uint32_t *)valuePtr, (uint32_t)arg);
1015 #else
1016  return (void *)core_util_atomic_fetch_sub_u64((volatile uint64_t *)valuePtr, (uint64_t)arg);
1017 #endif
1018 }
1019 
1020 MBED_FORCEINLINE void *core_util_atomic_fetch_sub_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
1021 {
1022 #if MBED_ATOMIC_PTR_SIZE == 32
1023  return (void *)core_util_atomic_fetch_sub_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)arg, order);
1024 #else
1025  return (void *)core_util_atomic_fetch_sub_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)arg, order);
1026 #endif
1027 }
1028 
1029 /***************** DUMMY EXPLICIT ORDERING FOR LOCKED OPS *****************/
1030 
1031 /* Need to throw away the ordering information for all locked operations */
1032 MBED_FORCEINLINE uint64_t core_util_atomic_load_explicit_u64(const volatile uint64_t *valuePtr, MBED_UNUSED mbed_memory_order order)
1033 {
1034  MBED_CHECK_LOAD_ORDER(order);
1035  return core_util_atomic_load_u64(valuePtr);
1036 }
1037 
1038 MBED_FORCEINLINE int64_t core_util_atomic_load_explicit_s64(const volatile int64_t *valuePtr, MBED_UNUSED mbed_memory_order order)
1039 {
1040  MBED_CHECK_LOAD_ORDER(order);
1041  return core_util_atomic_load_s64(valuePtr);
1042 }
1043 
1044 MBED_FORCEINLINE void core_util_atomic_store_explicit_u64(volatile uint64_t *valuePtr, uint64_t desiredValue, MBED_UNUSED mbed_memory_order order)
1045 {
1046  MBED_CHECK_STORE_ORDER(order);
1047  core_util_atomic_store_u64(valuePtr, desiredValue);
1048 }
1049 
1050 MBED_FORCEINLINE void core_util_atomic_store_explicit_s64(volatile int64_t *valuePtr, int64_t desiredValue, MBED_UNUSED mbed_memory_order order)
1051 {
1052  MBED_CHECK_STORE_ORDER(order);
1053  core_util_atomic_store_s64(valuePtr, desiredValue);
1054 }
1055 
1056 #define DO_MBED_LOCKED_FETCH_OP_ORDERING(name, T, fn_suffix) \
1057 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
1058  volatile T *valuePtr, T arg, MBED_UNUSED mbed_memory_order order) \
1059 { \
1060  return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \
1061 }
1062 
1063 #define DO_MBED_LOCKED_CAS_ORDERING(name, T, fn_suffix) \
1064 MBED_FORCEINLINE bool core_util_atomic_##name##_explicit_##fn_suffix( \
1065  volatile T *ptr, T *expectedCurrentValue, T desiredValue, \
1066  MBED_UNUSED mbed_memory_order success, \
1067  MBED_UNUSED mbed_memory_order failure) \
1068 { \
1069  MBED_CHECK_CAS_ORDER(success, failure); \
1070  return core_util_atomic_##name##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \
1071 }
1072 
1073 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(exchange)
1074 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_add)
1075 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_sub)
1076 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_and)
1077 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_or)
1078 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_xor)
1079 DO_MBED_LOCKED_CAS_ORDERINGS(cas)
1080 DO_MBED_LOCKED_CAS_ORDERINGS(compare_exchange_weak)
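/* For example, the fetch-op macro above expands for fetch_add/u64 to a thin
 * forwarding wrapper (sketch of the expansion):
 *
 *     MBED_FORCEINLINE uint64_t core_util_atomic_fetch_add_explicit_u64(
 *         volatile uint64_t *valuePtr, uint64_t arg, MBED_UNUSED mbed_memory_order order)
 *     {
 *         return core_util_atomic_fetch_add_u64(valuePtr, arg);
 *     }
 *
 * The ordering arguments are validated only for CAS (via MBED_CHECK_CAS_ORDER
 * in debug builds) and then discarded, since the locked implementations need
 * no ordering parameters.
 */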
1081 
1082 #ifdef __cplusplus
1083 } // extern "C"
1084 
1085 /***************** TEMPLATE IMPLEMENTATIONS *****************/
1086 
1087 /* Each of these groups provides specialisations for the T template for each of
1088  * the small types (there is no base implementation), and the base implementation
1089  * of the T * template.
1090  */
1091 #define DO_MBED_ATOMIC_LOAD_TEMPLATE(T, fn_suffix) \
1092 template<> \
1093 inline T core_util_atomic_load(const volatile T *valuePtr) noexcept \
1094 { \
1095  return core_util_atomic_load_##fn_suffix(valuePtr); \
1096 } \
1097  \
1098 template<> \
1099 inline T core_util_atomic_load(const T *valuePtr) noexcept \
1100 { \
1101  return core_util_atomic_load_##fn_suffix(valuePtr); \
1102 } \
1103  \
1104 template<> \
1105 inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) noexcept \
1106 { \
1107  return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
1108 } \
1109  \
1110 template<> \
1111 inline T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) noexcept \
1112 { \
1113  return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
1114 }
1115 
1116 template<typename T>
1117 inline T *core_util_atomic_load(T *const volatile *valuePtr) noexcept
1118 {
1119  return (T *) core_util_atomic_load_ptr((void *const volatile *) valuePtr);
1120 }
1121 
1122 template<typename T>
1123 inline T *core_util_atomic_load(T *const *valuePtr) noexcept
1124 {
1125  return (T *) core_util_atomic_load_ptr((void *const *) valuePtr);
1126 }
1127 
1128 template<typename T>
1129 inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order) noexcept
1130 {
1131  return (T *) core_util_atomic_load_explicit_ptr((void *const volatile *) valuePtr, order);
1132 }
1133 
1134 template<typename T>
1135 inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order) noexcept
1136 {
1137  return (T *) core_util_atomic_load_explicit_ptr((void *const *) valuePtr, order);
1138 }
1139 
1140 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint8_t, u8)
1141 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint16_t, u16)
1142 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint32_t, u32)
1143 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint64_t, u64)
1144 DO_MBED_ATOMIC_LOAD_TEMPLATE(int8_t, s8)
1145 DO_MBED_ATOMIC_LOAD_TEMPLATE(int16_t, s16)
1146 DO_MBED_ATOMIC_LOAD_TEMPLATE(int32_t, s32)
1147 DO_MBED_ATOMIC_LOAD_TEMPLATE(int64_t, s64)
1148 DO_MBED_ATOMIC_LOAD_TEMPLATE(bool, bool)
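// Usage sketch for the load specialisations above (illustrative; `flags` and
// `table` are hypothetical objects, not part of this header):
//
//     volatile uint32_t flags;
//     uint32_t snapshot = core_util_atomic_load(&flags);   // dispatches to core_util_atomic_load_u32
//
//     int *volatile table;
//     int *p = core_util_atomic_load(&table);              // uses the T * template via core_util_atomic_load_ptr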
1149 
1150 #define DO_MBED_ATOMIC_STORE_TEMPLATE(T, fn_suffix) \
1151 template<> \
1152 inline void core_util_atomic_store(volatile T *valuePtr, T val) noexcept \
1153 { \
1154  core_util_atomic_store_##fn_suffix(valuePtr, val); \
1155 } \
1156  \
1157 template<> \
1158 inline void core_util_atomic_store(T *valuePtr, T val) noexcept \
1159 { \
1160  core_util_atomic_store_##fn_suffix(valuePtr, val); \
1161 } \
1162  \
1163 template<> \
1164 inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) noexcept \
1165 { \
1166  core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
1167 } \
1168  \
1169 template<> \
1170 inline void core_util_atomic_store_explicit(T *valuePtr, T val, mbed_memory_order order) noexcept \
1171 { \
1172  core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
1173 }
1174 
1175 template<typename T>
1176 inline void core_util_atomic_store(T *volatile *valuePtr, T *val) noexcept
1177 {
1178  core_util_atomic_store_ptr((void *volatile *) valuePtr, val);
1179 }
1180 
1181 template<typename T>
1182 inline void core_util_atomic_store(T **valuePtr, T *val) noexcept
1183 {
1184  core_util_atomic_store_ptr((void **) valuePtr, val);
1185 }
1186 
1187 template<typename T>
1188 inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *val, mbed_memory_order order) noexcept
1189 {
1190  core_util_atomic_store_ptr((void *volatile *) valuePtr, val, order);
1191 }
1192 
1193 template<typename T>
1194 inline void core_util_atomic_store_explicit(T **valuePtr, T *val, mbed_memory_order order) noexcept
1195 {
1196  core_util_atomic_store_ptr((void **) valuePtr, val, order);
1197 }
1198 
1199 DO_MBED_ATOMIC_STORE_TEMPLATE(uint8_t, u8)
1200 DO_MBED_ATOMIC_STORE_TEMPLATE(uint16_t, u16)
1201 DO_MBED_ATOMIC_STORE_TEMPLATE(uint32_t, u32)
1202 DO_MBED_ATOMIC_STORE_TEMPLATE(uint64_t, u64)
1203 DO_MBED_ATOMIC_STORE_TEMPLATE(int8_t, s8)
1204 DO_MBED_ATOMIC_STORE_TEMPLATE(int16_t, s16)
1205 DO_MBED_ATOMIC_STORE_TEMPLATE(int32_t, s32)
1206 DO_MBED_ATOMIC_STORE_TEMPLATE(int64_t, s64)
1207 DO_MBED_ATOMIC_STORE_TEMPLATE(bool, bool)
1208 
1209 #define DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, T, fn_suffix) \
1210 template<> inline \
1211 bool core_util_atomic_##tname(volatile T *ptr, T *expectedCurrentValue, T desiredValue) noexcept \
1212 { \
1213  return core_util_atomic_##fname##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \
1214 }
1215 
1216 template<typename T>
1217 inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept
1218 {
1219  return core_util_atomic_cas_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue);
1220 }
1221 
1222 template<typename T>
1223 inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept
1224 {
1225  return core_util_atomic_compare_exchange_weak_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue);
1226 }
1227 
1228 #define DO_MBED_ATOMIC_CAS_TEMPLATES(tname, fname) \
1229  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint8_t, u8) \
1230  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint16_t, u16) \
1231  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint32_t, u32) \
1232  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint64_t, u64) \
1233  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int8_t, s8) \
1234  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int16_t, s16) \
1235  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int32_t, s32) \
1236  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int64_t, s64) \
1237  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, bool, bool)
1238 
1239 DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_strong, cas)
1240 DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_weak, compare_exchange_weak)
1241 
1242 #define DO_MBED_ATOMIC_OP_TEMPLATE(name, T, fn_suffix) \
1243 template<> \
1244 inline T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept \
1245 { \
1246  return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \
1247 } \
1248  \
1249 template<> \
1250 inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \
1251  mbed_memory_order order) noexcept \
1252 { \
1253  return core_util_atomic_##name##_explicit_##fn_suffix(valuePtr, arg, order); \
1254 }
1255 
1256 
1257 template<>
1258 inline bool core_util_atomic_exchange(volatile bool *valuePtr, bool arg) noexcept
1259 {
1260  return core_util_atomic_exchange_bool(valuePtr, arg);
1261 }
1262 
1263 template<>
1264 inline bool core_util_atomic_exchange_explicit(volatile bool *valuePtr, bool arg, mbed_memory_order order) noexcept
1265 {
1266  return core_util_atomic_exchange_explicit_bool(valuePtr, arg, order);
1267 }
1268 
1269 template<typename T>
1270 inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *arg) noexcept
1271 {
1272  return (T *) core_util_atomic_exchange_ptr((void *volatile *) valuePtr, arg);
1273 }
1274 
1275 template<typename T>
1276 inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *arg, mbed_memory_order order) noexcept
1277 {
1278  return (T *) core_util_atomic_exchange_explicit_ptr((void *volatile *) valuePtr, arg, order);
1279 }
1280 
1281 template<typename T>
1282 inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg) noexcept
1283 {
1284  return (T *) core_util_atomic_fetch_add_ptr((void *volatile *) valuePtr, arg * sizeof(T));
1285 }
1286 
1287 template<typename T>
1288 inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept
1289 {
1290  return (T *) core_util_atomic_fetch_add_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order);
1291 }
1292 
1293 template<typename T>
1294 inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg) noexcept
1295 {
1296  return (T *) core_util_atomic_fetch_sub_ptr((void *volatile *) valuePtr, arg * sizeof(T));
1297 }
1298 
1299 template<typename T>
1300 inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept
1301 {
1302  return (T *) core_util_atomic_fetch_sub_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order);
1303 }
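// Note the sizeof(T) scaling above: for the pointer templates, `arg` is an
// element count (sketch, with a hypothetical `buf`):
//
//     uint32_t *volatile buf;
//     core_util_atomic_fetch_add(&buf, 1);   // advances buf by sizeof(uint32_t) bytes,
//                                            // i.e. core_util_atomic_fetch_add_ptr(..., 1 * sizeof(uint32_t))
//
// whereas the plain core_util_atomic_fetch_add_ptr / core_util_atomic_incr_ptr
// functions earlier in this file take a raw byte offset.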
1304 
1305 
1306 #define DO_MBED_ATOMIC_OP_U_TEMPLATES(name) \
1307  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint8_t, u8) \
1308  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint16_t, u16) \
1309  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint32_t, u32) \
1310  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint64_t, u64)
1311 
1312 #define DO_MBED_ATOMIC_OP_S_TEMPLATES(name) \
1313  DO_MBED_ATOMIC_OP_TEMPLATE(name, int8_t, s8) \
1314  DO_MBED_ATOMIC_OP_TEMPLATE(name, int16_t, s16) \
1315  DO_MBED_ATOMIC_OP_TEMPLATE(name, int32_t, s32) \
1316  DO_MBED_ATOMIC_OP_TEMPLATE(name, int64_t, s64)
1317 
1318 #define DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, T, fn_suffix, postname, OP) \
1319 template<> \
1320 inline T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept \
1321 { \
1322  return core_util_atomic_##postname##_##fn_suffix(valuePtr, arg) OP; \
1323 } \
1324  \
1325 template<> \
1326 inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \
1327  mbed_memory_order order) noexcept \
1328 { \
1329  return core_util_atomic_##postname##_explicit_##fn_suffix(valuePtr, arg, order) OP; \
1330 }
1331 
1332 DO_MBED_ATOMIC_OP_U_TEMPLATES(exchange)
1333 DO_MBED_ATOMIC_OP_S_TEMPLATES(exchange)
1334 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_add)
1335 DO_MBED_ATOMIC_OP_S_TEMPLATES(fetch_add)
1336 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_sub)
1337 DO_MBED_ATOMIC_OP_S_TEMPLATES(fetch_sub)
1338 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_and)
1339 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_or)
1340 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_xor)
1341 
1342 namespace mstd {
1343 namespace impl {
1344 
1345 // Use custom assembler forms for pre-ops where available, else construct from post-ops
1346 #if MBED_EXCLUSIVE_ACCESS
1347 #define DO_MBED_ATOMIC_PRE_OP_TEMPLATES(name, postname, OP) \
1348  template<typename T> T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept; \
1349  template<typename T> T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept; \
1350  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint8_t, u8) \
1351  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint16_t, u16) \
1352  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint32_t, u32) \
1353  DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint64_t, u64, postname, OP)
1354 #else
1355 #define DO_MBED_ATOMIC_PRE_OP_TEMPLATES(name, postname, OP) \
1356  template<typename T> T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept; \
1357  template<typename T> T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept; \
1358  DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint8_t, u8, postname, OP) \
1359  DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint16_t, u16, postname, OP) \
1360  DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint32_t, u32, postname, OP) \
1361  DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint64_t, u64, postname, OP)
1362 #endif
1363 
1364 // *INDENT-OFF*
1365 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(incr, fetch_add, + arg)
1366 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(decr, fetch_sub, - arg)
1367 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(and_fetch, fetch_and, & arg)
1368 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(or_fetch, fetch_or, | arg)
1369 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(xor_fetch, fetch_xor, ^ arg)
1370 // *INDENT-ON*
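// For example, with MBED_EXCLUSIVE_ACCESS the 8/16/32-bit incr specialisations
// call the assembler-backed core_util_atomic_incr_u* functions directly, while
// the uint64_t one is built from its post-op (sketch of the expansion):
//
//     template<>
//     inline uint64_t core_util_atomic_incr(volatile uint64_t *valuePtr, uint64_t arg) noexcept
//     {
//         return core_util_atomic_fetch_add_u64(valuePtr, arg) + arg;
//     }
//
// Without MBED_EXCLUSIVE_ACCESS, every width is constructed this way.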
1371 
1372 }
1373 }
1374 
1375 #endif // __cplusplus
1376 
1377 #undef MBED_DOP_REG
1378 #undef MBED_CMP_IMM
1379 #undef MBED_SUB3_IMM
1380 #undef DO_MBED_LOCKFREE_EXCHG_ASM
1381 #undef DO_MBED_LOCKFREE_NEWVAL_2OP_ASM
1382 #undef DO_MBED_LOCKFREE_OLDVAL_3OP_ASM
1383 #undef DO_MBED_LOCKFREE_OLDVAL_2OP_ASM
1384 #undef DO_MBED_LOCKFREE_CAS_WEAK_ASM
1385 #undef DO_MBED_LOCKFREE_CAS_STRONG_ASM
1386 #undef DO_MBED_LOCKFREE_LOADSTORE
1387 #undef DO_MBED_LOCKFREE_EXCHG_OP
1388 #undef DO_MBED_LOCKFREE_CAS_WEAK_OP
1389 #undef DO_MBED_LOCKFREE_CAS_STRONG_OP
1390 #undef DO_MBED_LOCKFREE_NEWVAL_2OP
1391 #undef DO_MBED_LOCKFREE_OLDVAL_2OP
1392 #undef DO_MBED_LOCKFREE_OLDVAL_3OP
1393 #undef DO_MBED_LOCKFREE_EXCHG_OPS
1394 #undef DO_MBED_LOCKFREE_NEWVAL_2OPS
1395 #undef DO_MBED_LOCKFREE_OLDVAL_2OPS
1396 #undef DO_MBED_LOCKFREE_OLDVAL_3OPS
1397 #undef DO_MBED_LOCKFREE_CAS_WEAK_OPS
1398 #undef DO_MBED_LOCKFREE_CAS_STRONG_OPS
1399 #undef DO_MBED_SIGNED_CAS_OP
1400 #undef DO_MBED_SIGNED_CAS_OPS
1401 #undef DO_MBED_SIGNED_FETCH_OP
1402 #undef DO_MBED_SIGNED_EXPLICIT_FETCH_OP
1403 #undef DO_MBED_SIGNED_FETCH_OPS
1404 #undef DO_MBED_SIGNED_EXPLICIT_FETCH_OPS
1405 #undef DO_MBED_LOCKED_FETCH_OP_ORDERINGS
1406 #undef DO_MBED_LOCKED_CAS_ORDERINGS
1407 #undef MBED_ACQUIRE_BARRIER
1408 #undef MBED_RELEASE_BARRIER
1409 #undef MBED_SEQ_CST_BARRIER
1410 #undef DO_MBED_ATOMIC_LOAD_TEMPLATE
1411 #undef DO_MBED_ATOMIC_STORE_TEMPLATE
1412 #undef DO_MBED_ATOMIC_EXCHANGE_TEMPLATE
1413 #undef DO_MBED_ATOMIC_CAS_TEMPLATE
1414 #undef DO_MBED_ATOMIC_CAS_TEMPLATES
1415 #undef DO_MBED_ATOMIC_FETCH_TEMPLATE
1416 #undef DO_MBED_ATOMIC_FETCH_U_TEMPLATES
1417 #undef DO_MBED_ATOMIC_FETCH_S_TEMPLATES
1418 
1419 #endif
MBED_FORCEINLINE uint64_t core_util_atomic_fetch_add_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order)
Atomic add.
MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr)
Atomic clear.
void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue)
Atomic store.
#define MBED_FORCEINLINE
MBED_FORCEINLINE Declare a function that must always be inlined.
MBED_FORCEINLINE void core_util_atomic_store_explicit_s64(volatile int64_t *valuePtr, int64_t desiredValue, mbed_memory_order order)
Atomic store.
MBED_FORCEINLINE void * core_util_atomic_fetch_sub_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
Atomic subtract.
uint64_t core_util_atomic_fetch_add_u64(volatile uint64_t *valuePtr, uint64_t arg)
Atomic add.
MBED_FORCEINLINE void * core_util_atomic_fetch_add_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
Atomic add.
bool core_util_atomic_compare_exchange_weak_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue)
Atomic compare and set.
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue)
Atomic compare and set.
bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue)
Atomic compare and set.
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue, mbed_memory_order success, mbed_memory_order failure)
Atomic compare and set.
MBED_FORCEINLINE void core_util_atomic_store_ptr(void *volatile *valuePtr, void *value)
Atomic store.
bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue)
Atomic compare and set.
MBED_FORCEINLINE void * core_util_atomic_fetch_add_ptr(void *volatile *valuePtr, ptrdiff_t arg)
Atomic add.
MBED_FORCEINLINE uint64_t core_util_atomic_load_explicit_u64(const volatile uint64_t *valuePtr, mbed_memory_order order)
Atomic load.
uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta)
Atomic decrement.
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue, mbed_memory_order success, mbed_memory_order failure)
Atomic compare and set.
uint64_t core_util_atomic_exchange_u64(volatile uint64_t *valuePtr, uint64_t desiredValue)
Atomic exchange.
bool core_util_atomic_compare_exchange_weak_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue)
Atomic compare and set.
MBED_FORCEINLINE bool core_util_atomic_exchange_explicit_bool(volatile bool *valuePtr, bool desiredValue, mbed_memory_order order)
Atomic exchange.
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure)
Atomic compare and set.
core_util_atomic_flag
A lock-free, primitive atomic flag.
Definition: mbed_atomic.h:115
void * core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta)
Atomic decrement.
void * core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta)
Atomic increment.
uint64_t core_util_atomic_incr_u64(volatile uint64_t *valuePtr, uint64_t delta)
Atomic increment.
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue, mbed_memory_order success, mbed_memory_order failure)
Atomic compare and set.
MBED_FORCEINLINE bool core_util_atomic_exchange_bool(volatile bool *valuePtr, bool desiredValue)
Atomic exchange.
MBED_FORCEINLINE uint32_t core_util_atomic_fetch_sub_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order)
Atomic subtract.
uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta)
Atomic decrement.
uint32_t core_util_atomic_exchange_u32(volatile uint32_t *valuePtr, uint32_t desiredValue)
Atomic exchange.
MBED_FORCEINLINE uint8_t core_util_atomic_exchange_explicit_u8(volatile uint8_t *valuePtr, uint8_t desiredValue, mbed_memory_order order)
Atomic exchange.
mbed_memory_order
Memory order constraints for atomic operations.
Definition: mbed_atomic.h:51
void * core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue)
Atomic exchange.
bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue)
Atomic compare and set.
MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *valuePtr, mbed_memory_order order)
Atomic test and set.
MBED_FORCEINLINE uint32_t core_util_atomic_exchange_explicit_u32(volatile uint32_t *valuePtr, uint32_t desiredValue, mbed_memory_order order)
Atomic exchange.
MBED_FORCEINLINE int64_t core_util_atomic_load_explicit_s64(const volatile int64_t *valuePtr, MBED_UNUSED mbed_memory_order order)
Atomic load.
uint32_t core_util_atomic_fetch_sub_u32(volatile uint32_t *valuePtr, uint32_t arg)
Atomic subtract.
bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue)
Atomic compare and set.
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure)
Atomic compare and set.
uint8_t core_util_atomic_exchange_u8(volatile uint8_t *valuePtr, uint8_t desiredValue)
Atomic exchange.
bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr)
Atomic test and set.
MBED_FORCEINLINE void * core_util_atomic_exchange_explicit_ptr(void *volatile *valuePtr, void *desiredValue, mbed_memory_order order)
Atomic exchange.
MBED_FORCEINLINE void core_util_atomic_store_explicit_u64(volatile uint64_t *valuePtr, uint64_t desiredValue, mbed_memory_order order)
Atomic store.
MBED_FORCEINLINE uint64_t core_util_atomic_fetch_sub_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order)
Atomic subtract.
MBED_FORCEINLINE void * core_util_atomic_load_ptr(void *const volatile *valuePtr)
Atomic load.
MBED_FORCEINLINE void * core_util_atomic_fetch_sub_ptr(void *volatile *valuePtr, ptrdiff_t arg)
Atomic subtract.
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure)
Atomic compare and set.
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue, mbed_memory_order success, mbed_memory_order failure)
Atomic compare and set.
uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr)
Atomic load.
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue, mbed_memory_order success, mbed_memory_order failure)
Atomic compare and set.
MBED_FORCEINLINE void * core_util_atomic_load_explicit_ptr(void *const volatile *valuePtr, mbed_memory_order order)
Atomic load.
MBED_FORCEINLINE int64_t core_util_atomic_load_s64(const volatile int64_t *valuePtr)
Atomic load.
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure)
Atomic compare and set.
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue)
Atomic compare and set.
uint32_t core_util_atomic_fetch_add_u32(volatile uint32_t *valuePtr, uint32_t arg)
Atomic add.
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue, mbed_memory_order success, mbed_memory_order failure)
Atomic compare and set.
MBED_FORCEINLINE uint64_t core_util_atomic_exchange_explicit_u64(volatile uint64_t *valuePtr, uint64_t desiredValue, mbed_memory_order order)
Atomic exchange.
MBED_FORCEINLINE bool core_util_atomic_cas_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue)
Atomic compare and set.
bool core_util_atomic_compare_exchange_weak_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue)
Atomic compare and set.
uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta)
Atomic increment.
MBED_FORCEINLINE uint32_t core_util_atomic_fetch_add_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order)
Atomic add.
MBED_FORCEINLINE void core_util_atomic_store_s64(volatile int64_t *valuePtr, int64_t desiredValue)
Atomic store.
MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(volatile core_util_atomic_flag *flagPtr, mbed_memory_order order)
Atomic clear.
#define MBED_UNUSED
Declare a function argument to be unused, suppressing compiler warnings.
uint64_t core_util_atomic_fetch_sub_u64(volatile uint64_t *valuePtr, uint64_t arg)
Atomic subtract.
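
The declarations above are the public core_util_atomic_* operations (documented in mbed_atomic.h) that this implementation header backs. As a rough usage sketch only, the fragment below combines three of them: an unconditional fetch-and-add, a weak compare-exchange retry loop, and the primitive atomic flag used as a simple spinlock. It assumes the usual "platform/mbed_atomic.h" include path and the CORE_UTIL_ATOMIC_FLAG_INIT initializer from that header; the helper names (count_event, saturating_incr, with_lock) and the shared variables are illustrative, not part of the API.

#include <stdint.h>
#include "platform/mbed_atomic.h"

/* Illustrative shared state (not part of the API). */
static volatile uint32_t event_count = 0;
static volatile core_util_atomic_flag busy = CORE_UTIL_ATOMIC_FLAG_INIT;

/* Unconditional increment: fetch_add returns the value held before the addition. */
static uint32_t count_event(void)
{
    return core_util_atomic_fetch_add_u32(&event_count, 1);
}

/* Conditional update via a CAS loop: the weak compare-exchange may fail
 * spuriously, so it is retried; on failure the current value is written
 * back into 'expected' and the limit is re-checked. */
static void saturating_incr(volatile uint32_t *counter, uint32_t limit)
{
    uint32_t expected = core_util_atomic_load_u32(counter);
    do {
        if (expected >= limit) {
            return; /* already saturated */
        }
    } while (!core_util_atomic_compare_exchange_weak_u32(counter, &expected, expected + 1));
}

/* Spinlock-style critical section built on the atomic flag:
 * test_and_set returns the previous state, so true means another
 * context already holds the flag. */
static void with_lock(void (*critical)(void))
{
    while (core_util_atomic_flag_test_and_set(&busy)) {
        /* spin until the flag was previously clear */
    }
    critical();
    core_util_atomic_flag_clear(&busy);
}

The non-explicit forms above use the default sequentially consistent ordering; the _explicit variants listed earlier take an additional mbed_memory_order argument for cases where a weaker ordering (for example, acquire when taking the flag and release when clearing it) is sufficient.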