mbed_atomic_impl.h
1 /*
2  * Copyright (c) 2015-2016, ARM Limited, All Rights Reserved
3  * SPDX-License-Identifier: Apache-2.0
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License"); you may
6  * not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 
18 #ifndef __MBED_ATOMIC_IMPL_H__
19 #define __MBED_ATOMIC_IMPL_H__
20 
21 #ifndef __MBED_UTIL_ATOMIC_H__
22 #error "mbed_atomic_impl.h is designed to be included only by mbed_atomic.h"
23 #endif
24 
25 #include <stdint.h>
26 #include "cmsis.h"
27 #include "platform/mbed_assert.h"
28 #include "platform/mbed_toolchain.h"
29 
30 #ifdef __cplusplus
31 extern "C" {
32 #endif
33 
34 #ifdef MBED_DEBUG
35 /* Plain loads must not have "release" or "acquire+release" order */
36 #define MBED_CHECK_LOAD_ORDER(order) MBED_ASSERT((order) != mbed_memory_order_release && (order) != mbed_memory_order_acq_rel)
37 
38 /* Plain stores must not have "consume", "acquire" or "acquire+release" order */
39 #define MBED_CHECK_STORE_ORDER(order) MBED_ASSERT((order) != mbed_memory_order_consume && (order) != mbed_memory_order_acquire && (order) != mbed_memory_order_acq_rel)
40 
41 /* Compare exchange needs failure order no stronger than success, and failure can't be "release" or "acquire+release" */
42 #define MBED_CHECK_CAS_ORDER(success, failure) \
43  MBED_ASSERT((failure) <= (success) && (failure) != mbed_memory_order_release && (failure) != mbed_memory_order_acq_rel)
44 #else
45 #define MBED_CHECK_LOAD_ORDER(order) (void)0
46 #define MBED_CHECK_STORE_ORDER(order) (void)0
47 #define MBED_CHECK_CAS_ORDER(success, failure) (void)0
48 #endif
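
/* Illustrative example (not exercised by this header itself): from the caller's
 * side these checks mean, for instance,
 *     core_util_atomic_load_explicit_u32(&v, mbed_memory_order_acquire);      // OK
 *     core_util_atomic_load_explicit_u32(&v, mbed_memory_order_release);      // MBED_ASSERT fires under MBED_DEBUG
 *     core_util_atomic_store_explicit_u32(&v, 0, mbed_memory_order_acquire);  // MBED_ASSERT fires under MBED_DEBUG
 * In non-debug builds the macros compile to (void)0, so illegal orders are not trapped.
 */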
49 
50 /* This is currently just to silence unit tests, so no better test required */
51 #ifdef __MBED__
52 #define MBED_ATOMIC_PTR_SIZE 32
53 #else
54 #define MBED_ATOMIC_PTR_SIZE 64
55 #endif
56 
57 /* Place barrier after a load or read-modify-write if a consume or acquire operation */
58 #define MBED_ACQUIRE_BARRIER(order) do { \
59  if ((order) & (mbed_memory_order_consume|mbed_memory_order_acquire)) { \
60  MBED_BARRIER(); \
61  } } while (0)
62 
63 /* Place barrier before a store or read-modify-write if a release operation */
64 #define MBED_RELEASE_BARRIER(order) do { \
65  if ((order) & mbed_memory_order_release) { \
66  MBED_BARRIER(); \
67  } } while (0)
68 
69 /* Place barrier after a plain store if a sequentially consistent operation */
70 #define MBED_SEQ_CST_BARRIER(order) do { \
71  if ((order) == mbed_memory_order_seq_cst) { \
72  MBED_BARRIER(); \
73  } } while (0)
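
/* A sketch of how these three barriers compose (this mirrors the patterns used
 * by the explicit-order functions later in this file): an explicit-order
 * read-modify-write is bracketed as
 *
 *     MBED_RELEASE_BARRIER(order);    // before the LDREX/STREX loop, for release orders
 *     ...LDREX/STREX retry loop...
 *     MBED_ACQUIRE_BARRIER(order);    // after the loop, for consume/acquire orders
 *
 * while a plain store uses MBED_RELEASE_BARRIER before and MBED_SEQ_CST_BARRIER
 * after, because only a seq_cst store needs the trailing barrier.
 */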
74 
75 
76 
77 #if MBED_EXCLUSIVE_ACCESS
78 
79 /* This header file provides C inline definitions for atomic functions. */
80 /* For C99 inline semantic compatibility, mbed_atomic_impl.c has out-of-line definitions. */
81 
82 /****************************** ASSEMBLER **********************************/
83 
84 // Fiddle about with constraints. These work for GCC and clang, but
85 // IAR appears to be restricted to having only a single constraint,
86 // so we can't do immediates.
87 #if MBED_EXCLUSIVE_ACCESS_THUMB1
88 #define MBED_DOP_REG "l" // Need low register to get 16-bit 3-op ADD/SUB
89 #define MBED_CMP_IMM "I" // CMP 8-bit immediate
90 #define MBED_SUB3_IMM "L" // -7 to +7
91 #else
92 #define MBED_DOP_REG "r" // Can use 32-bit 3-op ADD/SUB, so any registers
93 #define MBED_CMP_IMM "IL" // CMP or CMN, 12-bit immediate
94 #define MBED_SUB3_IMM "IL" // SUB or ADD, 12-bit immediate
95 #endif
96 
97 #if defined __clang__ || defined __GNUC__
98 #define DO_MBED_LOCKFREE_EXCHG_ASM(M) \
99  __asm volatile ( \
100  ".syntax unified\n\t" \
101  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
102  "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \
103  : [oldValue] "=&r" (oldValue), \
104  [fail] "=&r" (fail), \
105  [value] "+Q" (*valuePtr) \
106  : [newValue] "r" (newValue) \
107  : \
108  )
109 #elif defined __ICCARM__
110 /* In IAR "r" means low register if Thumbv1 (there's no way to specify any register...) */
111 #define DO_MBED_LOCKFREE_EXCHG_ASM(M) \
112  asm volatile ( \
113  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
114  "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \
115  : [oldValue] "=&r" (oldValue), \
116  [fail] "=&r" (fail) \
117  : [valuePtr] "r" (valuePtr), \
118  [newValue] "r" (newValue) \
119  : "memory" \
120  )
121 #endif
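
/* For reference, a sketch of what DO_MBED_LOCKFREE_EXCHG_ASM(B) emits in the
 * GCC/Clang form (oldValue, fail, valuePtr and newValue are locals/parameters of
 * the functions generated later in this file):
 *
 *     LDREXB  oldValue, [valuePtr]          ; load-exclusive the current value
 *     STREXB  fail, newValue, [valuePtr]    ; try to store; fail = 0 on success
 *
 * The generated functions loop while "fail" is non-zero, i.e. until the
 * store-exclusive completes without the exclusive monitor being disturbed.
 */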
122 
123 #if defined __clang__ || defined __GNUC__
124 #define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M) \
125  __asm volatile ( \
126  ".syntax unified\n\t" \
127  "LDREX"#M "\t%[newValue], %[value]\n\t" \
128  #OP "\t%[newValue], %[arg]\n\t" \
129  "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \
130  : [newValue] "=&" MBED_DOP_REG (newValue), \
131  [fail] "=&r" (fail), \
132  [value] "+Q" (*valuePtr) \
133  : [arg] Constants MBED_DOP_REG (arg) \
134  : "cc" \
135  )
136 #elif defined __ICCARM__
137 /* In IAR "r" means low register if Thumbv1 (there's no way to specify any register...) */
138 /* IAR does not support "ADDS reg, reg", so write as 3-operand */
139 #define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M) \
140  asm volatile ( \
141  "LDREX"#M "\t%[newValue], [%[valuePtr]]\n" \
142  #OP "\t%[newValue], %[newValue], %[arg]\n" \
143  "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \
144  : [newValue] "=&r" (newValue), \
145  [fail] "=&r" (fail) \
146  : [valuePtr] "r" (valuePtr), \
147  [arg] "r" (arg) \
148  : "memory", "cc" \
149  )
150 #endif
151 
152 #if defined __clang__ || defined __GNUC__
153 #define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M) \
154  __asm volatile ( \
155  ".syntax unified\n\t" \
156  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
157  #OP "\t%[newValue], %[oldValue], %[arg]\n\t" \
158  "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \
159  : [oldValue] "=&" MBED_DOP_REG (oldValue), \
160  [newValue] "=&" MBED_DOP_REG (newValue), \
161  [fail] "=&r" (fail), \
162  [value] "+Q" (*valuePtr) \
163  : [arg] Constants MBED_DOP_REG (arg) \
164  : "cc" \
165  )
166 #elif defined __ICCARM__
167 /* In IAR "r" means low register if Thumbv1 (there's no way to specify any register...) */
168 #define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M) \
169  asm volatile ( \
170  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
171  #OP "\t%[newValue], %[oldValue], %[arg]\n" \
172  "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \
173  : [oldValue] "=&r" (oldValue), \
174  [newValue] "=&r" (newValue), \
175  [fail] "=&r" (fail) \
176  : [valuePtr] "r" (valuePtr), \
177  [arg] "r" (arg) \
178  : "memory", "cc" \
179  )
180 #endif
181 
182 /* Bitwise operations are harder to do in ARMv8-M baseline - there
183  * are only 2-operand versions of the instructions.
184  */
185 #if defined __clang__ || defined __GNUC__
186 #define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M) \
187  __asm volatile ( \
188  ".syntax unified\n\t" \
189  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
190  "MOV" "\t%[newValue], %[oldValue]\n\t" \
191  #OP "\t%[newValue], %[arg]\n\t" \
192  "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \
193  : [oldValue] "=&r" (oldValue), \
194  [newValue] "=&l" (newValue), \
195  [fail] "=&r" (fail), \
196  [value] "+Q" (*valuePtr) \
197  : [arg] Constants "l" (arg) \
198  : "cc" \
199  )
200 #elif defined __ICCARM__
201 #define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M) \
202  asm volatile ( \
203  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
204  "MOV" "\t%[newValue], %[oldValue]\n" \
205  #OP "\t%[newValue], %[arg]\n" \
206  "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \
207  : [oldValue] "=&r" (oldValue), \
208  [newValue] "=&r" (newValue), \
209  [fail] "=&r" (fail) \
210  : [valuePtr] "r" (valuePtr), \
211  [arg] "r" (arg) \
212  : "memory", "cc" \
213  )
214 #endif
215 
216 /* Note that we split ARM and Thumb implementations for CAS, as
217  * the key distinction is the handling of conditions. Thumb-2 IT is
218  * partially deprecated, so avoid it, making Thumb-1 and Thumb-2
219  * implementations the same.
220  */
221 #if MBED_EXCLUSIVE_ACCESS_ARM
222 #if defined __clang__ || defined __GNUC__
223 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
224  __asm volatile ( \
225  ".syntax unified\n\t" \
226  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
227  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n\t"\
228  "STREX"#M"EQ\t%[fail], %[desiredValue], %[value]\n\t" \
229  : [oldValue] "=&r" (oldValue), \
230  [fail] "=&r" (fail), \
231  [value] "+Q" (*ptr) \
232  : [desiredValue] "r" (desiredValue), \
233  [expectedValue] "ILr" (expectedValue) \
234  : "cc" \
235  )
236 #elif defined __ICCARM__
237 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
238  asm volatile ( \
239  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
240  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n" \
241  "STREX"#M"EQ\t%[fail], %[desiredValue], [%[valuePtr]]\n"\
242  : [oldValue] "=&r" (oldValue), \
243  [fail] "=&r" (fail) \
244  : [desiredValue] "r" (desiredValue), \
245  [expectedValue] "r" (expectedValue), \
246  [valuePtr] "r" (ptr) \
247  : "memory", "cc" \
248  )
249 #endif
250 #else // MBED_EXCLUSIVE_ACCESS_ARM
251 #if defined __clang__ || defined __GNUC__
252 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
253  __asm volatile ( \
254  ".syntax unified\n\t" \
255  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
256  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n\t"\
257  "BNE" "\t%=f\n\t" \
258  "STREX"#M "\t%[fail], %[desiredValue], %[value]\n" \
259  "%=:" \
260  : [oldValue] "=&" MBED_DOP_REG (oldValue), \
261  [fail] "=&" MBED_DOP_REG (fail), \
262  [value] "+Q" (*ptr) \
263  : [desiredValue] "r" (desiredValue), \
264  [expectedValue] MBED_SUB3_IMM MBED_DOP_REG (expectedValue) \
265  : "cc" \
266  )
267 #elif defined __ICCARM__
268 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
269  asm volatile ( \
270  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
271  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n" \
272  "BNE" "\tdone\n\t" \
273  "STREX"#M "\t%[fail], %[desiredValue], [%[valuePtr]]\n"\
274  "done:" \
275  : [oldValue] "=&r" (oldValue), \
276  [fail] "=&r" (fail) \
277  : [desiredValue] "r" (desiredValue), \
278  [expectedValue] "r" (expectedValue), \
279  [valuePtr] "r" (ptr) \
280  : "memory", "cc" \
281  )
282 #endif
283 #endif // MBED_EXCLUSIVE_ACCESS_ARM
284 
285 /* For strong CAS, conditional execution is complex enough to
286  * not be worthwhile, so all implementations look like Thumb-1.
287  * (This is the operation for which STREX returning 0 for success
288  * is beneficial.)
289  */
290 #if defined __clang__ || defined __GNUC__
291 #define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M) \
292  __asm volatile ( \
293  ".syntax unified\n\t" \
294  "\n%=:\n\t" \
295  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
296  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n\t"\
297  "BNE" "\t%=f\n" \
298  "STREX"#M "\t%[fail], %[desiredValue], %[value]\n\t" \
299  "CMP" "\t%[fail], #0\n\t" \
300  "BNE" "\t%=b\n" \
301  "%=:" \
302  : [oldValue] "=&" MBED_DOP_REG (oldValue), \
303  [fail] "=&" MBED_DOP_REG (fail), \
304  [value] "+Q" (*ptr) \
305  : [desiredValue] "r" (desiredValue), \
306  [expectedValue] MBED_SUB3_IMM MBED_DOP_REG (expectedValue) \
307  : "cc" \
308  )
309 #elif defined __ICCARM__
310 #define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M) \
311  asm volatile ( \
312  "retry:\n" \
313  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
314  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n" \
315  "BNE" "\tdone\n" \
316  "STREX"#M "\t%[fail], %[desiredValue], [%[valuePtr]]\n"\
317  "CMP" "\t%[fail], #0\n" \
318  "BNE" "\tretry\n" \
319  "done:" \
320  : [oldValue] "=&r" (oldValue), \
321  [fail] "=&r" (fail) \
322  : [desiredValue] "r" (desiredValue), \
323  [expectedValue] "r" (expectedValue), \
324  [valuePtr] "r" (ptr) \
325  : "memory", "cc" \
326  )
327 #endif
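
/* Usage sketch: the weak form may fail spuriously (a disturbed exclusive monitor
 * makes STREX fail even though the values matched), so callers retry in a loop,
 * which is why the weak assembler above has no internal retry:
 *
 *     uint32_t desired, expected = core_util_atomic_load_u32(&val);
 *     do {
 *         desired = expected + 1;
 *     } while (!core_util_atomic_compare_exchange_weak_u32(&val, &expected, desired));
 *
 * The strong form performs that retry internally and reports failure only when
 * the value genuinely differed from *expectedCurrentValue.
 */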
328 
329 /********************* LOCK-FREE IMPLEMENTATION MACROS ****************/
330 
331 /* Note care taken with types here. Values which the assembler outputs correctly
332  * narrowed, or inputs without caring about width, are marked as type T. Other
333  * values are uint32_t. It's not clear from documentation whether assembler
334  * assumes anything about widths, but try to signal correctly to get necessary
335  * narrowing, and avoid unnecessary.
336  * Tests show that GCC in particular will pass in unnarrowed values - eg passing
337  * "uint8_t arg = -1" to the assembler as 0xFFFFFFFF. This is fine for, eg, add_u8,
338  * but wouldn't be for compare_and_exchange_u8.
339  * On the other hand, it seems to be impossible to stop GCC inserting narrowing
340  * instructions for the output - it will always put in UXTB for the oldValue of
341  * an operation.
342  */
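/* For example (illustrative): with "uint8_t arg = 0xFF;" GCC may hand the
 * assembler the unnarrowed value 0xFFFFFFFF. That is harmless for ADDS + STREXB,
 * which stores only the low byte, but it would break an equality test, so the
 * CAS macros below assign *expectedCurrentValue (a T) to a uint32_t so that any
 * narrowing happens in C before the SUBS comparison.
 */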
343 #define DO_MBED_LOCKFREE_EXCHG_OP(T, fn_suffix, M) \
344 inline T core_util_atomic_exchange_##fn_suffix(volatile T *valuePtr, T newValue) \
345 { \
346  T oldValue; \
347  uint32_t fail; \
348  MBED_BARRIER(); \
349  do { \
350  DO_MBED_LOCKFREE_EXCHG_ASM(M); \
351  } while (fail); \
352  MBED_BARRIER(); \
353  return oldValue; \
354 } \
355  \
356 MBED_FORCEINLINE T core_util_atomic_exchange_explicit_##fn_suffix( \
357  volatile T *valuePtr, T newValue, mbed_memory_order order) \
358 { \
359  T oldValue; \
360  uint32_t fail; \
361  MBED_RELEASE_BARRIER(order); \
362  do { \
363  DO_MBED_LOCKFREE_EXCHG_ASM(M); \
364  } while (fail); \
365  MBED_ACQUIRE_BARRIER(order); \
366  return oldValue; \
367 }
368 
369 #define DO_MBED_LOCKFREE_CAS_WEAK_OP(T, fn_suffix, M) \
370 inline bool core_util_atomic_compare_exchange_weak_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \
371 { \
372  MBED_BARRIER(); \
373  T oldValue; \
374  uint32_t fail, expectedValue = *expectedCurrentValue; \
375  DO_MBED_LOCKFREE_CAS_WEAK_ASM(M); \
376  if (fail) { \
377  *expectedCurrentValue = oldValue; \
378  } \
379  MBED_BARRIER(); \
380  return !fail; \
381 } \
382  \
383 MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) \
384 { \
385  MBED_CHECK_CAS_ORDER(success, failure); \
386  MBED_RELEASE_BARRIER(success); \
387  T oldValue; \
388  uint32_t fail, expectedValue = *expectedCurrentValue; \
389  DO_MBED_LOCKFREE_CAS_WEAK_ASM(M); \
390  if (fail) { \
391  *expectedCurrentValue = oldValue; \
392  } \
393  MBED_ACQUIRE_BARRIER(fail ? failure : success); \
394  return !fail; \
395 }
396 
397 #define DO_MBED_LOCKFREE_CAS_STRONG_OP(T, fn_suffix, M) \
398 inline bool core_util_atomic_cas_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \
399 { \
400  MBED_BARRIER(); \
401  T oldValue; \
402  uint32_t fail, expectedValue = *expectedCurrentValue; \
403  DO_MBED_LOCKFREE_CAS_STRONG_ASM(M); \
404  if (fail) { \
405  *expectedCurrentValue = oldValue; \
406  } \
407  MBED_BARRIER(); \
408  return !fail; \
409 } \
410  \
411 MBED_FORCEINLINE bool core_util_atomic_cas_explicit_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) \
412 { \
413  MBED_CHECK_CAS_ORDER(success, failure); \
414  MBED_RELEASE_BARRIER(success); \
415  T oldValue; \
416  uint32_t fail, expectedValue = *expectedCurrentValue; \
417  DO_MBED_LOCKFREE_CAS_STRONG_ASM(M); \
418  if (fail) { \
419  *expectedCurrentValue = oldValue; \
420  } \
421  MBED_ACQUIRE_BARRIER(fail ? failure : success); \
422  return !fail; \
423 }
424 
425 
426 #define DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, T, fn_suffix, M) \
427 inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \
428 { \
429  uint32_t fail, newValue; \
430  MBED_BARRIER(); \
431  do { \
432  DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M); \
433  } while (fail); \
434  MBED_BARRIER(); \
435  return (T) newValue; \
436 } \
437  \
438 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
439  volatile T *valuePtr, T arg, mbed_memory_order order) \
440 { \
441  uint32_t fail, newValue; \
442  MBED_RELEASE_BARRIER(order); \
443  do { \
444  DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M); \
445  } while (fail); \
446  MBED_ACQUIRE_BARRIER(order); \
447  return (T) newValue; \
448 } \
449 
450 #define DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, T, fn_suffix, M) \
451 inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \
452 { \
453  T oldValue; \
454  uint32_t fail, newValue; \
455  MBED_BARRIER(); \
456  do { \
457  DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M); \
458  } while (fail); \
459  MBED_BARRIER(); \
460  return oldValue; \
461 } \
462  \
463 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
464  volatile T *valuePtr, T arg, mbed_memory_order order) \
465 { \
466  T oldValue; \
467  uint32_t fail, newValue; \
468  MBED_RELEASE_BARRIER(order); \
469  do { \
470  DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M); \
471  } while (fail); \
472  MBED_ACQUIRE_BARRIER(order); \
473  return oldValue; \
474 } \
475 
476 #define DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, T, fn_suffix, M) \
477 inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) { \
478  T oldValue; \
479  uint32_t fail, newValue; \
480  MBED_BARRIER(); \
481  do { \
482  DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M); \
483  } while (fail); \
484  MBED_BARRIER(); \
485  return oldValue; \
486 } \
487  \
488 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
489  volatile T *valuePtr, T arg, mbed_memory_order order) \
490 { \
491  T oldValue; \
492  uint32_t fail, newValue; \
493  MBED_RELEASE_BARRIER(order); \
494  do { \
495  DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M); \
496  } while (fail); \
497  MBED_ACQUIRE_BARRIER(order); \
498  return oldValue; \
499 } \
500 
501 inline bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *valuePtr)
502 {
503  MBED_BARRIER();
504  bool oldValue, newValue = true;
505  uint32_t fail;
506  do {
507  DO_MBED_LOCKFREE_EXCHG_ASM(B);
508  } while (fail);
509  MBED_BARRIER();
510  return oldValue;
511 }
512 
513 MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *valuePtr, mbed_memory_order order)
514 {
515  MBED_RELEASE_BARRIER(order);
516  bool oldValue, newValue = true;
517  uint32_t fail;
518  do {
519  DO_MBED_LOCKFREE_EXCHG_ASM(B);
520  } while (fail);
521  MBED_ACQUIRE_BARRIER(order);
522  return oldValue;
523 }
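
/* Usage sketch (caller's view; CORE_UTIL_ATOMIC_FLAG_INIT and the flag type come
 * from mbed_atomic.h, init_hardware is hypothetical):
 *
 *     static core_util_atomic_flag once = CORE_UTIL_ATOMIC_FLAG_INIT;
 *     if (!core_util_atomic_flag_test_and_set(&once)) {
 *         init_hardware();   // exactly one caller sees "false" and runs this
 *     }
 */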
524 
525 /********************* LOCK-FREE IMPLEMENTATION DEFINITIONS ****************/
526 
527 #define DO_MBED_LOCKFREE_EXCHG_OPS() \
528  DO_MBED_LOCKFREE_EXCHG_OP(uint8_t, u8, B) \
529  DO_MBED_LOCKFREE_EXCHG_OP(uint16_t, u16, H) \
530  DO_MBED_LOCKFREE_EXCHG_OP(uint32_t, u32, )
531 
532 #define DO_MBED_LOCKFREE_NEWVAL_2OPS(name, OP, Constants) \
533  DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint8_t, u8, B) \
534  DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint16_t, u16, H) \
535  DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint32_t, u32, )
536 
537 #define DO_MBED_LOCKFREE_OLDVAL_3OPS(name, OP, Constants) \
538  DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint8_t, u8, B) \
539  DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint16_t, u16, H) \
540  DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint32_t, u32, )
541 
542 #define DO_MBED_LOCKFREE_OLDVAL_2OPS(name, OP, Constants) \
543  DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint8_t, u8, B) \
544  DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint16_t, u16, H) \
545  DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint32_t, u32, )
546 
547 #define DO_MBED_LOCKFREE_CAS_STRONG_OPS() \
548  DO_MBED_LOCKFREE_CAS_STRONG_OP(uint8_t, u8, B) \
549  DO_MBED_LOCKFREE_CAS_STRONG_OP(uint16_t, u16, H) \
550  DO_MBED_LOCKFREE_CAS_STRONG_OP(uint32_t, u32, )
551 
552 #define DO_MBED_LOCKFREE_CAS_WEAK_OPS() \
553  DO_MBED_LOCKFREE_CAS_WEAK_OP(uint8_t, u8, B) \
554  DO_MBED_LOCKFREE_CAS_WEAK_OP(uint16_t, u16, H) \
555  DO_MBED_LOCKFREE_CAS_WEAK_OP(uint32_t, u32, )
556 
557 // Note that these macros define a number of functions that are
558 // not in mbed_atomic.h, like core_util_atomic_and_fetch_u16.
559 // These are not documented via the doxygen in mbed_atomic.h, so
560 // for now should be regarded as internal only. They are used by the
561 // Atomic<T> template as an optimisation though.
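// Naming sketch: the "fetch_" forms return the value seen before the operation,
// the "_fetch" forms return the value after it, e.g.
//     uint16_t v = 0x00F0;
//     core_util_atomic_fetch_and_u16(&v, 0x0030);   // returns 0x00F0, v becomes 0x0030
//     core_util_atomic_and_fetch_u16(&v, 0x0010);   // returns 0x0010, v becomes 0x0010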
562 
563 // We always use the "S" form of operations - avoids yet another
564 // possible unneeded distinction between Thumbv1 and Thumbv2, and
565 // may reduce code size by allowing 16-bit instructions.
566 #if !MBED_EXCLUSIVE_ACCESS_THUMB1
567 // I constraint is 12-bit modified immediate constant
568 // L constraint is negated 12-bit modified immediate constant
569 // (relying on assembler to swap ADD/SUB)
570 // We could permit J (-4095 to +4095) if we used ADD/SUB
571 // instead of ADDS/SUBS, but then that would block generation
572 // of the 16-bit forms. Shame we can't specify "don't care"
573 // for the "S", or get the GNU multi-alternative to
574 // choose ADDS/ADD appropriately.
575 
576 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_add, ADDS, "IL")
577 DO_MBED_LOCKFREE_NEWVAL_2OPS(incr, ADDS, "IL")
578 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_sub, SUBS, "IL")
579 DO_MBED_LOCKFREE_NEWVAL_2OPS(decr, SUBS, "IL")
580 // K constraint is inverted 12-bit modified immediate constant
581 // (relying on assembler substituting BIC for AND)
582 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_and, ANDS, "IK")
583 DO_MBED_LOCKFREE_NEWVAL_2OPS(and_fetch, ANDS, "IK")
584 #if MBED_EXCLUSIVE_ACCESS_ARM
585 // ARM does not have ORN instruction, so take plain immediates.
586 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_or, ORRS, "I")
587 DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch, ORRS, "I")
588 #else
589 // Thumb-2 has ORN instruction, and assembler substitutes ORN for ORR.
590 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_or, ORRS, "IK")
591 DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch, ORRS, "IK")
592 #endif
593 // I constraint is 12-bit modified immediate operand
594 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_xor, EORS, "I")
595 DO_MBED_LOCKFREE_NEWVAL_2OPS(xor_fetch, EORS, "I")
596 #else // MBED_EXCLUSIVE_ACCESS_THUMB1
597 // I constraint is 0-255; J is -255 to -1, suitable for
598 // 2-op ADD/SUB (relying on assembler to swap ADD/SUB)
599 // L constraint is -7 to +7, suitable for 3-op ADD/SUB
600 // (relying on assembler to swap ADD/SUB)
601 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_add, ADDS, "L")
602 DO_MBED_LOCKFREE_NEWVAL_2OPS(incr, ADDS, "IJ")
603 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_sub, SUBS, "L")
604 DO_MBED_LOCKFREE_NEWVAL_2OPS(decr, SUBS, "IJ")
605 DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_and, ANDS, "")
606 DO_MBED_LOCKFREE_NEWVAL_2OPS(and_fetch, ANDS, "")
607 DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_or, ORRS, "")
608 DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch, ORRS, "")
609 DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_xor, EORS, "")
610 DO_MBED_LOCKFREE_NEWVAL_2OPS(xor_fetch, EORS, "")
611 #endif
612 
613 DO_MBED_LOCKFREE_EXCHG_OPS()
614 DO_MBED_LOCKFREE_CAS_STRONG_OPS()
615 DO_MBED_LOCKFREE_CAS_WEAK_OPS()
616 
617 #define DO_MBED_LOCKED_FETCH_OP_ORDERINGS(name) \
618  DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint64_t, u64)
619 #define DO_MBED_LOCKED_CAS_ORDERINGS(name) \
620  DO_MBED_LOCKED_CAS_ORDERING(name, uint64_t, u64)
621 #else // MBED_EXCLUSIVE_ACCESS
622 /* All the operations are locked, so need no ordering parameters */
623 #define DO_MBED_LOCKED_FETCH_OP_ORDERINGS(name) \
624  DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint8_t, u8) \
625  DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint16_t, u16) \
626  DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint32_t, u32) \
627  DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint64_t, u64)
628 #define DO_MBED_LOCKED_CAS_ORDERINGS(name) \
629  DO_MBED_LOCKED_CAS_ORDERING(name, uint8_t, u8) \
630  DO_MBED_LOCKED_CAS_ORDERING(name, uint16_t, u16) \
631  DO_MBED_LOCKED_CAS_ORDERING(name, uint32_t, u32) \
632  DO_MBED_LOCKED_CAS_ORDERING(name, uint64_t, u64)
633 
634 MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *valuePtr, MBED_UNUSED mbed_memory_order order)
635 {
636  return core_util_atomic_flag_test_and_set(valuePtr);
637 }
638 #endif // MBED_EXCLUSIVE_ACCESS
639 
640 /********************* OPERATIONS THAT ARE ALWAYS LOCK-FREE ****************/
641 
642 /* Lock-free loads and stores don't need assembler - just aligned accesses */
643 /* Silly ordering of `T volatile` is because T can be `void *` */
644 #define DO_MBED_LOCKFREE_LOADSTORE(T, V, fn_suffix) \
645 MBED_FORCEINLINE T core_util_atomic_load_##fn_suffix(T const V *valuePtr) \
646 { \
647  T value = *valuePtr; \
648  MBED_BARRIER(); \
649  return value; \
650 } \
651  \
652 MBED_FORCEINLINE T core_util_atomic_load_explicit_##fn_suffix(T const V *valuePtr, mbed_memory_order order) \
653 { \
654  MBED_CHECK_LOAD_ORDER(order); \
655  T value = *valuePtr; \
656  MBED_ACQUIRE_BARRIER(order); \
657  return value; \
658 } \
659  \
660 MBED_FORCEINLINE void core_util_atomic_store_##fn_suffix(T V *valuePtr, T value) \
661 { \
662  MBED_BARRIER(); \
663  *valuePtr = value; \
664  MBED_BARRIER(); \
665 } \
666  \
667 MBED_FORCEINLINE void core_util_atomic_store_explicit_##fn_suffix(T V *valuePtr, T value, mbed_memory_order order) \
668 { \
669  MBED_CHECK_STORE_ORDER(order); \
670  MBED_RELEASE_BARRIER(order); \
671  *valuePtr = value; \
672  MBED_SEQ_CST_BARRIER(order); \
673 }
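
/* Usage sketch of the generated explicit forms (a release/acquire pairing;
 * "payload" and "ready" are hypothetical shared variables):
 *
 *     // producer
 *     payload = compute();
 *     core_util_atomic_store_explicit_u8(&ready, 1, mbed_memory_order_release);
 *
 *     // consumer
 *     if (core_util_atomic_load_explicit_u8(&ready, mbed_memory_order_acquire)) {
 *         use(payload);   // guaranteed to observe the payload written before the store
 *     }
 */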
674 
675 MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr)
676 {
677  MBED_BARRIER();
678  flagPtr->_flag = false;
679  MBED_BARRIER();
680 }
681 
682 MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(volatile core_util_atomic_flag *flagPtr, mbed_memory_order order)
683 {
684  MBED_CHECK_STORE_ORDER(order);
685  MBED_RELEASE_BARRIER(order);
686  flagPtr->_flag = false;
687  MBED_SEQ_CST_BARRIER(order);
688 }
689 
690 #ifdef __cplusplus
691 // Temporarily turn off extern "C", so we can provide non-volatile load/store
692 // overloads for efficiency. All these functions are static inline, so this has
693 // no linkage effect exactly, it just permits the overloads.
694 } // extern "C"
695 
696 // For efficiency it's worth having non-volatile overloads
697 MBED_FORCEINLINE void core_util_atomic_flag_clear(core_util_atomic_flag *flagPtr) noexcept
698 {
699  MBED_BARRIER();
700  flagPtr->_flag = false;
701  MBED_BARRIER();
702 }
703 
704 MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(core_util_atomic_flag *flagPtr, mbed_memory_order order) noexcept
705 {
706  MBED_RELEASE_BARRIER(order);
707  flagPtr->_flag = false;
708  MBED_SEQ_CST_BARRIER(order);
709 }
710 
711 DO_MBED_LOCKFREE_LOADSTORE(uint8_t,, u8)
712 DO_MBED_LOCKFREE_LOADSTORE(uint16_t,, u16)
713 DO_MBED_LOCKFREE_LOADSTORE(uint32_t,, u32)
714 DO_MBED_LOCKFREE_LOADSTORE(int8_t,, s8)
715 DO_MBED_LOCKFREE_LOADSTORE(int16_t,, s16)
716 DO_MBED_LOCKFREE_LOADSTORE(int32_t,, s32)
717 DO_MBED_LOCKFREE_LOADSTORE(bool,, bool)
718 DO_MBED_LOCKFREE_LOADSTORE(void *,, ptr)
719 
720 #endif
721 
722 DO_MBED_LOCKFREE_LOADSTORE(uint8_t, volatile, u8)
723 DO_MBED_LOCKFREE_LOADSTORE(uint16_t, volatile, u16)
724 DO_MBED_LOCKFREE_LOADSTORE(uint32_t, volatile, u32)
725 DO_MBED_LOCKFREE_LOADSTORE(int8_t, volatile, s8)
726 DO_MBED_LOCKFREE_LOADSTORE(int16_t, volatile, s16)
727 DO_MBED_LOCKFREE_LOADSTORE(int32_t, volatile, s32)
728 DO_MBED_LOCKFREE_LOADSTORE(bool, volatile, bool)
729 DO_MBED_LOCKFREE_LOADSTORE(void *, volatile, ptr)
730 
731 #ifdef __cplusplus
732 extern "C" {
733 #endif
734 
735 /********************* GENERIC VARIANTS - SIGNED, BOOL, POINTERS ****************/
736 
737 MBED_FORCEINLINE int64_t core_util_atomic_load_s64(const volatile int64_t *valuePtr)
738 {
739  return (int64_t)core_util_atomic_load_u64((const volatile uint64_t *)valuePtr);
740 }
741 
742 MBED_FORCEINLINE void core_util_atomic_store_s64(volatile int64_t *valuePtr, int64_t desiredValue)
743 {
744  core_util_atomic_store_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue);
745 }
746 
747 #define DO_MBED_SIGNED_CAS_OP(name, T, fn_suffix) \
748 MBED_FORCEINLINE bool core_util_atomic_##name##_s##fn_suffix(volatile T *ptr, \
749  T *expectedCurrentValue, T desiredValue) \
750 { \
751  return core_util_atomic_##name##_u##fn_suffix((volatile u##T *)ptr, \
752  (u##T *)expectedCurrentValue, (u##T)desiredValue); \
753 } \
754  \
755 MBED_FORCEINLINE bool core_util_atomic_##name##_explicit_s##fn_suffix(volatile T *ptr, \
756  T *expectedCurrentValue, T desiredValue, \
757  mbed_memory_order success, mbed_memory_order failure) \
758 { \
759  return core_util_atomic_##name##_explicit_u##fn_suffix((volatile u##T *)ptr, \
760  (u##T *)expectedCurrentValue, (u##T)desiredValue, success, failure); \
761 }
762 
763 #define DO_MBED_SIGNED_CAS_OPS(name) \
764  DO_MBED_SIGNED_CAS_OP(name, int8_t, 8) \
765  DO_MBED_SIGNED_CAS_OP(name, int16_t, 16) \
766  DO_MBED_SIGNED_CAS_OP(name, int32_t, 32) \
767  DO_MBED_SIGNED_CAS_OP(name, int64_t, 64)
768 
769 DO_MBED_SIGNED_CAS_OPS(cas)
770 DO_MBED_SIGNED_CAS_OPS(compare_exchange_weak)
771 
772 MBED_FORCEINLINE bool core_util_atomic_cas_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue)
773 {
774  return core_util_atomic_cas_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue);
775 }
776 
777 MBED_FORCEINLINE bool core_util_atomic_cas_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure)
778 {
779  return core_util_atomic_cas_explicit_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue, success, failure);
780 }
781 
782 inline bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue)
783 {
784 #if MBED_ATOMIC_PTR_SIZE == 32
785  return core_util_atomic_cas_u32(
786  (volatile uint32_t *)ptr,
787  (uint32_t *)expectedCurrentValue,
788  (uint32_t)desiredValue);
789 #else
790  return core_util_atomic_cas_u64(
791  (volatile uint64_t *)ptr,
792  (uint64_t *)expectedCurrentValue,
793  (uint64_t)desiredValue);
794 #endif
795 }
796 
797 MBED_FORCEINLINE bool core_util_atomic_cas_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure)
798 {
799 #if MBED_ATOMIC_PTR_SIZE == 32
800  return core_util_atomic_cas_explicit_u32(
801  (volatile uint32_t *)ptr,
802  (uint32_t *)expectedCurrentValue,
803  (uint32_t)desiredValue,
804  success, failure);
805 #else
806  return core_util_atomic_cas_explicit_u64(
807  (volatile uint64_t *)ptr,
808  (uint64_t *)expectedCurrentValue,
809  (uint64_t)desiredValue,
810  success, failure);
811 #endif
812 }
813 
814 MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue)
815 {
816  return core_util_atomic_compare_exchange_weak_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue);
817 }
818 
819 MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure)
820 {
821  return core_util_atomic_compare_exchange_weak_explicit_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue, success, failure);
822 }
823 
824 MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue)
825 {
826 #if MBED_ATOMIC_PTR_SIZE == 32
827  return core_util_atomic_compare_exchange_weak_u32(
828  (volatile uint32_t *)ptr,
829  (uint32_t *)expectedCurrentValue,
830  (uint32_t)desiredValue);
831 #else
832  return core_util_atomic_compare_exchange_weak_u64(
833  (volatile uint64_t *)ptr,
834  (uint64_t *)expectedCurrentValue,
835  (uint64_t)desiredValue);
836 #endif
837 }
838 
839 MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure)
840 {
841 #if MBED_ATOMIC_PTR_SIZE == 32
842  return core_util_atomic_compare_exchange_weak_explicit_u32(
843  (volatile uint32_t *)ptr,
844  (uint32_t *)expectedCurrentValue,
845  (uint32_t)desiredValue,
846  success, failure);
847 #else
848  return core_util_atomic_compare_exchange_weak_explicit_u64(
849  (volatile uint64_t *)ptr,
850  (uint64_t *)expectedCurrentValue,
851  (uint64_t)desiredValue,
852  success, failure);
853 #endif
854 }
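
/* Usage sketch of the pointer CAS (a minimal lock-free stack push; node_t and
 * push are hypothetical):
 *
 *     typedef struct node { struct node *next; } node_t;
 *
 *     void push(node_t *volatile *head, node_t *n)
 *     {
 *         node_t *old = (node_t *) core_util_atomic_load_ptr((void *const volatile *) head);
 *         do {
 *             n->next = old;
 *         } while (!core_util_atomic_compare_exchange_weak_ptr((void *volatile *) head,
 *                                                              (void **) &old, n));
 *     }
 */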
855 
856 #define DO_MBED_SIGNED_FETCH_OP(name, T, fn_suffix) \
857 MBED_FORCEINLINE T core_util_atomic_##name##_s##fn_suffix(volatile T *valuePtr, T arg) \
858 { \
859  return (T)core_util_atomic_##name##_u##fn_suffix((volatile u##T *)valuePtr, (u##T)arg); \
860 }
861 
862 #define DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, T, fn_suffix) \
863 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_s##fn_suffix(volatile T *valuePtr, T arg, mbed_memory_order order) \
864 { \
865  return (T)core_util_atomic_##name##_explicit_u##fn_suffix((volatile u##T *)valuePtr, (u##T)arg, order); \
866 }
867 
868 #define DO_MBED_SIGNED_FETCH_OPS(name) \
869  DO_MBED_SIGNED_FETCH_OP(name, int8_t, 8) \
870  DO_MBED_SIGNED_FETCH_OP(name, int16_t, 16) \
871  DO_MBED_SIGNED_FETCH_OP(name, int32_t, 32) \
872  DO_MBED_SIGNED_FETCH_OP(name, int64_t, 64)
873 
874 #define DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(name) \
875  DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int8_t, 8) \
876  DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int16_t, 16) \
877  DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int32_t, 32) \
878  DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int64_t, 64)
879 
880 DO_MBED_SIGNED_FETCH_OPS(exchange)
881 DO_MBED_SIGNED_FETCH_OPS(incr)
882 DO_MBED_SIGNED_FETCH_OPS(decr)
883 DO_MBED_SIGNED_FETCH_OPS(fetch_add)
884 DO_MBED_SIGNED_FETCH_OPS(fetch_sub)
885 
886 DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(exchange)
887 DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(fetch_add)
888 DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(fetch_sub)
889 
890 MBED_FORCEINLINE bool core_util_atomic_exchange_bool(volatile bool *valuePtr, bool desiredValue)
891 {
892  return (bool)core_util_atomic_exchange_u8((volatile uint8_t *)valuePtr, desiredValue);
893 }
894 
895 MBED_FORCEINLINE bool core_util_atomic_exchange_explicit_bool(volatile bool *valuePtr, bool desiredValue, mbed_memory_order order)
896 {
897  return (bool)core_util_atomic_exchange_explicit_u8((volatile uint8_t *)valuePtr, desiredValue, order);
898 }
899 
900 inline void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue)
901 {
902 #if MBED_ATOMIC_PTR_SIZE == 32
903  return (void *)core_util_atomic_exchange_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue);
904 #else
905  return (void *)core_util_atomic_exchange_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue);
906 #endif
907 }
908 
909 MBED_FORCEINLINE void *core_util_atomic_exchange_explicit_ptr(void *volatile *valuePtr, void *desiredValue, mbed_memory_order order)
910 {
911 #if MBED_ATOMIC_PTR_SIZE == 32
912  return (void *)core_util_atomic_exchange_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue, order);
913 #else
914  return (void *)core_util_atomic_exchange_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue, order);
915 #endif
916 }
917 
918 inline void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta)
919 {
920 #if MBED_ATOMIC_PTR_SIZE == 32
921  return (void *)core_util_atomic_incr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta);
922 #else
923  return (void *)core_util_atomic_incr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta);
924 #endif
925 }
926 
927 inline void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta)
928 {
929 #if MBED_ATOMIC_PTR_SIZE == 32
930  return (void *)core_util_atomic_decr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta);
931 #else
932  return (void *)core_util_atomic_decr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta);
933 #endif
934 }
935 
936 MBED_FORCEINLINE void *core_util_atomic_fetch_add_ptr(void *volatile *valuePtr, ptrdiff_t arg)
937 {
938 #if MBED_ATOMIC_PTR_SIZE == 32
939  return (void *)core_util_atomic_fetch_add_u32((volatile uint32_t *)valuePtr, (uint32_t)arg);
940 #else
941  return (void *)core_util_atomic_fetch_add_u64((volatile uint64_t *)valuePtr, (uint64_t)arg);
942 #endif
943 }
944 
945 MBED_FORCEINLINE void *core_util_atomic_fetch_add_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
946 {
947 #if MBED_ATOMIC_PTR_SIZE == 32
948  return (void *)core_util_atomic_fetch_add_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)arg, order);
949 #else
950  return (void *)core_util_atomic_fetch_add_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)arg, order);
951 #endif
952 }
953 
954 MBED_FORCEINLINE void *core_util_atomic_fetch_sub_ptr(void *volatile *valuePtr, ptrdiff_t arg)
955 {
956 #if MBED_ATOMIC_PTR_SIZE == 32
957  return (void *)core_util_atomic_fetch_sub_u32((volatile uint32_t *)valuePtr, (uint32_t)arg);
958 #else
959  return (void *)core_util_atomic_fetch_sub_u64((volatile uint64_t *)valuePtr, (uint64_t)arg);
960 #endif
961 }
962 
963 MBED_FORCEINLINE void *core_util_atomic_fetch_sub_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
964 {
965 #if MBED_ATOMIC_PTR_SIZE == 32
966  return (void *)core_util_atomic_fetch_sub_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)arg, order);
967 #else
968  return (void *)core_util_atomic_fetch_sub_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)arg, order);
969 #endif
970 }
971 
972 /***************** DUMMY EXPLICIT ORDERING FOR LOCKED OPS *****************/
973 
974 /* Need to throw away the ordering information for all locked operations */
975 MBED_FORCEINLINE uint64_t core_util_atomic_load_explicit_u64(const volatile uint64_t *valuePtr, MBED_UNUSED mbed_memory_order order)
976 {
977  MBED_CHECK_LOAD_ORDER(order);
978  return core_util_atomic_load_u64(valuePtr);
979 }
980 
980 
981 MBED_FORCEINLINE int64_t core_util_atomic_load_explicit_s64(const volatile int64_t *valuePtr, MBED_UNUSED mbed_memory_order order)
982 {
983  MBED_CHECK_LOAD_ORDER(order);
984  return core_util_atomic_load_s64(valuePtr);
985 }
986 
987 MBED_FORCEINLINE void core_util_atomic_store_explicit_u64(volatile uint64_t *valuePtr, uint64_t desiredValue, MBED_UNUSED mbed_memory_order order)
988 {
989  MBED_CHECK_STORE_ORDER(order);
990  core_util_atomic_store_u64(valuePtr, desiredValue);
991 }
992 
993 MBED_FORCEINLINE void core_util_atomic_store_explicit_s64(volatile int64_t *valuePtr, int64_t desiredValue, MBED_UNUSED mbed_memory_order order)
994 {
995  MBED_CHECK_STORE_ORDER(order);
996  core_util_atomic_store_s64(valuePtr, desiredValue);
997 }
998 
999 #define DO_MBED_LOCKED_FETCH_OP_ORDERING(name, T, fn_suffix) \
1000 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
1001  volatile T *valuePtr, T arg, MBED_UNUSED mbed_memory_order order) \
1002 { \
1003  return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \
1004 }
1005 
1006 #define DO_MBED_LOCKED_CAS_ORDERING(name, T, fn_suffix) \
1007 MBED_FORCEINLINE bool core_util_atomic_##name##_explicit_##fn_suffix( \
1008  volatile T *ptr, T *expectedCurrentValue, T desiredValue, \
1009  MBED_UNUSED mbed_memory_order success, \
1010  MBED_UNUSED mbed_memory_order failure) \
1011 { \
1012  MBED_CHECK_CAS_ORDER(success, failure); \
1013  return core_util_atomic_##name##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \
1014 }
1015 
1016 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(exchange)
1017 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_add)
1018 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_sub)
1019 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_and)
1020 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_or)
1021 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_xor)
1022 DO_MBED_LOCKED_CAS_ORDERINGS(cas)
1023 DO_MBED_LOCKED_CAS_ORDERINGS(compare_exchange_weak)
1024 
1025 #ifdef __cplusplus
1026 } // extern "C"
1027 
1028 /***************** TEMPLATE IMPLEMENTATIONS *****************/
1029 
1030 /* Each of these groups provides specialisations for the T template for each of
1031  * the small types (there is no base implementation), and the base implementation
1032  * of the T * template.
1033  */
1034 #define DO_MBED_ATOMIC_LOAD_TEMPLATE(T, fn_suffix) \
1035 template<> \
1036 inline T core_util_atomic_load(const volatile T *valuePtr) noexcept \
1037 { \
1038  return core_util_atomic_load_##fn_suffix(valuePtr); \
1039 } \
1040  \
1041 template<> \
1042 inline T core_util_atomic_load(const T *valuePtr) noexcept \
1043 { \
1044  return core_util_atomic_load_##fn_suffix(valuePtr); \
1045 } \
1046  \
1047 template<> \
1048 inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) noexcept \
1049 { \
1050  return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
1051 } \
1052  \
1053 template<> \
1054 inline T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) noexcept \
1055 { \
1056  return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
1057 }
1058 
1059 template<typename T>
1060 inline T *core_util_atomic_load(T *const volatile *valuePtr) noexcept
1061 {
1062  return (T *) core_util_atomic_load_ptr((void *const volatile *) valuePtr);
1063 }
1064 
1065 template<typename T>
1066 inline T *core_util_atomic_load(T *const *valuePtr) noexcept
1067 {
1068  return (T *) core_util_atomic_load_ptr((void *const *) valuePtr);
1069 }
1070 
1071 template<typename T>
1072 inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order) noexcept
1073 {
1074  return (T *) core_util_atomic_load_explicit_ptr((void *const volatile *) valuePtr, order);
1075 }
1076 
1077 template<typename T>
1078 inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order) noexcept
1079 {
1080  return (T *) core_util_atomic_load_explicit_ptr((void *const *) valuePtr, order);
1081 }
1082 
1083 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint8_t, u8)
1084 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint16_t, u16)
1085 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint32_t, u32)
1086 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint64_t, u64)
1087 DO_MBED_ATOMIC_LOAD_TEMPLATE(int8_t, s8)
1088 DO_MBED_ATOMIC_LOAD_TEMPLATE(int16_t, s16)
1089 DO_MBED_ATOMIC_LOAD_TEMPLATE(int32_t, s32)
1090 DO_MBED_ATOMIC_LOAD_TEMPLATE(int64_t, s64)
1091 DO_MBED_ATOMIC_LOAD_TEMPLATE(bool, bool)
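
// Example (sketch): with these specialisations an untyped call such as
//     uint32_t v = core_util_atomic_load(&shared_u32);
// dispatches to core_util_atomic_load_u32, while the T* overloads above keep the
// pointer type without casts:
//     buffer_t *b = core_util_atomic_load(&shared_buffer_ptr);   // buffer_t, shared_buffer_ptr hypothetical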
1092 
1093 #define DO_MBED_ATOMIC_STORE_TEMPLATE(T, fn_suffix) \
1094 template<> \
1095 inline void core_util_atomic_store(volatile T *valuePtr, T val) noexcept \
1096 { \
1097  core_util_atomic_store_##fn_suffix(valuePtr, val); \
1098 } \
1099  \
1100 template<> \
1101 inline void core_util_atomic_store(T *valuePtr, T val) noexcept \
1102 { \
1103  core_util_atomic_store_##fn_suffix(valuePtr, val); \
1104 } \
1105  \
1106 template<> \
1107 inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) noexcept \
1108 { \
1109  core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
1110 } \
1111  \
1112 template<> \
1113 inline void core_util_atomic_store_explicit(T *valuePtr, T val, mbed_memory_order order) noexcept \
1114 { \
1115  core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
1116 }
1117 
1118 template<typename T>
1119 inline void core_util_atomic_store(T *volatile *valuePtr, T *val) noexcept
1120 {
1121  core_util_atomic_store_ptr((void *volatile *) valuePtr, val);
1122 }
1123 
1124 template<typename T>
1125 inline void core_util_atomic_store(T **valuePtr, T *val) noexcept
1126 {
1127  core_util_atomic_store_ptr((void **) valuePtr, val);
1128 }
1129 
1130 template<typename T>
1131 inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *val, mbed_memory_order order) noexcept
1132 {
1133  core_util_atomic_store_ptr((void *volatile *) valuePtr, val, order);
1134 }
1135 
1136 template<typename T>
1137 inline void core_util_atomic_store_explicit(T **valuePtr, T *val, mbed_memory_order order) noexcept
1138 {
1139  core_util_atomic_store_ptr((void **) valuePtr, val, order);
1140 }
1141 
1142 DO_MBED_ATOMIC_STORE_TEMPLATE(uint8_t, u8)
1143 DO_MBED_ATOMIC_STORE_TEMPLATE(uint16_t, u16)
1144 DO_MBED_ATOMIC_STORE_TEMPLATE(uint32_t, u32)
1145 DO_MBED_ATOMIC_STORE_TEMPLATE(uint64_t, u64)
1146 DO_MBED_ATOMIC_STORE_TEMPLATE(int8_t, s8)
1147 DO_MBED_ATOMIC_STORE_TEMPLATE(int16_t, s16)
1148 DO_MBED_ATOMIC_STORE_TEMPLATE(int32_t, s32)
1149 DO_MBED_ATOMIC_STORE_TEMPLATE(int64_t, s64)
1150 DO_MBED_ATOMIC_STORE_TEMPLATE(bool, bool)
1151 
1152 #define DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, T, fn_suffix) \
1153 template<> inline \
1154 bool core_util_atomic_##tname(volatile T *ptr, T *expectedCurrentValue, T desiredValue) noexcept \
1155 { \
1156  return core_util_atomic_##fname##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \
1157 }
1158 
1159 template<typename T>
1160 inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept
1161 {
1162  return core_util_atomic_cas_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue);
1163 }
1164 
1165 template<typename T>
1166 inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept
1167 {
1168  return core_util_atomic_compare_exchange_weak_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue);
1169 }
1170 
1171 #define DO_MBED_ATOMIC_CAS_TEMPLATES(tname, fname) \
1172  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint8_t, u8) \
1173  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint16_t, u16) \
1174  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint32_t, u32) \
1175  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint64_t, u64) \
1176  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int8_t, s8) \
1177  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int16_t, s16) \
1178  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int32_t, s32) \
1179  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int64_t, s64) \
1180  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, bool, bool)
1181 
1182 DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_strong, cas)
1183 DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_weak, compare_exchange_weak)
1184 
1185 #define DO_MBED_ATOMIC_OP_TEMPLATE(name, T, fn_suffix) \
1186 template<> \
1187 inline T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept \
1188 { \
1189  return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \
1190 } \
1191  \
1192 template<> \
1193 inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \
1194  mbed_memory_order order) noexcept \
1195 { \
1196  return core_util_atomic_##name##_explicit_##fn_suffix(valuePtr, arg, order); \
1197 }
1198 
1199 
1200 template<>
1201 inline bool core_util_atomic_exchange(volatile bool *valuePtr, bool arg) noexcept
1202 {
1203  return core_util_atomic_exchange_bool(valuePtr, arg);
1204 }
1205 
1206 template<>
1207 inline bool core_util_atomic_exchange_explicit(volatile bool *valuePtr, bool arg, mbed_memory_order order) noexcept
1208 {
1209  return core_util_atomic_exchange_explicit_bool(valuePtr, arg, order);
1210 }
1211 
1212 template<typename T>
1213 inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *arg) noexcept
1214 {
1215  return (T *) core_util_atomic_exchange_ptr((void *volatile *) valuePtr, arg);
1216 }
1217 
1218 template<typename T>
1219 inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *arg, mbed_memory_order order) noexcept
1220 {
1221  return (T *) core_util_atomic_fetch_add_explicit_ptr((void *volatile *) valuePtr, arg, order);
1222 }
1223 
1224 template<typename T>
1225 inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg) noexcept
1226 {
1227  return (T *) core_util_atomic_fetch_add_ptr((void *volatile *) valuePtr, arg * sizeof(T));
1228 }
1229 
1230 template<typename T>
1231 inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept
1232 {
1233  return (T *) core_util_atomic_fetch_add_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order);
1234 }
1235 
1236 template<typename T>
1237 inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg) noexcept
1238 {
1239  return (T *) core_util_atomic_fetch_sub_ptr((void *volatile *) valuePtr, arg * sizeof(T));
1240 }
1241 
1242 template<typename T>
1243 inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept
1244 {
1245  return (T *) core_util_atomic_fetch_sub_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order);
1246 }
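
// Note (sketch): these pointer templates scale by sizeof(T), matching
// std::atomic<T*> semantics, whereas the C-level *_ptr functions earlier in this
// file take an unscaled ptrdiff_t:
//     uint32_t buf[4];
//     uint32_t *p = buf;
//     core_util_atomic_fetch_add(&p, 1);                          // advances p by one uint32_t
//     core_util_atomic_fetch_add_ptr((void *volatile *) &p, 4);   // advances p by 4 bytes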
1247 
1248 
1249 #define DO_MBED_ATOMIC_OP_U_TEMPLATES(name) \
1250  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint8_t, u8) \
1251  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint16_t, u16) \
1252  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint32_t, u32) \
1253  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint64_t, u64)
1254 
1255 #define DO_MBED_ATOMIC_OP_S_TEMPLATES(name) \
1256  DO_MBED_ATOMIC_OP_TEMPLATE(name, int8_t, s8) \
1257  DO_MBED_ATOMIC_OP_TEMPLATE(name, int16_t, s16) \
1258  DO_MBED_ATOMIC_OP_TEMPLATE(name, int32_t, s32) \
1259  DO_MBED_ATOMIC_OP_TEMPLATE(name, int64_t, s64)
1260 
1261 #define DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, T, fn_suffix, postname, OP) \
1262 template<> \
1263 inline T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept \
1264 { \
1265  return core_util_atomic_##postname##_##fn_suffix(valuePtr, arg) OP; \
1266 } \
1267  \
1268 template<> \
1269 inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \
1270  mbed_memory_order order) noexcept \
1271 { \
1272  return core_util_atomic_##postname##_explicit_##fn_suffix(valuePtr, arg, order) OP; \
1273 }
1274 
1275 DO_MBED_ATOMIC_OP_U_TEMPLATES(exchange)
1276 DO_MBED_ATOMIC_OP_S_TEMPLATES(exchange)
1277 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_add)
1278 DO_MBED_ATOMIC_OP_S_TEMPLATES(fetch_add)
1279 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_sub)
1280 DO_MBED_ATOMIC_OP_S_TEMPLATES(fetch_sub)
1281 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_and)
1282 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_or)
1283 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_xor)
1284 
1285 namespace mstd {
1286 namespace impl {
1287 
1288 // Use custom assembler forms for pre-ops where available, else construct from post-ops
1289 #if MBED_EXCLUSIVE_ACCESS
1290 #define DO_MBED_ATOMIC_PRE_OP_TEMPLATES(name, postname, OP) \
1291  template<typename T> T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept; \
1292  template<typename T> T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept; \
1293  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint8_t, u8) \
1294  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint16_t, u16) \
1295  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint32_t, u32) \
1296  DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint64_t, u64, postname, OP)
1297 #else
1298 #define DO_MBED_ATOMIC_PRE_OP_TEMPLATES(name, postname, OP) \
1299  template<typename T> T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept; \
1300  template<typename T> T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept; \
1301  DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint8_t, u8, postname, OP) \
1302  DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint16_t, u16, postname, OP) \
1303  DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint32_t, u32, postname, OP) \
1304  DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint64_t, u64, postname, OP)
1305 #endif
1306 
1307 // *INDENT-OFF*
1308 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(incr, fetch_add, + arg)
1309 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(decr, fetch_sub, - arg)
1310 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(and_fetch, fetch_and, & arg)
1311 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(or_fetch, fetch_or, | arg)
1312 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(xor_fetch, fetch_xor, ^ arg)
1313 // *INDENT-ON*
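
// For example (sketch): where no custom assembler form exists (u64 here, or all
// sizes without MBED_EXCLUSIVE_ACCESS), core_util_atomic_incr is built as
// core_util_atomic_fetch_add(...) + arg, so every pre-op template returns the
// value *after* the operation, matching the lock-free assembler forms above.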
1314 
1315 }
1316 }
1317 
1318 #endif // __cplusplus
1319 
1320 #undef MBED_DOP_REG
1321 #undef MBED_CMP_IMM
1322 #undef MBED_SUB3_IMM
1323 #undef DO_MBED_LOCKFREE_EXCHG_ASM
1324 #undef DO_MBED_LOCKFREE_NEWVAL_2OP_ASM
1325 #undef DO_MBED_LOCKFREE_OLDVAL_3OP_ASM
1326 #undef DO_MBED_LOCKFREE_OLDVAL_2OP_ASM
1327 #undef DO_MBED_LOCKFREE_CAS_WEAK_ASM
1328 #undef DO_MBED_LOCKFREE_CAS_STRONG_ASM
1329 #undef DO_MBED_LOCKFREE_LOADSTORE
1330 #undef DO_MBED_LOCKFREE_EXCHG_OP
1331 #undef DO_MBED_LOCKFREE_CAS_WEAK_OP
1332 #undef DO_MBED_LOCKFREE_CAS_STRONG_OP
1333 #undef DO_MBED_LOCKFREE_NEWVAL_2OP
1334 #undef DO_MBED_LOCKFREE_OLDVAL_2OP
1335 #undef DO_MBED_LOCKFREE_OLDVAL_3OP
1336 #undef DO_MBED_LOCKFREE_EXCHG_OPS
1337 #undef DO_MBED_LOCKFREE_NEWVAL_2OPS
1338 #undef DO_MBED_LOCKFREE_OLDVAL_2OPS
1339 #undef DO_MBED_LOCKFREE_OLDVAL_3OPS
1340 #undef DO_MBED_LOCKFREE_CAS_WEAK_OPS
1341 #undef DO_MBED_LOCKFREE_CAS_STRONG_OPS
1342 #undef DO_MBED_SIGNED_CAS_OP
1343 #undef DO_MBED_SIGNED_CAS_OPS
1344 #undef DO_MBED_SIGNED_FETCH_OP
1345 #undef DO_MBED_SIGNED_EXPLICIT_FETCH_OP
1346 #undef DO_MBED_SIGNED_FETCH_OPS
1347 #undef DO_MBED_SIGNED_EXPLICIT_FETCH_OPS
1348 #undef DO_MBED_LOCKED_FETCH_OP_ORDERINGS
1349 #undef DO_MBED_LOCKED_CAS_ORDERINGS
1350 #undef MBED_ACQUIRE_BARRIER
1351 #undef MBED_RELEASE_BARRIER
1352 #undef MBED_SEQ_CST_BARRIER
1353 #undef DO_MBED_ATOMIC_LOAD_TEMPLATE
1354 #undef DO_MBED_ATOMIC_STORE_TEMPLATE
1355 #undef DO_MBED_ATOMIC_EXCHANGE_TEMPLATE
1356 #undef DO_MBED_ATOMIC_CAS_TEMPLATE
1357 #undef DO_MBED_ATOMIC_CAS_TEMPLATES
1358 #undef DO_MBED_ATOMIC_FETCH_TEMPLATE
1359 #undef DO_MBED_ATOMIC_FETCH_U_TEMPLATES
1360 #undef DO_MBED_ATOMIC_FETCH_S_TEMPLATES
1361 
1362 #endif