mbed_atomic_impl.h
1 /*
2  * Copyright (c) 2015-2016, ARM Limited, All Rights Reserved
3  * SPDX-License-Identifier: Apache-2.0
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License"); you may
6  * not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 
18 #ifndef __MBED_ATOMIC_IMPL_H__
19 #define __MBED_ATOMIC_IMPL_H__
20 
21 #ifndef __MBED_UTIL_ATOMIC_H__
22 #error "mbed_atomic_impl.h is designed to be included only by mbed_atomic.h"
23 #endif
24 
25 #include <stdint.h>
26 #include "cmsis.h"
27 #include "platform/mbed_assert.h"
28 #include "platform/mbed_toolchain.h"
29 
30 #ifdef __cplusplus
31 extern "C" {
32 #endif
33 
34 #ifdef MBED_DEBUG
35 /* Plain loads must not have "release" or "acquire+release" order */
36 #define MBED_CHECK_LOAD_ORDER(order) MBED_ASSERT((order) != mbed_memory_order_release && (order) != mbed_memory_order_acq_rel)
37 
38 /* Plain stores must not have "consume", "acquire" or "acquire+release" order */
39 #define MBED_CHECK_STORE_ORDER(order) MBED_ASSERT((order) != mbed_memory_order_consume && (order) != mbed_memory_order_acquire && (order) != mbed_memory_order_acq_rel)
40 
41 /* Compare exchange needs failure order no stronger than success, and failure can't be "release" or "acquire+release" */
42 #define MBED_CHECK_CAS_ORDER(success, failure) \
43  MBED_ASSERT((failure) <= (success) && (failure) != mbed_memory_order_release && (failure) != mbed_memory_order_acq_rel)
44 #else
45 #define MBED_CHECK_LOAD_ORDER(order) (void)0
46 #define MBED_CHECK_STORE_ORDER(order) (void)0
47 #define MBED_CHECK_CAS_ORDER(success, failure) (void)0
48 #endif
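/* Illustration (not part of the original header) - what the debug order
 * checks above allow and reject, using the public CAS API from
 * mbed_atomic.h. The variable names here are arbitrary.
 *
 *   volatile uint32_t val;
 *   uint32_t expected = 0;
 *
 *   // OK: failure order (acquire) is no stronger than success (acq_rel),
 *   // and is neither release nor acq_rel.
 *   core_util_atomic_cas_explicit_u32(&val, &expected, 1,
 *                                     mbed_memory_order_acq_rel,
 *                                     mbed_memory_order_acquire);
 *
 *   // Asserts under MBED_DEBUG: a failure order of release is not allowed.
 *   core_util_atomic_cas_explicit_u32(&val, &expected, 1,
 *                                     mbed_memory_order_seq_cst,
 *                                     mbed_memory_order_release);
 */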
49 
50 /* This is currently just to silence unit tests, so no better test required */
51 #ifdef __MBED__
52 #define MBED_ATOMIC_PTR_SIZE 32
53 #else
54 #define MBED_ATOMIC_PTR_SIZE 64
55 #endif
56 
57 /* Place barrier after a load or read-modify-write if a consume or acquire operation */
58 #define MBED_ACQUIRE_BARRIER(order) do { \
59  if ((order) & (mbed_memory_order_consume|mbed_memory_order_acquire)) { \
60  MBED_BARRIER(); \
61  } } while (0)
62 
63 /* Place barrier before a store or read-modify-write if a release operation */
64 #define MBED_RELEASE_BARRIER(order) do { \
65  if ((order) & mbed_memory_order_release) { \
66  MBED_BARRIER(); \
67  } } while (0)
68 
69 /* Place barrier after a plain store if sequentially consistent */
70 #define MBED_SEQ_CST_BARRIER(order) do { \
71  if ((order) == mbed_memory_order_seq_cst) { \
72  MBED_BARRIER(); \
73  } } while (0)
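/* Illustration (not part of the original header) - how these barrier macros
 * combine for an explicit store. For
 * core_util_atomic_store_explicit_u32(&v, 1, mbed_memory_order_release)
 * the body reduces to a barrier, the plain store and nothing after it;
 * only mbed_memory_order_seq_cst adds the trailing barrier.
 *
 *   MBED_RELEASE_BARRIER(order);   // barrier when the order includes release
 *   v = 1;
 *   MBED_SEQ_CST_BARRIER(order);   // extra barrier only for seq_cst
 */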
74 
75 
76 
77 #if MBED_EXCLUSIVE_ACCESS
78 
79 /* This header file provides C inline definitions for atomic functions. */
80 /* For C99 inline semantic compatibility, mbed_atomic_impl.c has out-of-line definitions. */
81 
82 /****************************** ASSEMBLER **********************************/
83 
84 // Fiddle about with constraints. These work for GCC and clang, but
85 // IAR appears to be restricted to having only a single constraint,
86 // so we can't do immediates.
87 #if MBED_EXCLUSIVE_ACCESS_THUMB1
88 #define MBED_DOP_REG "l" // Need low register to get 16-bit 3-op ADD/SUB
89 #define MBED_CMP_IMM "I" // CMP 8-bit immediate
90 #define MBED_SUB3_IMM "L" // -7 to +7
91 #else
92 #define MBED_DOP_REG "r" // Can use 32-bit 3-op ADD/SUB, so any registers
93 #define MBED_CMP_IMM "IL" // CMP or CMN, 12-bit immediate
94 #define MBED_SUB3_IMM "IL" // SUB or ADD, 12-bit immediate
95 #endif
96 
97 #if defined __clang__ || defined __GNUC__
98 #define DO_MBED_LOCKFREE_EXCHG_ASM(M) \
99  __asm volatile ( \
100  ".syntax unified\n\t" \
101  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
102  "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \
103  : [oldValue] "=&r" (oldValue), \
104  [fail] "=&r" (fail), \
105  [value] "+Q" (*valuePtr) \
106  : [newValue] "r" (newValue) \
107  : \
108  )
109 #elif defined __ICCARM__
110 /* In IAR "r" means low register if Thumbv1 (there's no way to specify any register...) */
111 #define DO_MBED_LOCKFREE_EXCHG_ASM(M) \
112  asm volatile ( \
113  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
114  "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \
115  : [oldValue] "=&r" (oldValue), \
116  [fail] "=&r" (fail) \
117  : [valuePtr] "r" (valuePtr), \
118  [newValue] "r" (newValue) \
119  : "memory" \
120  )
121 #endif
122 
123 #if defined __clang__ || defined __GNUC__
124 #define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M) \
125  __asm volatile ( \
126  ".syntax unified\n\t" \
127  "LDREX"#M "\t%[newValue], %[value]\n\t" \
128  #OP "\t%[newValue], %[arg]\n\t" \
129  "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \
130  : [newValue] "=&" MBED_DOP_REG (newValue), \
131  [fail] "=&r" (fail), \
132  [value] "+Q" (*valuePtr) \
133  : [arg] Constants MBED_DOP_REG (arg) \
134  : "cc" \
135  )
136 #elif defined __ICCARM__
137 /* In IAR "r" means low register if Thumbv1 (there's no way to specify any register...) */
138 /* IAR does not support "ADDS reg, reg", so write as 3-operand */
139 #define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M) \
140  asm volatile ( \
141  "LDREX"#M "\t%[newValue], [%[valuePtr]]\n" \
142  #OP "\t%[newValue], %[newValue], %[arg]\n" \
143  "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \
144  : [newValue] "=&r" (newValue), \
145  [fail] "=&r" (fail) \
146  : [valuePtr] "r" (valuePtr), \
147  [arg] "r" (arg) \
148  : "memory", "cc" \
149  )
150 #endif
151 
152 #if defined __clang__ || defined __GNUC__
153 #define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M) \
154  __asm volatile ( \
155  ".syntax unified\n\t" \
156  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
157  #OP "\t%[newValue], %[oldValue], %[arg]\n\t" \
158  "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \
159  : [oldValue] "=&" MBED_DOP_REG (oldValue), \
160  [newValue] "=&" MBED_DOP_REG (newValue), \
161  [fail] "=&r" (fail), \
162  [value] "+Q" (*valuePtr) \
163  : [arg] Constants MBED_DOP_REG (arg) \
164  : "cc" \
165  )
166 #elif defined __ICCARM__
167 /* In IAR "r" means low register if Thumbv1 (there's no way to specify any register...) */
168 #define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M) \
169  asm volatile ( \
170  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
171  #OP "\t%[newValue], %[oldValue], %[arg]\n" \
172  "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \
173  : [oldValue] "=&r" (oldValue), \
174  [newValue] "=&r" (newValue), \
175  [fail] "=&r" (fail) \
176  : [valuePtr] "r" (valuePtr), \
177  [arg] "r" (arg) \
178  : "memory", "cc" \
179  )
180 #endif
181 
182 /* Bitwise operations are harder to do in ARMv8-M baseline - there
183  * are only 2-operand versions of the instructions.
184  */
185 #if defined __clang__ || defined __GNUC__
186 #define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M) \
187  __asm volatile ( \
188  ".syntax unified\n\t" \
189  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
190  "MOV" "\t%[newValue], %[oldValue]\n\t" \
191  #OP "\t%[newValue], %[arg]\n\t" \
192  "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \
193  : [oldValue] "=&r" (oldValue), \
194  [newValue] "=&l" (newValue), \
195  [fail] "=&r" (fail), \
196  [value] "+Q" (*valuePtr) \
197  : [arg] Constants "l" (arg) \
198  : "cc" \
199  )
200 #elif defined __ICCARM__
201 #define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M) \
202  asm volatile ( \
203  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
204  "MOV" "\t%[newValue], %[oldValue]\n" \
205  #OP "\t%[newValue], %[arg]\n" \
206  "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \
207  : [oldValue] "=&r" (oldValue), \
208  [newValue] "=&r" (newValue), \
209  [fail] "=&r" (fail) \
210  : [valuePtr] "r" (valuePtr), \
211  [arg] "r" (arg) \
212  : "memory", "cc" \
213  )
214 #endif
215 
216 /* Note that we split ARM and Thumb implementations for CAS, as
217  * the key distinction is the handling of conditions. Thumb-2 IT is
218  * partially deprecated, so avoid it, making Thumb-1 and Thumb-2
219  * implementations the same.
220  */
221 #if MBED_EXCLUSIVE_ACCESS_ARM
222 #if defined __clang__ || defined __GNUC__
223 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
224  __asm volatile ( \
225  ".syntax unified\n\t" \
226  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
227  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n\t"\
228  "STREX"#M"EQ\t%[fail], %[desiredValue], %[value]\n\t" \
229  : [oldValue] "=&r" (oldValue), \
230  [fail] "=&r" (fail), \
231  [value] "+Q" (*ptr) \
232  : [desiredValue] "r" (desiredValue), \
233  [expectedValue] "ILr" (expectedValue) \
234  : "cc" \
235  )
236 #elif defined __ICCARM__
237 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
238  asm volatile ( \
239  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
240  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n" \
241  "STREX"#M"EQ\t%[fail], %[desiredValue], [%[valuePtr]]\n"\
242  : [oldValue] "=&r" (oldValue), \
243  [fail] "=&r" (fail) \
244  : [desiredValue] "r" (desiredValue), \
245  [expectedValue] "r" (expectedValue), \
246  [valuePtr] "r" (ptr) \
247  : "memory", "cc" \
248  )
249 #endif
250 #else // MBED_EXCLUSIVE_ACCESS_ARM
251 #if defined __clang__ || defined __GNUC__
252 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
253  __asm volatile ( \
254  ".syntax unified\n\t" \
255  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
256  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n\t"\
257  "BNE" "\t%=f\n\t" \
258  "STREX"#M "\t%[fail], %[desiredValue], %[value]\n" \
259  "%=:" \
260  : [oldValue] "=&" MBED_DOP_REG (oldValue), \
261  [fail] "=&" MBED_DOP_REG (fail), \
262  [value] "+Q" (*ptr) \
263  : [desiredValue] "r" (desiredValue), \
264  [expectedValue] MBED_SUB3_IMM MBED_DOP_REG (expectedValue) \
265  : "cc" \
266  )
267 #elif defined __ICCARM__
268 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
269  asm volatile ( \
270  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
271  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n" \
272  "BNE" "\tdone\n\t" \
273  "STREX"#M "\t%[fail], %[desiredValue], [%[valuePtr]]\n"\
274  "done:" \
275  : [oldValue] "=&r" (oldValue), \
276  [fail] "=&r" (fail) \
277  : [desiredValue] "r" (desiredValue), \
278  [expectedValue] "r" (expectedValue), \
279  [valuePtr] "r" (ptr) \
280  : "memory", "cc" \
281  )
282 #endif
283 #endif // MBED_EXCLUSIVE_ACCESS_ARM
284 
285 /* For strong CAS, conditional execution is complex enough to
286  * not be worthwhile, so all implementations look like Thumb-1.
287  * (This is the operation for which STREX returning 0 for success
288  * is beneficial.)
289  */
290 #if defined __clang__ || defined __GNUC__
291 #define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M) \
292  __asm volatile ( \
293  ".syntax unified\n\t" \
294  "\n%=:\n\t" \
295  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
296  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n\t"\
297  "BNE" "\t%=f\n" \
298  "STREX"#M "\t%[fail], %[desiredValue], %[value]\n\t" \
299  "CMP" "\t%[fail], #0\n\t" \
300  "BNE" "\t%=b\n" \
301  "%=:" \
302  : [oldValue] "=&" MBED_DOP_REG (oldValue), \
303  [fail] "=&" MBED_DOP_REG (fail), \
304  [value] "+Q" (*ptr) \
305  : [desiredValue] "r" (desiredValue), \
306  [expectedValue] MBED_SUB3_IMM MBED_DOP_REG (expectedValue) \
307  : "cc" \
308  )
309 #elif defined __ICCARM__
310 #define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M) \
311  asm volatile ( \
312  "retry:\n" \
313  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
314  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n" \
315  "BNE" "\tdone\n" \
316  "STREX"#M "\t%[fail], %[desiredValue], [%[valuePtr]]\n"\
317  "CMP" "\t%[fail], #0\n" \
318  "BNE" "\tretry\n" \
319  "done:" \
320  : [oldValue] "=&r" (oldValue), \
321  [fail] "=&r" (fail) \
322  : [desiredValue] "r" (desiredValue), \
323  [expectedValue] "r" (expectedValue), \
324  [valuePtr] "r" (ptr) \
325  : "memory", "cc" \
326  )
327 #endif
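/* Illustration (not part of the original header) - the strong CAS above is,
 * in effect, the following retry loop, sketched with the CMSIS __LDREXW /
 * __STREXW intrinsics purely for readability (u32 case, using the operand
 * names from the macro above). The real implementation is the inline
 * assembly.
 *
 *   uint32_t oldValue;
 *   do {
 *       oldValue = __LDREXW(ptr);
 *       if (oldValue != expectedValue) {
 *           break;                              // genuine mismatch - fail
 *       }
 *   } while (__STREXW(desiredValue, ptr));      // retry on spurious STREX failure
 */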
328 
329 /********************* LOCK-FREE IMPLEMENTATION MACROS ****************/
330 
331 /* Note care taken with types here. Values which the assembler outputs correctly
332  * narrowed, or inputs without caring about width, are marked as type T. Other
333  * values are uint32_t. It's not clear from documentation whether assembler
334  * assumes anything about widths, but try to signal correctly to get necessary
335  * narrowing, and avoid unnecessary narrowing.
336  * Tests show that GCC in particular will pass in unnarrowed values - eg passing
337  * "uint8_t arg = -1" to the assembler as 0xFFFFFFFF. This is fine for, eg, add_u8,
338  * but wouldn't be for compare_and_exchange_u8.
339  * On the other hand, it seems to be impossible to stop GCC inserting narrowing
340  * instructions for the output - it will always put in UXTB for the oldValue of
341  * an operation.
342  */
343 #define DO_MBED_LOCKFREE_EXCHG_OP(T, fn_suffix, M) \
344 inline T core_util_atomic_exchange_##fn_suffix(volatile T *valuePtr, T newValue) \
345 { \
346  T oldValue; \
347  uint32_t fail; \
348  MBED_BARRIER(); \
349  DO_MBED_LOCKFREE_EXCHG_ASM(M); \
350  MBED_BARRIER(); \
351  return oldValue; \
352 } \
353  \
354 MBED_FORCEINLINE T core_util_atomic_exchange_explicit_##fn_suffix( \
355  volatile T *valuePtr, T newValue, mbed_memory_order order) \
356 { \
357  T oldValue; \
358  uint32_t fail; \
359  MBED_RELEASE_BARRIER(order); \
360  DO_MBED_LOCKFREE_EXCHG_ASM(M); \
361  MBED_ACQUIRE_BARRIER(order); \
362  return oldValue; \
363 }
364 
365 #define DO_MBED_LOCKFREE_CAS_WEAK_OP(T, fn_suffix, M) \
366 inline bool core_util_atomic_compare_exchange_weak_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \
367 { \
368  MBED_BARRIER(); \
369  T oldValue; \
370  uint32_t fail, expectedValue = *expectedCurrentValue; \
371  DO_MBED_LOCKFREE_CAS_WEAK_ASM(M); \
372  if (fail) { \
373  *expectedCurrentValue = oldValue; \
374  } \
375  MBED_BARRIER(); \
376  return !fail; \
377 } \
378  \
379 MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) \
380 { \
381  MBED_CHECK_CAS_ORDER(success, failure); \
382  MBED_RELEASE_BARRIER(success); \
383  T oldValue; \
384  uint32_t fail, expectedValue = *expectedCurrentValue; \
385  DO_MBED_LOCKFREE_CAS_WEAK_ASM(M); \
386  if (fail) { \
387  *expectedCurrentValue = oldValue; \
388  } \
389  MBED_ACQUIRE_BARRIER(fail ? failure : success); \
390  return !fail; \
391 }
392 
393 #define DO_MBED_LOCKFREE_CAS_STRONG_OP(T, fn_suffix, M) \
394 inline bool core_util_atomic_cas_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \
395 { \
396  MBED_BARRIER(); \
397  T oldValue; \
398  uint32_t fail, expectedValue = *expectedCurrentValue; \
399  DO_MBED_LOCKFREE_CAS_STRONG_ASM(M); \
400  if (fail) { \
401  *expectedCurrentValue = oldValue; \
402  } \
403  MBED_BARRIER(); \
404  return !fail; \
405 } \
406  \
407 MBED_FORCEINLINE bool core_util_atomic_cas_explicit_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) \
408 { \
409  MBED_CHECK_CAS_ORDER(success, failure); \
410  MBED_RELEASE_BARRIER(success); \
411  T oldValue; \
412  uint32_t fail, expectedValue = *expectedCurrentValue; \
413  DO_MBED_LOCKFREE_CAS_STRONG_ASM(M); \
414  if (fail) { \
415  *expectedCurrentValue = oldValue; \
416  } \
417  MBED_ACQUIRE_BARRIER(fail ? failure : success); \
418  return !fail; \
419 }
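/* Illustration (not part of the original header) - typical use of the strong
 * CAS defined above: a lock-free "store maximum". On failure the CAS writes
 * the currently stored value back into 'expected', so the loop does not need
 * a separate re-read. The function name is arbitrary.
 *
 *   void atomic_store_max_u32(volatile uint32_t *target, uint32_t value)
 *   {
 *       uint32_t expected = core_util_atomic_load_u32(target);
 *       while (expected < value &&
 *              !core_util_atomic_cas_u32(target, &expected, value)) {
 *           // 'expected' now holds the latest value; the loop re-tests it
 *       }
 *   }
 */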
420 
421 
422 #define DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, T, fn_suffix, M) \
423 inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \
424 { \
425  uint32_t fail, newValue; \
426  MBED_BARRIER(); \
427  do { \
428  DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M); \
429  } while (fail); \
430  MBED_BARRIER(); \
431  return (T) newValue; \
432 } \
433  \
434 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
435  volatile T *valuePtr, T arg, mbed_memory_order order) \
436 { \
437  uint32_t fail, newValue; \
438  MBED_RELEASE_BARRIER(order); \
439  do { \
440  DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M); \
441  } while (fail); \
442  MBED_ACQUIRE_BARRIER(order); \
443  return (T) newValue; \
444 } \
445 
446 #define DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, T, fn_suffix, M) \
447 inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \
448 { \
449  T oldValue; \
450  uint32_t fail, newValue; \
451  MBED_BARRIER(); \
452  do { \
453  DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M); \
454  } while (fail); \
455  MBED_BARRIER(); \
456  return oldValue; \
457 } \
458  \
459 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
460  volatile T *valuePtr, T arg, mbed_memory_order order) \
461 { \
462  T oldValue; \
463  uint32_t fail, newValue; \
464  MBED_RELEASE_BARRIER(order); \
465  do { \
466  DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M); \
467  } while (fail); \
468  MBED_ACQUIRE_BARRIER(order); \
469  return oldValue; \
470 } \
471 
472 #define DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, T, fn_suffix, M) \
473 inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) { \
474  T oldValue; \
475  uint32_t fail, newValue; \
476  MBED_BARRIER(); \
477  do { \
478  DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M); \
479  } while (fail); \
480  MBED_BARRIER(); \
481  return oldValue; \
482 } \
483  \
484 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
485  volatile T *valuePtr, T arg, mbed_memory_order order) \
486 { \
487  T oldValue; \
488  uint32_t fail, newValue; \
489  MBED_RELEASE_BARRIER(order); \
490  do { \
491  DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M); \
492  } while (fail); \
493  MBED_ACQUIRE_BARRIER(order); \
494  return oldValue; \
495 } \
496 
497 inline bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *valuePtr)
498 {
499  MBED_BARRIER();
500  bool oldValue, newValue = true;
501  uint32_t fail;
502  do {
503  DO_MBED_LOCKFREE_EXCHG_ASM(B);
504  } while (fail);
505  MBED_BARRIER();
506  return oldValue;
507 }
508 
509 MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *valuePtr, mbed_memory_order order)
510 {
511  MBED_RELEASE_BARRIER(order);
512  bool oldValue, newValue = true;
513  uint32_t fail;
514  do {
515  DO_MBED_LOCKFREE_EXCHG_ASM(B);
516  } while (fail);
517  MBED_ACQUIRE_BARRIER(order);
518  return oldValue;
519 }
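/* Illustration (not part of the original header) - using the flag primitives
 * as a minimal busy-wait guard. Assumes zero-initialisation leaves the flag
 * clear, matching the _flag member used elsewhere in this file. The names
 * are arbitrary.
 *
 *   static volatile core_util_atomic_flag guard;   // zero-initialised = clear
 *
 *   void with_guard(void (*fn)(void))
 *   {
 *       while (core_util_atomic_flag_test_and_set(&guard)) {
 *           // spin until the current holder calls core_util_atomic_flag_clear
 *       }
 *       fn();
 *       core_util_atomic_flag_clear(&guard);
 *   }
 */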
520 
521 /********************* LOCK-FREE IMPLEMENTATION DEFINITIONS ****************/
522 
523 #define DO_MBED_LOCKFREE_EXCHG_OPS() \
524  DO_MBED_LOCKFREE_EXCHG_OP(uint8_t, u8, B) \
525  DO_MBED_LOCKFREE_EXCHG_OP(uint16_t, u16, H) \
526  DO_MBED_LOCKFREE_EXCHG_OP(uint32_t, u32, )
527 
528 #define DO_MBED_LOCKFREE_NEWVAL_2OPS(name, OP, Constants) \
529  DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint8_t, u8, B) \
530  DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint16_t, u16, H) \
531  DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint32_t, u32, )
532 
533 #define DO_MBED_LOCKFREE_OLDVAL_3OPS(name, OP, Constants) \
534  DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint8_t, u8, B) \
535  DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint16_t, u16, H) \
536  DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint32_t, u32, )
537 
538 #define DO_MBED_LOCKFREE_OLDVAL_2OPS(name, OP, Constants) \
539  DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint8_t, u8, B) \
540  DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint16_t, u16, H) \
541  DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint32_t, u32, )
542 
543 #define DO_MBED_LOCKFREE_CAS_STRONG_OPS() \
544  DO_MBED_LOCKFREE_CAS_STRONG_OP(uint8_t, u8, B) \
545  DO_MBED_LOCKFREE_CAS_STRONG_OP(uint16_t, u16, H) \
546  DO_MBED_LOCKFREE_CAS_STRONG_OP(uint32_t, u32, )
547 
548 #define DO_MBED_LOCKFREE_CAS_WEAK_OPS() \
549  DO_MBED_LOCKFREE_CAS_WEAK_OP(uint8_t, u8, B) \
550  DO_MBED_LOCKFREE_CAS_WEAK_OP(uint16_t, u16, H) \
551  DO_MBED_LOCKFREE_CAS_WEAK_OP(uint32_t, u32, )
552 
553 // Note that these macros define a number of functions that are
554 // not in mbed_atomic.h, like core_util_atomic_and_fetch_u16.
555 // These are not documented via the doxygen in mbed_atomic.h, so
556 // for now should be regarded as internal only. They are used by the
557 // Atomic<T> template as an optimisation though.
558 
559 // We always use the "S" form of operations - avoids yet another
560 // possible unneeded distinction between Thumbv1 and Thumbv2, and
561 // may reduce code size by allowing 16-bit instructions.
562 #if !MBED_EXCLUSIVE_ACCESS_THUMB1
563 // I constraint is 12-bit modified immediate constant
564 // L constraint is negated 12-bit modified immediate constant
565 // (relying on assembler to swap ADD/SUB)
566 // We could permit J (-4095 to +4095) if we used ADD/SUB
567 // instead of ADDS/SUBS, but then that would block generation
568 // of the 16-bit forms. Shame we can't specify "don't care"
569 // for the "S", or get the GNU multi-alternative to
570 // choose ADDS/ADD appropriately.
571 
572 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_add, ADDS, "IL")
573 DO_MBED_LOCKFREE_NEWVAL_2OPS(incr, ADDS, "IL")
574 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_sub, SUBS, "IL")
575 DO_MBED_LOCKFREE_NEWVAL_2OPS(decr, SUBS, "IL")
576 // K constraint is inverted 12-bit modified immediate constant
577 // (relying on assembler substituting BIC for AND)
578 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_and, ANDS, "IK")
579 DO_MBED_LOCKFREE_NEWVAL_2OPS(and_fetch, ANDS, "IK")
580 #if MBED_EXCLUSIVE_ACCESS_ARM
581 // ARM does not have ORN instruction, so take plain immediates.
582 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_or, ORRS, "I")
583 DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch, ORRS, "I")
584 #else
585 // Thumb-2 has ORN instruction, and assembler substitutes ORN for ORR.
586 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_or, ORRS, "IK")
587 DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch, ORRS, "IK")
588 #endif
589 // I constraint is 12-bit modified immediate operand
590 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_xor, EORS, "I")
591 DO_MBED_LOCKFREE_NEWVAL_2OPS(xor_fetch, EORS, "I")
592 #else // MBED_EXCLUSIVE_ACCESS_THUMB1
593 // I constraint is 0-255; J is -255 to -1, suitable for
594 // 2-op ADD/SUB (relying on assembler to swap ADD/SUB)
595 // L constraint is -7 to +7, suitable for 3-op ADD/SUB
596 // (relying on assembler to swap ADD/SUB)
597 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_add, ADDS, "L")
598 DO_MBED_LOCKFREE_NEWVAL_2OPS(incr, ADDS, "IJ")
599 DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_sub, SUBS, "L")
600 DO_MBED_LOCKFREE_NEWVAL_2OPS(decr, SUBS, "IJ")
601 DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_and, ANDS, "")
602 DO_MBED_LOCKFREE_NEWVAL_2OPS(and_fetch, ANDS, "")
603 DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_or, ORRS, "")
604 DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch, ORRS, "")
605 DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_xor, EORS, "")
606 DO_MBED_LOCKFREE_NEWVAL_2OPS(xor_fetch, EORS, "")
607 #endif
608 
609 DO_MBED_LOCKFREE_EXCHG_OPS()
610 DO_MBED_LOCKFREE_CAS_STRONG_OPS()
611 DO_MBED_LOCKFREE_CAS_WEAK_OPS()
612 
613 #define DO_MBED_LOCKED_FETCH_OP_ORDERINGS(name) \
614  DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint64_t, u64)
615 #define DO_MBED_LOCKED_CAS_ORDERINGS(name) \
616  DO_MBED_LOCKED_CAS_ORDERING(name, uint64_t, u64)
617 #else // MBED_EXCLUSIVE_ACCESS
618 /* All the operations are locked, so need no ordering parameters */
619 #define DO_MBED_LOCKED_FETCH_OP_ORDERINGS(name) \
620  DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint8_t, u8) \
621  DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint16_t, u16) \
622  DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint32_t, u32) \
623  DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint64_t, u64)
624 #define DO_MBED_LOCKED_CAS_ORDERINGS(name) \
625  DO_MBED_LOCKED_CAS_ORDERING(name, uint8_t, u8) \
626  DO_MBED_LOCKED_CAS_ORDERING(name, uint16_t, u16) \
627  DO_MBED_LOCKED_CAS_ORDERING(name, uint32_t, u32) \
628  DO_MBED_LOCKED_CAS_ORDERING(name, uint64_t, u64)
629 
630 MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *valuePtr, MBED_UNUSED mbed_memory_order order)
631 {
632  return core_util_atomic_flag_test_and_set(valuePtr);
633 }
634 #endif // MBED_EXCLUSIVE_ACCESS
635 
636 /********************* OPERATIONS THAT ARE ALWAYS LOCK-FREE ****************/
637 
638 /* Lock-free loads and stores don't need assembler - just aligned accesses */
639 /* Silly ordering of `T volatile` is because T can be `void *` */
640 #define DO_MBED_LOCKFREE_LOADSTORE(T, V, fn_suffix) \
641 MBED_FORCEINLINE T core_util_atomic_load_##fn_suffix(T const V *valuePtr) \
642 { \
643  T value = *valuePtr; \
644  MBED_BARRIER(); \
645  return value; \
646 } \
647  \
648 MBED_FORCEINLINE T core_util_atomic_load_explicit_##fn_suffix(T const V *valuePtr, mbed_memory_order order) \
649 { \
650  MBED_CHECK_LOAD_ORDER(order); \
651  T value = *valuePtr; \
652  MBED_ACQUIRE_BARRIER(order); \
653  return value; \
654 } \
655  \
656 MBED_FORCEINLINE void core_util_atomic_store_##fn_suffix(T V *valuePtr, T value) \
657 { \
658  MBED_BARRIER(); \
659  *valuePtr = value; \
660  MBED_BARRIER(); \
661 } \
662  \
663 MBED_FORCEINLINE void core_util_atomic_store_explicit_##fn_suffix(T V *valuePtr, T value, mbed_memory_order order) \
664 { \
665  MBED_CHECK_STORE_ORDER(order); \
666  MBED_RELEASE_BARRIER(order); \
667  *valuePtr = value; \
668  MBED_SEQ_CST_BARRIER(order); \
669 }
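/* Illustration (not part of the original header) - a release/acquire
 * handshake built from the load/store pair the macro above generates.
 *
 *   static uint32_t data;
 *   static volatile uint32_t ready;
 *
 *   void producer(void)
 *   {
 *       data = 42;
 *       core_util_atomic_store_explicit_u32(&ready, 1, mbed_memory_order_release);
 *   }
 *
 *   uint32_t consumer(void)
 *   {
 *       while (!core_util_atomic_load_explicit_u32(&ready, mbed_memory_order_acquire)) {
 *       }
 *       return data;
 *   }
 */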
670 
671 MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr)
672 {
673  MBED_BARRIER();
674  flagPtr->_flag = false;
675  MBED_BARRIER();
676 }
677 
678 MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(volatile core_util_atomic_flag *flagPtr, mbed_memory_order order)
679 {
680  MBED_CHECK_STORE_ORDER(order);
681  MBED_RELEASE_BARRIER(order);
682  flagPtr->_flag = false;
683  MBED_SEQ_CST_BARRIER(order);
684 }
685 
686 #ifdef __cplusplus
687 // Temporarily turn off extern "C", so we can provide non-volatile load/store
688 // overloads for efficiency. All these functions are static inline, so this has
689 // no effect on linkage; it just permits the overloads.
690 } // extern "C"
691 
692 // For efficiency it's worth having non-volatile overloads
693 MBED_FORCEINLINE void core_util_atomic_flag_clear(core_util_atomic_flag *flagPtr)
694 {
695  MBED_BARRIER();
696  flagPtr->_flag = false;
697  MBED_BARRIER();
698 }
699 
700 MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(core_util_atomic_flag *flagPtr, mbed_memory_order order)
701 {
702  MBED_RELEASE_BARRIER(order);
703  flagPtr->_flag = false;
704  MBED_SEQ_CST_BARRIER(order);
705 }
706 
707 DO_MBED_LOCKFREE_LOADSTORE(uint8_t,, u8)
708 DO_MBED_LOCKFREE_LOADSTORE(uint16_t,, u16)
709 DO_MBED_LOCKFREE_LOADSTORE(uint32_t,, u32)
710 DO_MBED_LOCKFREE_LOADSTORE(int8_t,, s8)
711 DO_MBED_LOCKFREE_LOADSTORE(int16_t,, s16)
712 DO_MBED_LOCKFREE_LOADSTORE(int32_t,, s32)
713 DO_MBED_LOCKFREE_LOADSTORE(bool,, bool)
714 DO_MBED_LOCKFREE_LOADSTORE(void *,, ptr)
715 
716 #endif
717 
718 DO_MBED_LOCKFREE_LOADSTORE(uint8_t, volatile, u8)
719 DO_MBED_LOCKFREE_LOADSTORE(uint16_t, volatile, u16)
720 DO_MBED_LOCKFREE_LOADSTORE(uint32_t, volatile, u32)
721 DO_MBED_LOCKFREE_LOADSTORE(int8_t, volatile, s8)
722 DO_MBED_LOCKFREE_LOADSTORE(int16_t, volatile, s16)
723 DO_MBED_LOCKFREE_LOADSTORE(int32_t, volatile, s32)
724 DO_MBED_LOCKFREE_LOADSTORE(bool, volatile, bool)
725 DO_MBED_LOCKFREE_LOADSTORE(void *, volatile, ptr)
726 
727 #ifdef __cplusplus
728 extern "C" {
729 #endif
730 
731 /********************* GENERIC VARIANTS - SIGNED, BOOL, POINTERS ****************/
732 
733 MBED_FORCEINLINE int64_t core_util_atomic_load_s64(const volatile int64_t *valuePtr)
734 {
735  return (int64_t)core_util_atomic_load_u64((const volatile uint64_t *)valuePtr);
736 }
737 
738 MBED_FORCEINLINE void core_util_atomic_store_s64(volatile int64_t *valuePtr, int64_t desiredValue)
739 {
740  core_util_atomic_store_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue);
741 }
742 
743 #define DO_MBED_SIGNED_CAS_OP(name, T, fn_suffix) \
744 MBED_FORCEINLINE bool core_util_atomic_##name##_s##fn_suffix(volatile T *ptr, \
745  T *expectedCurrentValue, T desiredValue) \
746 { \
747  return core_util_atomic_##name##_u##fn_suffix((volatile u##T *)ptr, \
748  (u##T *)expectedCurrentValue, (u##T)desiredValue); \
749 } \
750  \
751 MBED_FORCEINLINE bool core_util_atomic_##name##_explicit_s##fn_suffix(volatile T *ptr, \
752  T *expectedCurrentValue, T desiredValue, \
753  mbed_memory_order success, mbed_memory_order failure) \
754 { \
755  return core_util_atomic_##name##_explicit_u##fn_suffix((volatile u##T *)ptr, \
756  (u##T *)expectedCurrentValue, (u##T)desiredValue, success, failure); \
757 }
758 
759 #define DO_MBED_SIGNED_CAS_OPS(name) \
760  DO_MBED_SIGNED_CAS_OP(name, int8_t, 8) \
761  DO_MBED_SIGNED_CAS_OP(name, int16_t, 16) \
762  DO_MBED_SIGNED_CAS_OP(name, int32_t, 32) \
763  DO_MBED_SIGNED_CAS_OP(name, int64_t, 64)
764 
765 DO_MBED_SIGNED_CAS_OPS(cas)
766 DO_MBED_SIGNED_CAS_OPS(compare_exchange_weak)
767 
768 MBED_FORCEINLINE bool core_util_atomic_cas_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue)
769 {
770  return core_util_atomic_cas_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue);
771 }
772 
773 MBED_FORCEINLINE bool core_util_atomic_cas_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure)
774 {
775  return core_util_atomic_cas_explicit_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue, success, failure);
776 }
777 
778 inline bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue)
779 {
780 #if MBED_ATOMIC_PTR_SIZE == 32
781  return core_util_atomic_cas_u32(
782  (volatile uint32_t *)ptr,
783  (uint32_t *)expectedCurrentValue,
784  (uint32_t)desiredValue);
785 #else
786  return core_util_atomic_cas_u64(
787  (volatile uint64_t *)ptr,
788  (uint64_t *)expectedCurrentValue,
789  (uint64_t)desiredValue);
790 #endif
791 }
792 
793 MBED_FORCEINLINE bool core_util_atomic_cas_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure)
794 {
795 #if MBED_ATOMIC_PTR_SIZE == 32
796  return core_util_atomic_cas_explicit_u32(
797  (volatile uint32_t *)ptr,
798  (uint32_t *)expectedCurrentValue,
799  (uint32_t)desiredValue,
800  success, failure);
801 #else
802  return core_util_atomic_cas_explicit_u64(
803  (volatile uint64_t *)ptr,
804  (uint64_t *)expectedCurrentValue,
805  (uint64_t)desiredValue,
806  success, failure);
807 #endif
808 }
809 
810 MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue)
811 {
812  return core_util_atomic_compare_exchange_weak_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue);
813 }
814 
815 MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure)
816 {
817  return core_util_atomic_compare_exchange_weak_explicit_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue, success, failure);
818 }
819 
820 MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue)
821 {
822 #if MBED_ATOMIC_PTR_SIZE == 32
823  return core_util_atomic_compare_exchange_weak_u32(
824  (volatile uint32_t *)ptr,
825  (uint32_t *)expectedCurrentValue,
826  (uint32_t)desiredValue);
827 #else
828  return core_util_atomic_compare_exchange_weak_u64(
829  (volatile uint64_t *)ptr,
830  (uint64_t *)expectedCurrentValue,
831  (uint64_t)desiredValue);
832 #endif
833 }
834 
835 MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure)
836 {
837 #if MBED_ATOMIC_PTR_SIZE == 32
838  return core_util_atomic_compare_exchange_weak_explicit_u32(
839  (volatile uint32_t *)ptr,
840  (uint32_t *)expectedCurrentValue,
841  (uint32_t)desiredValue,
842  success, failure);
843 #else
844  return core_util_atomic_compare_exchange_weak_explicit_u64(
845  (volatile uint64_t *)ptr,
846  (uint64_t *)expectedCurrentValue,
847  (uint64_t)desiredValue,
848  success, failure);
849 #endif
850 }
851 
852 #define DO_MBED_SIGNED_FETCH_OP(name, T, fn_suffix) \
853 MBED_FORCEINLINE T core_util_atomic_##name##_s##fn_suffix(volatile T *valuePtr, T arg) \
854 { \
855  return (T)core_util_atomic_##name##_u##fn_suffix((volatile u##T *)valuePtr, (u##T)arg); \
856 }
857 
858 #define DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, T, fn_suffix) \
859 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_s##fn_suffix(volatile T *valuePtr, T arg, mbed_memory_order order) \
860 { \
861  return (T)core_util_atomic_##name##_explicit_u##fn_suffix((volatile u##T *)valuePtr, (u##T)arg, order); \
862 }
863 
864 #define DO_MBED_SIGNED_FETCH_OPS(name) \
865  DO_MBED_SIGNED_FETCH_OP(name, int8_t, 8) \
866  DO_MBED_SIGNED_FETCH_OP(name, int16_t, 16) \
867  DO_MBED_SIGNED_FETCH_OP(name, int32_t, 32) \
868  DO_MBED_SIGNED_FETCH_OP(name, int64_t, 64)
869 
870 #define DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(name) \
871  DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int8_t, 8) \
872  DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int16_t, 16) \
873  DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int32_t, 32) \
874  DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int64_t, 64)
875 
876 DO_MBED_SIGNED_FETCH_OPS(exchange)
877 DO_MBED_SIGNED_FETCH_OPS(incr)
878 DO_MBED_SIGNED_FETCH_OPS(decr)
879 DO_MBED_SIGNED_FETCH_OPS(fetch_add)
880 DO_MBED_SIGNED_FETCH_OPS(fetch_sub)
881 
882 DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(exchange)
883 DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(fetch_add)
884 DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(fetch_sub)
885 
886 MBED_FORCEINLINE bool core_util_atomic_exchange_bool(volatile bool *valuePtr, bool desiredValue)
887 {
888  return (bool)core_util_atomic_exchange_u8((volatile uint8_t *)valuePtr, desiredValue);
889 }
890 
891 MBED_FORCEINLINE bool core_util_atomic_exchange_explicit_bool(volatile bool *valuePtr, bool desiredValue, mbed_memory_order order)
892 {
893  return (bool)core_util_atomic_exchange_explicit_u8((volatile uint8_t *)valuePtr, desiredValue, order);
894 }
895 
896 inline void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue)
897 {
898 #if MBED_ATOMIC_PTR_SIZE == 32
899  return (void *)core_util_atomic_exchange_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue);
900 #else
901  return (void *)core_util_atomic_exchange_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue);
902 #endif
903 }
904 
905 MBED_FORCEINLINE void *core_util_atomic_exchange_explicit_ptr(void *volatile *valuePtr, void *desiredValue, mbed_memory_order order)
906 {
907 #if MBED_ATOMIC_PTR_SIZE == 32
908  return (void *)core_util_atomic_exchange_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue, order);
909 #else
910  return (void *)core_util_atomic_exchange_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue, order);
911 #endif
912 }
913 
914 inline void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta)
915 {
916 #if MBED_ATOMIC_PTR_SIZE == 32
917  return (void *)core_util_atomic_incr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta);
918 #else
919  return (void *)core_util_atomic_incr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta);
920 #endif
921 }
922 
923 inline void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta)
924 {
925 #if MBED_ATOMIC_PTR_SIZE == 32
926  return (void *)core_util_atomic_decr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta);
927 #else
928  return (void *)core_util_atomic_decr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta);
929 #endif
930 }
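/* Illustration (not part of the original header) - the pointer operations
 * above add the raw byte delta with no sizeof() scaling (the C++ templates
 * later in this file scale by sizeof(T)), so stepping through an array from
 * C means passing the element size explicitly. The names are arbitrary.
 *
 *   static uint32_t buffer[8];
 *   static uint32_t *volatile head = buffer;
 *
 *   // advance by one element, i.e. sizeof(uint32_t) bytes
 *   core_util_atomic_incr_ptr((void *volatile *)&head, sizeof(uint32_t));
 */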
931 
932 MBED_FORCEINLINE void *core_util_atomic_fetch_add_ptr(void *volatile *valuePtr, ptrdiff_t arg)
933 {
934 #if MBED_ATOMIC_PTR_SIZE == 32
935  return (void *)core_util_atomic_fetch_add_u32((volatile uint32_t *)valuePtr, (uint32_t)arg);
936 #else
937  return (void *)core_util_atomic_fetch_add_u64((volatile uint64_t *)valuePtr, (uint64_t)arg);
938 #endif
939 }
940 
941 MBED_FORCEINLINE void *core_util_atomic_fetch_add_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
942 {
943 #if MBED_ATOMIC_PTR_SIZE == 32
944  return (void *)core_util_atomic_fetch_add_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)arg, order);
945 #else
946  return (void *)core_util_atomic_fetch_add_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)arg, order);
947 #endif
948 }
949 
950 MBED_FORCEINLINE void *core_util_atomic_fetch_sub_ptr(void *volatile *valuePtr, ptrdiff_t arg)
951 {
952 #if MBED_ATOMIC_PTR_SIZE == 32
953  return (void *)core_util_atomic_fetch_sub_u32((volatile uint32_t *)valuePtr, (uint32_t)arg);
954 #else
955  return (void *)core_util_atomic_fetch_sub_u64((volatile uint64_t *)valuePtr, (uint64_t)arg);
956 #endif
957 }
958 
959 MBED_FORCEINLINE void *core_util_atomic_fetch_sub_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
960 {
961 #if MBED_ATOMIC_PTR_SIZE == 32
962  return (void *)core_util_atomic_fetch_sub_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)arg, order);
963 #else
964  return (void *)core_util_atomic_fetch_sub_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)arg, order);
965 #endif
966 }
967 
968 /***************** DUMMY EXPLICIT ORDERING FOR LOCKED OPS *****************/
969 
970 /* Need to throw away the ordering information for all locked operations */
971 MBED_FORCEINLINE uint64_t core_util_atomic_load_explicit_u64(const volatile uint64_t *valuePtr, MBED_UNUSED mbed_memory_order order)
972 {
973  MBED_CHECK_LOAD_ORDER(order);
974  return core_util_atomic_load_u64(valuePtr);
975 }
976 
977 MBED_FORCEINLINE int64_t core_util_atomic_load_explicit_s64(const volatile int64_t *valuePtr, MBED_UNUSED mbed_memory_order order)
978 {
979  MBED_CHECK_LOAD_ORDER(order);
980  return core_util_atomic_load_s64(valuePtr);
981 }
982 
983 MBED_FORCEINLINE void core_util_atomic_store_explicit_u64(volatile uint64_t *valuePtr, uint64_t desiredValue, MBED_UNUSED mbed_memory_order order)
984 {
985  MBED_CHECK_STORE_ORDER(order);
986  core_util_atomic_store_u64(valuePtr, desiredValue);
987 }
988 
989 MBED_FORCEINLINE void core_util_atomic_store_explicit_s64(volatile int64_t *valuePtr, int64_t desiredValue, MBED_UNUSED mbed_memory_order order)
990 {
991  MBED_CHECK_STORE_ORDER(order);
992  core_util_atomic_store_s64(valuePtr, desiredValue);
993 }
994 
995 #define DO_MBED_LOCKED_FETCH_OP_ORDERING(name, T, fn_suffix) \
996 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
997  volatile T *valuePtr, T arg, MBED_UNUSED mbed_memory_order order) \
998 { \
999  return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \
1000 }
1001 
1002 #define DO_MBED_LOCKED_CAS_ORDERING(name, T, fn_suffix) \
1003 MBED_FORCEINLINE bool core_util_atomic_##name##_explicit_##fn_suffix( \
1004  volatile T *ptr, T *expectedCurrentValue, T desiredValue, \
1005  MBED_UNUSED mbed_memory_order success, \
1006  MBED_UNUSED mbed_memory_order failure) \
1007 { \
1008  MBED_CHECK_CAS_ORDER(success, failure); \
1009  return core_util_atomic_##name##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \
1010 }
1011 
1012 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(exchange)
1013 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_add)
1014 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_sub)
1015 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_and)
1016 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_or)
1017 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_xor)
1018 DO_MBED_LOCKED_CAS_ORDERINGS(cas)
1019 DO_MBED_LOCKED_CAS_ORDERINGS(compare_exchange_weak)
1020 
1021 #ifdef __cplusplus
1022 } // extern "C"
1023 
1024 /***************** TEMPLATE IMPLEMENTATIONS *****************/
1025 
1026 /* Each of these groups provides specialisations for the T template for each of
1027  * the small types (there is no base implementation), and the base implementation
1028  * of the T * template.
1029  */
1030 #define DO_MBED_ATOMIC_LOAD_TEMPLATE(T, fn_suffix) \
1031 template<> \
1032 inline T core_util_atomic_load(const volatile T *valuePtr) noexcept \
1033 { \
1034  return core_util_atomic_load_##fn_suffix(valuePtr); \
1035 } \
1036  \
1037 template<> \
1038 inline T core_util_atomic_load(const T *valuePtr) noexcept \
1039 { \
1040  return core_util_atomic_load_##fn_suffix(valuePtr); \
1041 } \
1042  \
1043 template<> \
1044 inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) noexcept \
1045 { \
1046  return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
1047 } \
1048  \
1049 template<> \
1050 inline T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) noexcept \
1051 { \
1052  return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
1053 }
1054 
1055 template<typename T>
1056 inline T *core_util_atomic_load(T *const volatile *valuePtr) noexcept
1057 {
1058  return (T *) core_util_atomic_load_ptr((void *const volatile *) valuePtr);
1059 }
1060 
1061 template<typename T>
1062 inline T *core_util_atomic_load(T *const *valuePtr) noexcept
1063 {
1064  return (T *) core_util_atomic_load_ptr((void *const *) valuePtr);
1065 }
1066 
1067 template<typename T>
1068 inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order) noexcept
1069 {
1070  return (T *) core_util_atomic_load_explicit_ptr((void *const volatile *) valuePtr, order);
1071 }
1072 
1073 template<typename T>
1074 inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order) noexcept
1075 {
1076  return (T *) core_util_atomic_load_explicit_ptr((void *const *) valuePtr, order);
1077 }
1078 
1079 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint8_t, u8)
1080 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint16_t, u16)
1081 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint32_t, u32)
1082 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint64_t, u64)
1083 DO_MBED_ATOMIC_LOAD_TEMPLATE(int8_t, s8)
1084 DO_MBED_ATOMIC_LOAD_TEMPLATE(int16_t, s16)
1085 DO_MBED_ATOMIC_LOAD_TEMPLATE(int32_t, s32)
1086 DO_MBED_ATOMIC_LOAD_TEMPLATE(int64_t, s64)
1087 DO_MBED_ATOMIC_LOAD_TEMPLATE(bool, bool)
1088 
1089 #define DO_MBED_ATOMIC_STORE_TEMPLATE(T, fn_suffix) \
1090 template<> \
1091 inline void core_util_atomic_store(volatile T *valuePtr, T val) noexcept \
1092 { \
1093  core_util_atomic_store_##fn_suffix(valuePtr, val); \
1094 } \
1095  \
1096 template<> \
1097 inline void core_util_atomic_store(T *valuePtr, T val) noexcept \
1098 { \
1099  core_util_atomic_store_##fn_suffix(valuePtr, val); \
1100 } \
1101  \
1102 template<> \
1103 inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) noexcept \
1104 { \
1105  core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
1106 } \
1107  \
1108 template<> \
1109 inline void core_util_atomic_store_explicit(T *valuePtr, T val, mbed_memory_order order) noexcept \
1110 { \
1111  core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
1112 }
1113 
1114 template<typename T>
1115 inline void core_util_atomic_store(T *volatile *valuePtr, T *val) noexcept
1116 {
1117  core_util_atomic_store_ptr((void *volatile *) valuePtr, val);
1118 }
1119 
1120 template<typename T>
1121 inline void core_util_atomic_store(T **valuePtr, T *val) noexcept
1122 {
1123  core_util_atomic_store_ptr((void **) valuePtr, val);
1124 }
1125 
1126 template<typename T>
1127 inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *val, mbed_memory_order order) noexcept
1128 {
1129  core_util_atomic_store_ptr((void *volatile *) valuePtr, val, order);
1130 }
1131 
1132 template<typename T>
1133 inline void core_util_atomic_store_explicit(T **valuePtr, T *val, mbed_memory_order order) noexcept
1134 {
1135  core_util_atomic_store_ptr((void **) valuePtr, val, order);
1136 }
1137 
1138 DO_MBED_ATOMIC_STORE_TEMPLATE(uint8_t, u8)
1139 DO_MBED_ATOMIC_STORE_TEMPLATE(uint16_t, u16)
1140 DO_MBED_ATOMIC_STORE_TEMPLATE(uint32_t, u32)
1141 DO_MBED_ATOMIC_STORE_TEMPLATE(uint64_t, u64)
1142 DO_MBED_ATOMIC_STORE_TEMPLATE(int8_t, s8)
1143 DO_MBED_ATOMIC_STORE_TEMPLATE(int16_t, s16)
1144 DO_MBED_ATOMIC_STORE_TEMPLATE(int32_t, s32)
1145 DO_MBED_ATOMIC_STORE_TEMPLATE(int64_t, s64)
1146 DO_MBED_ATOMIC_STORE_TEMPLATE(bool, bool)
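/* Illustration (not part of the original header) - the template
 * specialisations above let C++ callers stay type-generic instead of
 * naming the _u8/_u32/_s64 suffix themselves. The variable is arbitrary.
 *
 *   volatile uint32_t counter;
 *
 *   uint32_t next = core_util_atomic_load(&counter) + 1;   // dispatches to _u32
 *   core_util_atomic_store(&counter, next);                // dispatches to _u32
 */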
1147 
1148 #define DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, T, fn_suffix) \
1149 template<> inline \
1150 bool core_util_atomic_##tname(volatile T *ptr, T *expectedCurrentValue, T desiredValue) noexcept \
1151 { \
1152  return core_util_atomic_##fname##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \
1153 }
1154 
1155 template<typename T>
1156 inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept
1157 {
1158  return core_util_atomic_cas_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue);
1159 }
1160 
1161 template<typename T>
1162 inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept
1163 {
1164  return core_util_atomic_compare_exchange_weak_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue);
1165 }
1166 
1167 #define DO_MBED_ATOMIC_CAS_TEMPLATES(tname, fname) \
1168  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint8_t, u8) \
1169  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint16_t, u16) \
1170  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint32_t, u32) \
1171  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint64_t, u64) \
1172  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int8_t, s8) \
1173  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int16_t, s16) \
1174  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int32_t, s32) \
1175  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int64_t, s64) \
1176  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, bool, bool)
1177 
1178 DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_strong, cas)
1179 DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_weak, compare_exchange_weak)
1180 
1181 #define DO_MBED_ATOMIC_OP_TEMPLATE(name, T, fn_suffix) \
1182 template<> \
1183 inline T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept \
1184 { \
1185  return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \
1186 } \
1187  \
1188 template<> \
1189 inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \
1190  mbed_memory_order order) noexcept \
1191 { \
1192  return core_util_atomic_##name##_explicit_##fn_suffix(valuePtr, arg, order); \
1193 }
1194 
1195 
1196 template<>
1197 inline bool core_util_atomic_exchange(volatile bool *valuePtr, bool arg) noexcept
1198 {
1199  return core_util_atomic_exchange_bool(valuePtr, arg);
1200 }
1201 
1202 template<>
1203 inline bool core_util_atomic_exchange_explicit(volatile bool *valuePtr, bool arg, mbed_memory_order order) noexcept
1204 {
1205  return core_util_atomic_exchange_explicit_bool(valuePtr, arg, order);
1206 }
1207 
1208 template<typename T>
1209 inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *arg) noexcept
1210 {
1211  return (T *) core_util_atomic_exchange_ptr((void *volatile *) valuePtr, arg);
1212 }
1213 
1214 template<typename T>
1215 inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *arg, mbed_memory_order order) noexcept
1216 {
1217  return (T *) core_util_atomic_fetch_add_explicit_ptr((void *volatile *) valuePtr, arg, order);
1218 }
1219 
1220 template<typename T>
1221 inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg) noexcept
1222 {
1223  return (T *) core_util_atomic_fetch_add_ptr((void *volatile *) valuePtr, arg * sizeof(T));
1224 }
1225 
1226 template<typename T>
1227 inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept
1228 {
1229  return (T *) core_util_atomic_fetch_add_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order);
1230 }
1231 
1232 template<typename T>
1233 inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg) noexcept
1234 {
1235  return (T *) core_util_atomic_fetch_sub_ptr((void *volatile *) valuePtr, arg * sizeof(T));
1236 }
1237 
1238 template<typename T>
1239 inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept
1240 {
1241  return (T *) core_util_atomic_fetch_sub_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order);
1242 }
1243 
1244 
1245 #define DO_MBED_ATOMIC_OP_U_TEMPLATES(name) \
1246  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint8_t, u8) \
1247  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint16_t, u16) \
1248  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint32_t, u32) \
1249  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint64_t, u64)
1250 
1251 #define DO_MBED_ATOMIC_OP_S_TEMPLATES(name) \
1252  DO_MBED_ATOMIC_OP_TEMPLATE(name, int8_t, s8) \
1253  DO_MBED_ATOMIC_OP_TEMPLATE(name, int16_t, s16) \
1254  DO_MBED_ATOMIC_OP_TEMPLATE(name, int32_t, s32) \
1255  DO_MBED_ATOMIC_OP_TEMPLATE(name, int64_t, s64)
1256 
1257 #define DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, T, fn_suffix, postname, OP) \
1258 template<> \
1259 inline T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept \
1260 { \
1261  return core_util_atomic_##postname##_##fn_suffix(valuePtr, arg) OP; \
1262 } \
1263  \
1264 template<> \
1265 inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \
1266  mbed_memory_order order) noexcept \
1267 { \
1268  return core_util_atomic_##postname##_explicit_##fn_suffix(valuePtr, arg, order) OP; \
1269 }
1270 
1271 DO_MBED_ATOMIC_OP_U_TEMPLATES(exchange)
1272 DO_MBED_ATOMIC_OP_S_TEMPLATES(exchange)
1273 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_add)
1274 DO_MBED_ATOMIC_OP_S_TEMPLATES(fetch_add)
1275 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_sub)
1276 DO_MBED_ATOMIC_OP_S_TEMPLATES(fetch_sub)
1277 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_and)
1278 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_or)
1279 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_xor)
1280 
1281 namespace mstd {
1282 namespace impl {
1283 
1284 // Use custom assembler forms for pre-ops where available, else construct from post-ops
1285 #if MBED_EXCLUSIVE_ACCESS
1286 #define DO_MBED_ATOMIC_PRE_OP_TEMPLATES(name, postname, OP) \
1287  template<typename T> T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept; \
1288  template<typename T> T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept; \
1289  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint8_t, u8) \
1290  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint16_t, u16) \
1291  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint32_t, u32) \
1292  DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint64_t, u64, postname, OP)
1293 #else
1294 #define DO_MBED_ATOMIC_PRE_OP_TEMPLATES(name, postname, OP) \
1295  template<typename T> T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept; \
1296  template<typename T> T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept; \
1297  DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint8_t, u8, postname, OP) \
1298  DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint16_t, u16, postname, OP) \
1299  DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint32_t, u32, postname, OP) \
1300  DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint64_t, u64, postname, OP)
1301 #endif
1302 
1303 // *INDENT-OFF*
1304 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(incr, fetch_add, + arg)
1305 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(decr, fetch_sub, - arg)
1306 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(and_fetch, fetch_and, & arg)
1307 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(or_fetch, fetch_or, | arg)
1308 DO_MBED_ATOMIC_PRE_OP_TEMPLATES(xor_fetch, fetch_xor, ^ arg)
1309 // *INDENT-ON*
1310 
1311 }
1312 }
1313 
1314 #endif // __cplusplus
1315 
1316 #undef MBED_DOP_REG
1317 #undef MBED_CMP_IMM
1318 #undef MBED_SUB3_IMM
1319 #undef DO_MBED_LOCKFREE_EXCHG_ASM
1320 #undef DO_MBED_LOCKFREE_NEWVAL_2OP_ASM
1321 #undef DO_MBED_LOCKFREE_OLDVAL_3OP_ASM
1322 #undef DO_MBED_LOCKFREE_OLDVAL_2OP_ASM
1323 #undef DO_MBED_LOCKFREE_CAS_WEAK_ASM
1324 #undef DO_MBED_LOCKFREE_CAS_STRONG_ASM
1325 #undef DO_MBED_LOCKFREE_LOADSTORE
1326 #undef DO_MBED_LOCKFREE_EXCHG_OP
1327 #undef DO_MBED_LOCKFREE_CAS_WEAK_OP
1328 #undef DO_MBED_LOCKFREE_CAS_STRONG_OP
1329 #undef DO_MBED_LOCKFREE_NEWVAL_2OP
1330 #undef DO_MBED_LOCKFREE_OLDVAL_2OP
1331 #undef DO_MBED_LOCKFREE_OLDVAL_3OP
1332 #undef DO_MBED_LOCKFREE_EXCHG_OPS
1333 #undef DO_MBED_LOCKFREE_NEWVAL_2OPS
1334 #undef DO_MBED_LOCKFREE_OLDVAL_2OPS
1335 #undef DO_MBED_LOCKFREE_OLDVAL_3OPS
1336 #undef DO_MBED_LOCKFREE_CAS_WEAK_OPS
1337 #undef DO_MBED_LOCKFREE_CAS_STRONG_OPS
1338 #undef DO_MBED_SIGNED_CAS_OP
1339 #undef DO_MBED_SIGNED_CAS_OPS
1340 #undef DO_MBED_SIGNED_FETCH_OP
1341 #undef DO_MBED_SIGNED_EXPLICIT_FETCH_OP
1342 #undef DO_MBED_SIGNED_FETCH_OPS
1343 #undef DO_MBED_SIGNED_EXPLICIT_FETCH_OPS
1344 #undef DO_MBED_LOCKED_FETCH_OP_ORDERINGS
1345 #undef DO_MBED_LOCKED_CAS_ORDERINGS
1346 #undef MBED_ACQUIRE_BARRIER
1347 #undef MBED_RELEASE_BARRIER
1348 #undef MBED_SEQ_CST_BARRIER
1349 #undef DO_MBED_ATOMIC_LOAD_TEMPLATE
1350 #undef DO_MBED_ATOMIC_STORE_TEMPLATE
1351 #undef DO_MBED_ATOMIC_EXCHANGE_TEMPLATE
1352 #undef DO_MBED_ATOMIC_CAS_TEMPLATE
1353 #undef DO_MBED_ATOMIC_CAS_TEMPLATES
1354 #undef DO_MBED_ATOMIC_FETCH_TEMPLATE
1355 #undef DO_MBED_ATOMIC_FETCH_U_TEMPLATES
1356 #undef DO_MBED_ATOMIC_FETCH_S_TEMPLATES
1357 
1358 #endif