mbed_atomic_impl.h
1 /*
2  * Copyright (c) 2015-2016, ARM Limited, All Rights Reserved
3  * SPDX-License-Identifier: Apache-2.0
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License"); you may
6  * not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 
18 #ifndef __MBED_ATOMIC_IMPL_H__
19 #define __MBED_ATOMIC_IMPL_H__
20 
21 #ifndef __MBED_UTIL_ATOMIC_H__
22 #error "mbed_atomic_impl.h is designed to be included only by mbed_atomic.h"
23 #endif
24 
25 #include <stdint.h>
26 #include "cmsis.h"
27 #include "platform/mbed_assert.h"
28 #include "platform/mbed_toolchain.h"
29 
30 #ifdef __cplusplus
31 extern "C" {
32 #endif
33 
34 #ifdef MBED_DEBUG
35 /* Plain loads must not have "release" or "acquire+release" order */
36 #define MBED_CHECK_LOAD_ORDER(order) MBED_ASSERT((order) != mbed_memory_order_release && (order) != mbed_memory_order_acq_rel)
37 
38 /* Plain stores must not have "consume", "acquire" or "acquire+release" order */
39 #define MBED_CHECK_STORE_ORDER(order) MBED_ASSERT((order) != mbed_memory_order_consume && (order) != mbed_memory_order_acquire && (order) != mbed_memory_order_acq_rel)
40 
41 /* Compare exchange needs failure order no stronger than success, and failure can't be "release" or "acquire+release" */
42 #define MBED_CHECK_CAS_ORDER(success, failure) \
43  MBED_ASSERT((failure) <= (success) && (failure) != mbed_memory_order_release && (failure) != mbed_memory_order_acq_rel)
44 #else
45 #define MBED_CHECK_LOAD_ORDER(order) (void)0
46 #define MBED_CHECK_STORE_ORDER(order) (void)0
47 #define MBED_CHECK_CAS_ORDER(success, failure) (void)0
48 #endif
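/* As a sketch of what the debug checks above catch (MBED_DEBUG builds only):
 *
 *   MBED_CHECK_CAS_ORDER(mbed_memory_order_acq_rel, mbed_memory_order_acquire); // OK: failure no stronger than success
 *   MBED_CHECK_CAS_ORDER(mbed_memory_order_relaxed, mbed_memory_order_acquire); // asserts: failure stronger than success
 *   MBED_CHECK_LOAD_ORDER(mbed_memory_order_release);                           // asserts: a plain load cannot release
 */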
49 
50 /* This is currently just to silence unit tests, so no better test required */
51 #ifdef __MBED__
52 #define MBED_ATOMIC_PTR_SIZE 32
53 #else
54 #define MBED_ATOMIC_PTR_SIZE 64
55 #endif
56 
57 /* Place barrier after a load or read-modify-write if a consume or acquire operation */
58 #define MBED_ACQUIRE_BARRIER(order) do { \
59  if ((order) & (mbed_memory_order_consume|mbed_memory_order_acquire)) { \
60  MBED_BARRIER(); \
61  } } while (0)
62 
63 /* Place barrier before a store or read-modify-write if a release operation */
64 #define MBED_RELEASE_BARRIER(order) do { \
65  if ((order) & mbed_memory_order_release) { \
66  MBED_BARRIER(); \
67  } } while (0)
68 
69 /* Place barrier after a plain store if a sequentially consistent operation */
70 #define MBED_SEQ_CST_BARRIER(order) do { \
71  if ((order) == mbed_memory_order_seq_cst) { \
72  MBED_BARRIER(); \
73  } } while (0)
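/* These three macros are how the explicit-order functions below implement
 * ordering: an explicit-order read-modify-write is bracketed as (sketch)
 *
 *   MBED_RELEASE_BARRIER(order);   // barrier only if the order includes release semantics
 *   ...LDREX/STREX sequence...
 *   MBED_ACQUIRE_BARRIER(order);   // barrier only if the order includes acquire/consume semantics
 *
 * while the non-explicit functions place an unconditional MBED_BARRIER() on
 * both sides.
 */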
74 
75 
76 
77 #if MBED_EXCLUSIVE_ACCESS
78 
79 /* This header file provides C inline definitions for atomic functions. */
80 /* For C99 inline semantic compatibility, mbed_atomic_impl.c has out-of-line definitions. */
81 
82 /****************************** ASSEMBLER **********************************/
83 
84 // Fiddle about with constraints. These work for GCC and clang, but
85 // IAR appears to be restricted to having only a single constraint,
86 // so we can't do immediates.
87 #if MBED_EXCLUSIVE_ACCESS_THUMB1
88 #define MBED_DOP_REG "l" // Need low register to get 16-bit 3-op ADD/SUB
89 #define MBED_CMP_IMM "I" // CMP 8-bit immediate
90 #define MBED_SUB3_IMM "L" // -7 to +7
91 #else
92 #define MBED_DOP_REG "r" // Can use 32-bit 3-op ADD/SUB, so any registers
93 #define MBED_CMP_IMM "IL" // CMP or CMN, 12-bit immediate
94 #define MBED_SUB3_IMM "IL" // SUB or ADD, 12-bit immediate
95 #endif
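// These fragments are pasted directly into the asm constraint strings below,
// so for example "=&" MBED_DOP_REG becomes "=&l" (force a low register) when
// Thumb-1-style exclusives are in use, and "=&r" otherwise.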
96 
97 // ARM C 5 inline assembler recommends against using LDREX/STREX
98 // for the same reason as the intrinsics, but there's no other way to get
99 // inlining. ARM C 5 is being retired anyway.
100 
101 #ifdef __CC_ARM
102 #pragma diag_suppress 3732
103 #define DO_MBED_LOCKFREE_EXCHG_ASM(M) \
104  __asm { \
105  LDREX##M oldValue, [valuePtr] ; \
106  STREX##M fail, newValue, [valuePtr] \
107  }
108 #elif defined __clang__ || defined __GNUC__
109 #define DO_MBED_LOCKFREE_EXCHG_ASM(M) \
110  __asm volatile ( \
111  ".syntax unified\n\t" \
112  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
113  "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \
114  : [oldValue] "=&r" (oldValue), \
115  [fail] "=&r" (fail), \
116  [value] "+Q" (*valuePtr) \
117  : [newValue] "r" (newValue) \
118  : \
119  )
120 #elif defined __ICCARM__
121 /* In IAR "r" means low register if Thumbv1 (there's no way to specify any register...) */
122 #define DO_MBED_LOCKFREE_EXCHG_ASM(M) \
123  asm volatile ( \
124  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
125  "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \
126  : [oldValue] "=&r" (oldValue), \
127  [fail] "=&r" (fail) \
128  : [valuePtr] "r" (valuePtr), \
129  [newValue] "r" (newValue) \
130  : "memory" \
131  )
132 #endif
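/* The LDREX/STREX pair used above behaves roughly like this pseudo-C (sketch):
 *
 *   oldValue = *valuePtr;                  // LDREX: load and set the exclusive monitor
 *   if (exclusive monitor still set) {     // cleared by intervening accesses/interrupts
 *       *valuePtr = newValue;              // STREX performs the store...
 *       fail = 0;                          // ...and reports success
 *   } else {
 *       fail = 1;                          // store not performed; caller retries
 *   }
 */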
133 
134 #ifdef __CC_ARM
135 #define DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M) \
136  __asm { \
137  LDREX##M oldValue, [valuePtr] ; \
138  OP newValue, oldValue, arg ; \
139  STREX##M fail, newValue, [valuePtr] \
140  }
141 #elif defined __clang__ || defined __GNUC__
142 #define DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M) \
143  __asm volatile ( \
144  ".syntax unified\n\t" \
145  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
146  #OP "\t%[newValue], %[oldValue], %[arg]\n\t" \
147  "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \
148  : [oldValue] "=&" MBED_DOP_REG (oldValue), \
149  [newValue] "=&" MBED_DOP_REG (newValue), \
150  [fail] "=&r" (fail), \
151  [value] "+Q" (*valuePtr) \
152  : [arg] Constants MBED_DOP_REG (arg) \
153  : "cc" \
154  )
155 #elif defined __ICCARM__
156 /* In IAR "r" means low register if Thumbv1 (there's no way to specify any register...) */
157 #define DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M) \
158  asm volatile ( \
159  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
160  #OP "\t%[newValue], %[oldValue], %[arg]\n" \
161  "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \
162  : [oldValue] "=&r" (oldValue), \
163  [newValue] "=&r" (newValue), \
164  [fail] "=&r" (fail) \
165  : [valuePtr] "r" (valuePtr), \
166  [arg] "r" (arg) \
167  : "memory", "cc" \
168  )
169 #endif
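/* For example, DO_MBED_LOCKFREE_3OP_ASM(ADDS, "IL", B) expands (GCC/Clang
 * syntax, sketch) to:
 *
 *   LDREXB  oldValue, [valuePtr]
 *   ADDS    newValue, oldValue, arg
 *   STREXB  fail, newValue, [valuePtr]
 *
 * which the wrapper macros further down repeat in a loop until STREX reports
 * success.
 */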
170 
171 /* Bitwise operations are harder to do in ARMv8-M baseline - there
172  * are only 2-operand versions of the instructions.
173  */
174 #ifdef __CC_ARM
175 #define DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M) \
176  __asm { \
177  LDREX##M oldValue, [valuePtr] ; \
178  MOV newValue, oldValue ; \
179  OP newValue, arg ; \
180  STREX##M fail, newValue, [valuePtr] \
181  }
182 #elif defined __clang__ || defined __GNUC__
183 #define DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M) \
184  __asm volatile ( \
185  ".syntax unified\n\t" \
186  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
187  "MOV" "\t%[newValue], %[oldValue]\n\t" \
188  #OP "\t%[newValue], %[arg]\n\t" \
189  "STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \
190  : [oldValue] "=&r" (oldValue), \
191  [newValue] "=&l" (newValue), \
192  [fail] "=&r" (fail), \
193  [value] "+Q" (*valuePtr) \
194  : [arg] Constants "l" (arg) \
195  : "cc" \
196  )
197 #elif defined __ICCARM__
198 #define DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M) \
199  asm volatile ( \
200  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
201  "MOV" "\t%[newValue], %[oldValue]\n" \
202  #OP "\t%[newValue], %[arg]\n" \
203  "STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \
204  : [oldValue] "=&r" (oldValue), \
205  [newValue] "=&r" (newValue), \
206  [fail] "=&r" (fail) \
207  : [valuePtr] "r" (valuePtr), \
208  [arg] "r" (arg) \
209  : "memory", "cc" \
210  )
211 #endif
212 
213 /* Note that we split ARM and Thumb implementations for CAS, as
214  * the key distinction is the handling of conditions. Thumb-2 IT is
215  * partially deprecated, so avoid it, making Thumb-1 and Thumb-2
216  * implementations the same.
217  */
218 #if MBED_EXCLUSIVE_ACCESS_ARM
219 #ifdef __CC_ARM
220 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
221  __asm { \
222  LDREX##M oldValue, [ptr] ; \
223  SUBS fail, oldValue, expectedValue ; \
224  STREX##M##EQ fail, desiredValue, [ptr] \
225  }
226 #elif defined __clang__ || defined __GNUC__
227 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
228  __asm volatile ( \
229  ".syntax unified\n\t" \
230  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
231  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n\t"\
232  "STREX"#M"EQ\t%[fail], %[desiredValue], %[value]\n\t" \
233  : [oldValue] "=&r" (oldValue), \
234  [fail] "=&r" (fail), \
235  [value] "+Q" (*ptr) \
236  : [desiredValue] "r" (desiredValue), \
237  [expectedValue] "ILr" (expectedValue) \
238  : "cc" \
239  )
240 #elif defined __ICCARM__
241 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
242  asm volatile ( \
243  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
244  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n" \
245  "STREX"#M"EQ\t%[fail], %[desiredValue], [%[valuePtr]]\n"\
246  : [oldValue] "=&r" (oldValue), \
247  [fail] "=&r" (fail) \
248  : [desiredValue] "r" (desiredValue), \
249  [expectedValue] "r" (expectedValue), \
250  [valuePtr] "r" (ptr) \
251  : "memory", "cc" \
252  )
253 #endif
254 #else // MBED_EXCLUSIVE_ACCESS_ARM
255 #ifdef __CC_ARM
256 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
257  __asm { \
258  LDREX##M oldValue, [ptr] ; \
259  SUBS fail, oldValue, expectedValue ; \
260  BNE done ; \
261  STREX##M fail, desiredValue, [ptr] ; \
262 done: \
263  }
264 #elif defined __clang__ || defined __GNUC__
265 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
266  __asm volatile ( \
267  ".syntax unified\n\t" \
268  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
269  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n\t"\
270  "BNE" "\t%=f\n\t" \
271  "STREX"#M "\t%[fail], %[desiredValue], %[value]\n" \
272  "%=:" \
273  : [oldValue] "=&" MBED_DOP_REG (oldValue), \
274  [fail] "=&" MBED_DOP_REG (fail), \
275  [value] "+Q" (*ptr) \
276  : [desiredValue] "r" (desiredValue), \
277  [expectedValue] MBED_SUB3_IMM MBED_DOP_REG (expectedValue) \
278  : "cc" \
279  )
280 #elif defined __ICCARM__
281 #define DO_MBED_LOCKFREE_CAS_WEAK_ASM(M) \
282  asm volatile ( \
283  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
284  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n" \
285  "BNE" "\tdone\n\t" \
286  "STREX"#M "\t%[fail], %[desiredValue], [%[valuePtr]]\n"\
287  "done:" \
288  : [oldValue] "=&r" (oldValue), \
289  [fail] "=&r" (fail) \
290  : [desiredValue] "r" (desiredValue), \
291  [expectedValue] "r" (expectedValue), \
292  [valuePtr] "r" (ptr) \
293  : "memory", "cc" \
294  )
295 #endif
296 #endif // MBED_EXCLUSIVE_ACCESS_ARM
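/* In pseudo-C, the weak CAS sequence above is (sketch):
 *
 *   oldValue = LDREX(ptr);
 *   fail = oldValue - expectedValue;       // SUBS: zero means the values matched
 *   if (fail == 0) {
 *       fail = STREX(ptr, desiredValue);   // may still fail spuriously - hence "weak"
 *   }
 *
 * so a non-zero fail means either a value mismatch or a lost exclusive
 * reservation; the wrappers below map that onto the boolean result and the
 * write-back of *expectedCurrentValue.
 */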
297 
298 /* For strong CAS, conditional execution is complex enough to
299  * not be worthwhile, so all implementations look like Thumb-1.
300  * (This is the operation for which STREX returning 0 for success
301  * is beneficial.)
302  */
303 #ifdef __CC_ARM
304 #define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M) \
305  __asm { \
306  retry: ; \
307  LDREX##M oldValue, [ptr] ; \
308  SUBS fail, oldValue, expectedValue ; \
309  BNE done ; \
310  STREX##M fail, desiredValue, [ptr] ; \
311  CMP fail, 0 ; \
312  BNE retry ; \
313  done: \
314  }
315 #elif defined __clang__ || defined __GNUC__
316 #define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M) \
317  __asm volatile ( \
318  ".syntax unified\n\t" \
319  "\n%=:\n\t" \
320  "LDREX"#M "\t%[oldValue], %[value]\n\t" \
321  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n\t"\
322  "BNE" "\t%=f\n" \
323  "STREX"#M "\t%[fail], %[desiredValue], %[value]\n\t" \
324  "CMP" "\t%[fail], #0\n\t" \
325  "BNE" "\t%=b\n" \
326  "%=:" \
327  : [oldValue] "=&" MBED_DOP_REG (oldValue), \
328  [fail] "=&" MBED_DOP_REG (fail), \
329  [value] "+Q" (*ptr) \
330  : [desiredValue] "r" (desiredValue), \
331  [expectedValue] MBED_SUB3_IMM MBED_DOP_REG (expectedValue) \
332  : "cc" \
333  )
334 #elif defined __ICCARM__
335 #define DO_MBED_LOCKFREE_CAS_STRONG_ASM(M) \
336  asm volatile ( \
337  "retry:\n" \
338  "LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
339  "SUBS" "\t%[fail], %[oldValue], %[expectedValue]\n" \
340  "BNE" "\tdone\n" \
341  "STREX"#M "\t%[fail], %[desiredValue], [%[valuePtr]]\n"\
342  "CMP" "\t%[fail], #0\n" \
343  "BNE" "\tretry\n" \
344  "done:" \
345  : [oldValue] "=&r" (oldValue), \
346  [fail] "=&r" (fail) \
347  : [desiredValue] "r" (desiredValue), \
348  [expectedValue] "r" (expectedValue), \
349  [valuePtr] "r" (ptr) \
350  : "memory", "cc" \
351  )
352 #endif
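/* The strong variant above differs only in that a spurious STREX failure
 * branches back to the LDREX (the CMP/BNE retry), so it can only return
 * failure on a genuine value mismatch. */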
353 
354 /********************* LOCK-FREE IMPLEMENTATION MACROS ****************/
355 
356 /* Note care taken with types here. Values which the assembler outputs correctly
357  * narrowed, or inputs without caring about width, are marked as type T. Other
358  * values are uint32_t. It's not clear from documentation whether assembler
359  * assumes anything about widths, but try to signal correctly to get necessary
360  * narrowing, and avoid unnecessary.
361  * Tests show that GCC in particular will pass in unnarrowed values - eg passing
362  * "uint8_t arg = -1" to the assembler as 0xFFFFFFFF. This is fine for, eg, add_u8,
363  * but wouldn't be for compare_and_exchange_u8.
364  * On the other hand, it seems to be impossible to stop GCC inserting narrowing
365  * instructions for the output - it will always put in UXTB for the oldValue of
366  * an operation.
367  */
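/* Concretely (sketch): given a uint8_t argument of -1 (0xFF), GCC may
 * materialise it in the register as 0xFFFFFFFF rather than 0x000000FF. ADDS on
 * that value still yields the correct low byte for fetch_add_u8, but a 32-bit
 * SUBS against a byte loaded by LDREXB would not, which is presumably why the
 * CAS wrappers below compute "uint32_t expectedValue = *expectedCurrentValue"
 * themselves rather than passing the T-typed value straight into the asm.
 */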
368 #define DO_MBED_LOCKFREE_EXCHG_OP(T, fn_suffix, M) \
369 inline T core_util_atomic_exchange_##fn_suffix(volatile T *valuePtr, T newValue) \
370 { \
371  T oldValue; \
372  uint32_t fail; \
373  MBED_BARRIER(); \
374  DO_MBED_LOCKFREE_EXCHG_ASM(M); \
375  MBED_BARRIER(); \
376  return oldValue; \
377 } \
378  \
379 MBED_FORCEINLINE T core_util_atomic_exchange_explicit_##fn_suffix( \
380  volatile T *valuePtr, T newValue, mbed_memory_order order) \
381 { \
382  T oldValue; \
383  uint32_t fail; \
384  MBED_RELEASE_BARRIER(order); \
385  DO_MBED_LOCKFREE_EXCHG_ASM(M); \
386  MBED_ACQUIRE_BARRIER(order); \
387  return oldValue; \
388 }
389 
390 #define DO_MBED_LOCKFREE_CAS_WEAK_OP(T, fn_suffix, M) \
391 inline bool core_util_atomic_compare_exchange_weak_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \
392 { \
393  MBED_BARRIER(); \
394  T oldValue; \
395  uint32_t fail, expectedValue = *expectedCurrentValue; \
396  DO_MBED_LOCKFREE_CAS_WEAK_ASM(M); \
397  if (fail) { \
398  *expectedCurrentValue = oldValue; \
399  } \
400  MBED_BARRIER(); \
401  return !fail; \
402 } \
403  \
404 MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) \
405 { \
406  MBED_CHECK_CAS_ORDER(success, failure); \
407  MBED_RELEASE_BARRIER(success); \
408  T oldValue; \
409  uint32_t fail, expectedValue = *expectedCurrentValue; \
410  DO_MBED_LOCKFREE_CAS_WEAK_ASM(M); \
411  if (fail) { \
412  *expectedCurrentValue = oldValue; \
413  } \
414  MBED_ACQUIRE_BARRIER(fail ? failure : success); \
415  return !fail; \
416 }
417 
418 #define DO_MBED_LOCKFREE_CAS_STRONG_OP(T, fn_suffix, M) \
419 inline bool core_util_atomic_cas_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \
420 { \
421  MBED_BARRIER(); \
422  T oldValue; \
423  uint32_t fail, expectedValue = *expectedCurrentValue; \
424  DO_MBED_LOCKFREE_CAS_STRONG_ASM(M); \
425  if (fail) { \
426  *expectedCurrentValue = oldValue; \
427  } \
428  MBED_BARRIER(); \
429  return !fail; \
430 } \
431  \
432 MBED_FORCEINLINE bool core_util_atomic_cas_explicit_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) \
433 { \
434  MBED_CHECK_CAS_ORDER(success, failure); \
435  MBED_RELEASE_BARRIER(success); \
436  T oldValue; \
437  uint32_t fail, expectedValue = *expectedCurrentValue; \
438  DO_MBED_LOCKFREE_CAS_STRONG_ASM(M); \
439  if (fail) { \
440  *expectedCurrentValue = oldValue; \
441  } \
442  MBED_ACQUIRE_BARRIER(fail ? failure : success); \
443  return !fail; \
444 }
445 
446 
447 #define DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, T, fn_suffix, M) \
448 inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \
449 { \
450  T oldValue; \
451  uint32_t fail, newValue; \
452  MBED_BARRIER(); \
453  do { \
454  DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M); \
455  } while (fail); \
456  MBED_BARRIER(); \
457  return (T) retValue; \
458 } \
459  \
460 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
461  volatile T *valuePtr, T arg, mbed_memory_order order) \
462 { \
463  T oldValue; \
464  uint32_t fail, newValue; \
465  MBED_RELEASE_BARRIER(order); \
466  do { \
467  DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M); \
468  } while (fail); \
469  MBED_ACQUIRE_BARRIER(order); \
470  return (T) retValue; \
471 } \
472 
473 #define DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, T, fn_suffix, M) \
474 inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) { \
475  T oldValue; \
476  uint32_t fail, newValue; \
477  MBED_BARRIER(); \
478  do { \
479  DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M); \
480  } while (fail); \
481  MBED_BARRIER(); \
482  return (T) retValue; \
483 } \
484  \
485 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
486  volatile T *valuePtr, T arg, mbed_memory_order order) \
487 { \
488  T oldValue; \
489  uint32_t fail, newValue; \
490  MBED_RELEASE_BARRIER(order); \
491  do { \
492  DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M); \
493  } while (fail); \
494  MBED_ACQUIRE_BARRIER(order); \
495  return (T) retValue; \
496 } \
497 
498 inline bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *valuePtr)
499 {
500  MBED_BARRIER();
501  bool oldValue, newValue = true;
502  uint32_t fail;
503  do {
504  DO_MBED_LOCKFREE_EXCHG_ASM(B);
505  } while (fail);
506  MBED_BARRIER();
507  return oldValue;
508 }
509 
510 MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *valuePtr, mbed_memory_order order)
511 {
512  MBED_RELEASE_BARRIER(order);
513  bool oldValue, newValue = true;
514  uint32_t fail;
515  do {
516  DO_MBED_LOCKFREE_EXCHG_ASM(B);
517  } while (fail);
518  MBED_ACQUIRE_BARRIER(order);
519  return oldValue;
520 }
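/* Typical use of the flag primitive (sketch; assumes the
 * CORE_UTIL_ATOMIC_FLAG_INIT initialiser from mbed_atomic.h and a hypothetical
 * "once" variable):
 *
 *   static core_util_atomic_flag once = CORE_UTIL_ATOMIC_FLAG_INIT;
 *   if (!core_util_atomic_flag_test_and_set(&once)) {
 *       // first caller only: perform one-time initialisation
 *   }
 */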
521 
522 /********************* LOCK-FREE IMPLEMENTATION DEFINITIONS ****************/
523 
524 #define DO_MBED_LOCKFREE_EXCHG_OPS() \
525  DO_MBED_LOCKFREE_EXCHG_OP(uint8_t, u8, B) \
526  DO_MBED_LOCKFREE_EXCHG_OP(uint16_t, u16, H) \
527  DO_MBED_LOCKFREE_EXCHG_OP(uint32_t, u32, )
528 
529 #define DO_MBED_LOCKFREE_3OPS(name, OP, Constants, retValue) \
530  DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, uint8_t, u8, B) \
531  DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, uint16_t, u16, H) \
532  DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, uint32_t, u32, )
533 
534 #define DO_MBED_LOCKFREE_2OPS(name, OP, Constants, retValue) \
535  DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, uint8_t, u8, B) \
536  DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, uint16_t, u16, H) \
537  DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, uint32_t, u32, )
538 
539 #define DO_MBED_LOCKFREE_CAS_STRONG_OPS() \
540  DO_MBED_LOCKFREE_CAS_STRONG_OP(uint8_t, u8, B) \
541  DO_MBED_LOCKFREE_CAS_STRONG_OP(uint16_t, u16, H) \
542  DO_MBED_LOCKFREE_CAS_STRONG_OP(uint32_t, u32, )
543 
544 #define DO_MBED_LOCKFREE_CAS_WEAK_OPS() \
545  DO_MBED_LOCKFREE_CAS_WEAK_OP(uint8_t, u8, B) \
546  DO_MBED_LOCKFREE_CAS_WEAK_OP(uint16_t, u16, H) \
547  DO_MBED_LOCKFREE_CAS_WEAK_OP(uint32_t, u32, )
548 
549 
550 // We always use the "S" form of operations - avoids yet another
551 // possible unneeded distinction between Thumbv1 and Thumbv2, and
552 // may reduce code size by allowing 16-bit instructions.
553 #if !MBED_EXCLUSIVE_ACCESS_THUMB1
554 // I constraint is 12-bit modified immediate constant
555 // L constraint is negated 12-bit modified immediate constant
556 // (relying on assembler to swap ADD/SUB)
557 // We could permit J (-4095 to +4095) if we used ADD/SUB
558 // instead of ADDS/SUBS, but then that would block generation
559 // of the 16-bit forms. Shame we can't specify "don't care"
560 // for the "S", or get the GNU multi-alternative to
561 // choose ADDS/ADD appropriately.
562 DO_MBED_LOCKFREE_3OPS(incr, ADDS, "IL", newValue)
563 DO_MBED_LOCKFREE_3OPS(decr, SUBS, "IL", newValue)
564 
565 DO_MBED_LOCKFREE_3OPS(fetch_add, ADDS, "IL", oldValue)
566 DO_MBED_LOCKFREE_3OPS(fetch_sub, SUBS, "IL", oldValue)
567 // K constraint is inverted 12-bit modified immediate constant
568 // (relying on assembler substituting BIC for AND)
569 DO_MBED_LOCKFREE_3OPS(fetch_and, ANDS, "IK", oldValue)
570 #if MBED_EXCLUSIVE_ACCESS_ARM
571 // ARM does not have ORN instruction, so take plain immediates.
572 DO_MBED_LOCKFREE_3OPS(fetch_or, ORRS, "I", oldValue)
573 #else
574 // Thumb-2 has ORN instruction, and assembler substitutes ORN for ORR.
575 DO_MBED_LOCKFREE_3OPS(fetch_or, ORRS, "IK", oldValue)
576 #endif
577 // I constraint is 12-bit modified immediate operand
578 DO_MBED_LOCKFREE_3OPS(fetch_xor, EORS, "I", oldValue)
579 #else // MBED_EXCLUSIVE_ACCESS_THUMB1
580 // L constraint is -7 to +7, suitable for 3-op ADD/SUB
581 // (relying on assembler to swap ADD/SUB)
582 DO_MBED_LOCKFREE_3OPS(incr, ADDS, "L", newValue)
583 DO_MBED_LOCKFREE_3OPS(decr, SUBS, "L", newValue)
584 DO_MBED_LOCKFREE_3OPS(fetch_add, ADDS, "L", oldValue)
585 DO_MBED_LOCKFREE_3OPS(fetch_sub, SUBS, "L", oldValue)
586 DO_MBED_LOCKFREE_2OPS(fetch_and, ANDS, "", oldValue)
587 DO_MBED_LOCKFREE_2OPS(fetch_or, ORRS, "", oldValue)
588 DO_MBED_LOCKFREE_2OPS(fetch_xor, EORS, "", oldValue)
589 #endif
590 
591 DO_MBED_LOCKFREE_EXCHG_OPS()
592 DO_MBED_LOCKFREE_CAS_STRONG_OPS()
593 DO_MBED_LOCKFREE_CAS_WEAK_OPS()
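/* After expansion, the DO_MBED_LOCKFREE_*OPS() instantiations above each
 * define a family of functions of this shape - e.g. the generated fetch_add
 * for uint8_t is roughly (sketch):
 *
 *   inline uint8_t core_util_atomic_fetch_add_u8(volatile uint8_t *valuePtr, uint8_t arg)
 *   {
 *       uint8_t oldValue;
 *       uint32_t fail, newValue;
 *       MBED_BARRIER();
 *       do {
 *           // LDREXB / ADDS / STREXB, as shown earlier
 *       } while (fail);
 *       MBED_BARRIER();
 *       return (uint8_t) oldValue;
 *   }
 */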
594 
595 #define DO_MBED_LOCKED_FETCH_OP_ORDERINGS(name) \
596  DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint64_t, u64)
597 #define DO_MBED_LOCKED_CAS_ORDERINGS(name) \
598  DO_MBED_LOCKED_CAS_ORDERING(name, uint64_t, u64)
599 #else // MBED_EXCLUSIVE_ACCESS
600 /* All the operations are locked, so need no ordering parameters */
601 #define DO_MBED_LOCKED_FETCH_OP_ORDERINGS(name) \
602  DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint8_t, u8) \
603  DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint16_t, u16) \
604  DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint32_t, u32) \
605  DO_MBED_LOCKED_FETCH_OP_ORDERING(name, uint64_t, u64)
606 #define DO_MBED_LOCKED_CAS_ORDERINGS(name) \
607  DO_MBED_LOCKED_CAS_ORDERING(name, uint8_t, u8) \
608  DO_MBED_LOCKED_CAS_ORDERING(name, uint16_t, u16) \
609  DO_MBED_LOCKED_CAS_ORDERING(name, uint32_t, u32) \
610  DO_MBED_LOCKED_CAS_ORDERING(name, uint64_t, u64)
611 
612 MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *valuePtr, MBED_UNUSED mbed_memory_order order)
613 {
614  return core_util_atomic_flag_test_and_set(valuePtr);
615 }
616 #endif // MBED_EXCLUSIVE_ACCESS
617 
618 /********************* OPERATIONS THAT ARE ALWAYS LOCK-FREE ****************/
619 
620 /* Lock-free loads and stores don't need assembler - just aligned accesses */
621 /* Silly ordering of `T volatile` is because T can be `void *` */
622 #define DO_MBED_LOCKFREE_LOADSTORE(T, V, fn_suffix) \
623 MBED_FORCEINLINE T core_util_atomic_load_##fn_suffix(T const V *valuePtr) \
624 { \
625  T value = *valuePtr; \
626  MBED_BARRIER(); \
627  return value; \
628 } \
629  \
630 MBED_FORCEINLINE T core_util_atomic_load_explicit_##fn_suffix(T const V *valuePtr, mbed_memory_order order) \
631 { \
632  MBED_CHECK_LOAD_ORDER(order); \
633  T value = *valuePtr; \
634  MBED_ACQUIRE_BARRIER(order); \
635  return value; \
636 } \
637  \
638 MBED_FORCEINLINE void core_util_atomic_store_##fn_suffix(T V *valuePtr, T value) \
639 { \
640  MBED_BARRIER(); \
641  *valuePtr = value; \
642  MBED_BARRIER(); \
643 } \
644  \
645 MBED_FORCEINLINE void core_util_atomic_store_explicit_##fn_suffix(T V *valuePtr, T value, mbed_memory_order order) \
646 { \
647  MBED_CHECK_STORE_ORDER(order); \
648  MBED_RELEASE_BARRIER(order); \
649  *valuePtr = value; \
650  MBED_SEQ_CST_BARRIER(order); \
651 }
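/* Once instantiated below, core_util_atomic_load_u32, for example, is just an
 * aligned 32-bit load fenced by a barrier (sketch of the expansion):
 *
 *   uint32_t value = *valuePtr;   // naturally aligned accesses up to 32 bits are single-copy atomic
 *   MBED_BARRIER();
 *   return value;
 */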
652 
653 MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr)
654 {
655  MBED_BARRIER();
656  flagPtr->_flag = false;
657  MBED_BARRIER();
658 }
659 
660 MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(volatile core_util_atomic_flag *flagPtr, mbed_memory_order order)
661 {
662  MBED_CHECK_STORE_ORDER(order);
663  MBED_RELEASE_BARRIER(order);
664  flagPtr->_flag = false;
665  MBED_SEQ_CST_BARRIER(order);
666 }
667 
668 #ifdef __cplusplus
669 // Temporarily turn off extern "C", so we can provide non-volatile load/store
670 // overloads for efficiency. All these functions are static inline, so this has
671 // no linkage effect exactly; it just permits the overloads.
672 } // extern "C"
673 
674 // For efficiency it's worth having non-volatile overloads
675 MBED_FORCEINLINE void core_util_atomic_flag_clear(core_util_atomic_flag *flagPtr)
676 {
677  MBED_BARRIER();
678  flagPtr->_flag = false;
679  MBED_BARRIER();
680 }
681 
682 MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(core_util_atomic_flag *flagPtr, mbed_memory_order order)
683 {
684  MBED_RELEASE_BARRIER(order);
685  flagPtr->_flag = false;
686  MBED_SEQ_CST_BARRIER(order);
687 }
688 
689 DO_MBED_LOCKFREE_LOADSTORE(uint8_t,, u8)
690 DO_MBED_LOCKFREE_LOADSTORE(uint16_t,, u16)
691 DO_MBED_LOCKFREE_LOADSTORE(uint32_t,, u32)
692 DO_MBED_LOCKFREE_LOADSTORE(int8_t,, s8)
693 DO_MBED_LOCKFREE_LOADSTORE(int16_t,, s16)
694 DO_MBED_LOCKFREE_LOADSTORE(int32_t,, s32)
695 DO_MBED_LOCKFREE_LOADSTORE(bool,, bool)
696 DO_MBED_LOCKFREE_LOADSTORE(void *,, ptr)
697 
698 #endif
699 
700 DO_MBED_LOCKFREE_LOADSTORE(uint8_t, volatile, u8)
701 DO_MBED_LOCKFREE_LOADSTORE(uint16_t, volatile, u16)
702 DO_MBED_LOCKFREE_LOADSTORE(uint32_t, volatile, u32)
703 DO_MBED_LOCKFREE_LOADSTORE(int8_t, volatile, s8)
704 DO_MBED_LOCKFREE_LOADSTORE(int16_t, volatile, s16)
705 DO_MBED_LOCKFREE_LOADSTORE(int32_t, volatile, s32)
706 DO_MBED_LOCKFREE_LOADSTORE(bool, volatile, bool)
707 DO_MBED_LOCKFREE_LOADSTORE(void *, volatile, ptr)
708 
709 #ifdef __cplusplus
710 extern "C" {
711 #endif
712 
713 /********************* GENERIC VARIANTS - SIGNED, BOOL, POINTERS ****************/
714 
715 MBED_FORCEINLINE int64_t core_util_atomic_load_s64(const volatile int64_t *valuePtr)
716 {
717  return (int64_t)core_util_atomic_load_u64((const volatile uint64_t *)valuePtr);
718 }
719 
720 MBED_FORCEINLINE void core_util_atomic_store_s64(volatile int64_t *valuePtr, int64_t desiredValue)
721 {
722  core_util_atomic_store_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue);
723 }
724 
725 #define DO_MBED_SIGNED_CAS_OP(name, T, fn_suffix) \
726 MBED_FORCEINLINE bool core_util_atomic_##name##_s##fn_suffix(volatile T *ptr, \
727  T *expectedCurrentValue, T desiredValue) \
728 { \
729  return core_util_atomic_##name##_u##fn_suffix((volatile u##T *)ptr, \
730  (u##T *)expectedCurrentValue, (u##T)desiredValue); \
731 } \
732  \
733 MBED_FORCEINLINE bool core_util_atomic_##name##_explicit_s##fn_suffix(volatile T *ptr, \
734  T *expectedCurrentValue, T desiredValue, \
735  mbed_memory_order success, mbed_memory_order failure) \
736 { \
737  return core_util_atomic_##name##_explicit_u##fn_suffix((volatile u##T *)ptr, \
738  (u##T *)expectedCurrentValue, (u##T)desiredValue, success, failure); \
739 }
740 
741 #define DO_MBED_SIGNED_CAS_OPS(name) \
742  DO_MBED_SIGNED_CAS_OP(name, int8_t, 8) \
743  DO_MBED_SIGNED_CAS_OP(name, int16_t, 16) \
744  DO_MBED_SIGNED_CAS_OP(name, int32_t, 32) \
745  DO_MBED_SIGNED_CAS_OP(name, int64_t, 64)
746 
747 DO_MBED_SIGNED_CAS_OPS(cas)
748 DO_MBED_SIGNED_CAS_OPS(compare_exchange_weak)
749 
750 MBED_FORCEINLINE bool core_util_atomic_cas_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue)
751 {
752  return core_util_atomic_cas_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue);
753 }
754 
755 MBED_FORCEINLINE bool core_util_atomic_cas_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure)
756 {
757  return core_util_atomic_cas_explicit_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue, success, failure);
758 }
759 
760 inline bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue)
761 {
762 #if MBED_ATOMIC_PTR_SIZE == 32
763  return core_util_atomic_cas_u32(
764  (volatile uint32_t *)ptr,
765  (uint32_t *)expectedCurrentValue,
766  (uint32_t)desiredValue);
767 #else
768  return core_util_atomic_cas_u64(
769  (volatile uint64_t *)ptr,
770  (uint64_t *)expectedCurrentValue,
771  (uint64_t)desiredValue);
772 #endif
773 }
774 
775 MBED_FORCEINLINE bool core_util_atomic_cas_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure)
776 {
777 #if MBED_ATOMIC_PTR_SIZE == 32
778  return core_util_atomic_cas_explicit_u32(
779  (volatile uint32_t *)ptr,
780  (uint32_t *)expectedCurrentValue,
781  (uint32_t)desiredValue,
782  success, failure);
783 #else
784  return core_util_atomic_cas_explicit_u64(
785  (volatile uint64_t *)ptr,
786  (uint64_t *)expectedCurrentValue,
787  (uint64_t)desiredValue,
788  success, failure);
789 #endif
790 }
791 
792 MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue)
793 {
794  return core_util_atomic_compare_exchange_weak_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue);
795 }
796 
797 MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure)
798 {
799  return core_util_atomic_compare_exchange_weak_explicit_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue, success, failure);
800 }
801 
802 MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue)
803 {
804 #if MBED_ATOMIC_PTR_SIZE == 32
805  return core_util_atomic_compare_exchange_weak_u32(
806  (volatile uint32_t *)ptr,
807  (uint32_t *)expectedCurrentValue,
808  (uint32_t)desiredValue);
809 #else
810  return core_util_atomic_compare_exchange_weak_u64(
811  (volatile uint64_t *)ptr,
812  (uint64_t *)expectedCurrentValue,
813  (uint64_t)desiredValue);
814 #endif
815 }
816 
817 MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure)
818 {
819 #if MBED_ATOMIC_PTR_SIZE == 32
820  return core_util_atomic_compare_exchange_weak_explicit_u32(
821  (volatile uint32_t *)ptr,
822  (uint32_t *)expectedCurrentValue,
823  (uint32_t)desiredValue,
824  success, failure);
825 #else
826  return core_util_atomic_compare_exchange_weak_explicit_u64(
827  (volatile uint64_t *)ptr,
828  (uint64_t *)expectedCurrentValue,
829  (uint64_t)desiredValue,
830  success, failure);
831 #endif
832 }
833 
834 #define DO_MBED_SIGNED_FETCH_OP(name, T, fn_suffix) \
835 MBED_FORCEINLINE T core_util_atomic_##name##_s##fn_suffix(volatile T *valuePtr, T arg) \
836 { \
837  return (T)core_util_atomic_##name##_u##fn_suffix((volatile u##T *)valuePtr, (u##T)arg); \
838 }
839 
840 #define DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, T, fn_suffix) \
841 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_s##fn_suffix(volatile T *valuePtr, T arg, mbed_memory_order order) \
842 { \
843  return (T)core_util_atomic_##name##_explicit_u##fn_suffix((volatile u##T *)valuePtr, (u##T)arg, order); \
844 }
845 
846 #define DO_MBED_SIGNED_FETCH_OPS(name) \
847  DO_MBED_SIGNED_FETCH_OP(name, int8_t, 8) \
848  DO_MBED_SIGNED_FETCH_OP(name, int16_t, 16) \
849  DO_MBED_SIGNED_FETCH_OP(name, int32_t, 32) \
850  DO_MBED_SIGNED_FETCH_OP(name, int64_t, 64)
851 
852 #define DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(name) \
853  DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int8_t, 8) \
854  DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int16_t, 16) \
855  DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int32_t, 32) \
856  DO_MBED_SIGNED_EXPLICIT_FETCH_OP(name, int64_t, 64)
857 
858 DO_MBED_SIGNED_FETCH_OPS(exchange)
859 DO_MBED_SIGNED_FETCH_OPS(incr)
860 DO_MBED_SIGNED_FETCH_OPS(decr)
861 DO_MBED_SIGNED_FETCH_OPS(fetch_add)
862 DO_MBED_SIGNED_FETCH_OPS(fetch_sub)
863 
864 DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(exchange)
865 DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(fetch_add)
866 DO_MBED_SIGNED_EXPLICIT_FETCH_OPS(fetch_sub)
867 
868 MBED_FORCEINLINE bool core_util_atomic_exchange_bool(volatile bool *valuePtr, bool desiredValue)
869 {
870  return (bool)core_util_atomic_exchange_u8((volatile uint8_t *)valuePtr, desiredValue);
871 }
872 
873 MBED_FORCEINLINE bool core_util_atomic_exchange_explicit_bool(volatile bool *valuePtr, bool desiredValue, mbed_memory_order order)
874 {
875  return (bool)core_util_atomic_exchange_explicit_u8((volatile uint8_t *)valuePtr, desiredValue, order);
876 }
877 
878 inline void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue)
879 {
880 #if MBED_ATOMIC_PTR_SIZE == 32
881  return (void *)core_util_atomic_exchange_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue);
882 #else
883  return (void *)core_util_atomic_exchange_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue);
884 #endif
885 }
886 
887 MBED_FORCEINLINE void *core_util_atomic_exchange_explicit_ptr(void *volatile *valuePtr, void *desiredValue, mbed_memory_order order)
888 {
889 #if MBED_ATOMIC_PTR_SIZE == 32
890  return (void *)core_util_atomic_exchange_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue, order);
891 #else
892  return (void *)core_util_atomic_exchange_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue, order);
893 #endif
894 }
895 
896 inline void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta)
897 {
898 #if MBED_ATOMIC_PTR_SIZE == 32
899  return (void *)core_util_atomic_incr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta);
900 #else
901  return (void *)core_util_atomic_incr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta);
902 #endif
903 }
904 
905 inline void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta)
906 {
907 #if MBED_ATOMIC_PTR_SIZE == 32
908  return (void *)core_util_atomic_decr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta);
909 #else
910  return (void *)core_util_atomic_decr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta);
911 #endif
912 }
913 
914 MBED_FORCEINLINE void *core_util_atomic_fetch_add_ptr(void *volatile *valuePtr, ptrdiff_t arg)
915 {
916 #if MBED_ATOMIC_PTR_SIZE == 32
917  return (void *)core_util_atomic_fetch_add_u32((volatile uint32_t *)valuePtr, (uint32_t)arg);
918 #else
919  return (void *)core_util_atomic_fetch_add_u64((volatile uint64_t *)valuePtr, (uint64_t)arg);
920 #endif
921 }
922 
923 MBED_FORCEINLINE void *core_util_atomic_fetch_add_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
924 {
925 #if MBED_ATOMIC_PTR_SIZE == 32
926  return (void *)core_util_atomic_fetch_add_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)arg, order);
927 #else
928  return (void *)core_util_atomic_fetch_add_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)arg, order);
929 #endif
930 }
931 
932 MBED_FORCEINLINE void *core_util_atomic_fetch_sub_ptr(void *volatile *valuePtr, ptrdiff_t arg)
933 {
934 #if MBED_ATOMIC_PTR_SIZE == 32
935  return (void *)core_util_atomic_fetch_sub_u32((volatile uint32_t *)valuePtr, (uint32_t)arg);
936 #else
937  return (void *)core_util_atomic_fetch_sub_u64((volatile uint64_t *)valuePtr, (uint64_t)arg);
938 #endif
939 }
940 
941 MBED_FORCEINLINE void *core_util_atomic_fetch_sub_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
942 {
943 #if MBED_ATOMIC_PTR_SIZE == 32
944  return (void *)core_util_atomic_fetch_sub_explicit_u32((volatile uint32_t *)valuePtr, (uint32_t)arg, order);
945 #else
946  return (void *)core_util_atomic_fetch_sub_explicit_u64((volatile uint64_t *)valuePtr, (uint64_t)arg, order);
947 #endif
948 }
949 
950 /***************** DUMMY EXPLICIT ORDERING FOR LOCKED OPS *****************/
951 
952 /* Need to throw away the ordering information for all locked operations */
953 MBED_FORCEINLINE uint64_t core_util_atomic_load_explicit_u64(const volatile uint64_t *valuePtr, MBED_UNUSED mbed_memory_order order)
954 {
955  MBED_CHECK_LOAD_ORDER(order);
956  return core_util_atomic_load_u64(valuePtr);
957 }
958 
959 MBED_FORCEINLINE int64_t core_util_atomic_load_explicit_s64(const volatile int64_t *valuePtr, MBED_UNUSED mbed_memory_order order)
960 {
961  MBED_CHECK_LOAD_ORDER(order);
962  return core_util_atomic_load_s64(valuePtr);
963 }
964 
965 MBED_FORCEINLINE void core_util_atomic_store_explicit_u64(volatile uint64_t *valuePtr, uint64_t desiredValue, MBED_UNUSED mbed_memory_order order)
966 {
967  MBED_CHECK_STORE_ORDER(order);
968  core_util_atomic_store_u64(valuePtr, desiredValue);
969 }
970 
971 MBED_FORCEINLINE void core_util_atomic_store_explicit_s64(volatile int64_t *valuePtr, int64_t desiredValue, MBED_UNUSED mbed_memory_order order)
972 {
973  MBED_CHECK_STORE_ORDER(order);
974  core_util_atomic_store_s64(valuePtr, desiredValue);
975 }
976 
977 #define DO_MBED_LOCKED_FETCH_OP_ORDERING(name, T, fn_suffix) \
978 MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
979  volatile T *valuePtr, T arg, MBED_UNUSED mbed_memory_order order) \
980 { \
981  return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \
982 }
983 
984 #define DO_MBED_LOCKED_CAS_ORDERING(name, T, fn_suffix) \
985 MBED_FORCEINLINE bool core_util_atomic_##name##_explicit_##fn_suffix( \
986  volatile T *ptr, T *expectedCurrentValue, T desiredValue, \
987  MBED_UNUSED mbed_memory_order success, \
988  MBED_UNUSED mbed_memory_order failure) \
989 { \
990  MBED_CHECK_CAS_ORDER(success, failure); \
991  return core_util_atomic_##name##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \
992 }
993 
994 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(exchange)
995 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_add)
996 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_sub)
997 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_and)
998 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_or)
999 DO_MBED_LOCKED_FETCH_OP_ORDERINGS(fetch_xor)
1000 DO_MBED_LOCKED_CAS_ORDERINGS(cas)
1001 DO_MBED_LOCKED_CAS_ORDERINGS(compare_exchange_weak)
1002 
1003 #ifdef __cplusplus
1004 } // extern "C"
1005 
1006 /***************** TEMPLATE IMPLEMENTATIONS *****************/
1007 
1008 /* Each of these groups provides specialisations for the T template for each of
1009  * the small types (there is no base implementation), and the base implementation
1010  * of the T * template.
1011  */
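// For illustration (a sketch with hypothetical variables), the C++
// specialisations below let callers write width-agnostic code:
//
//   uint32_t counter;
//   uint32_t v = core_util_atomic_load(&counter);      // dispatches to the _u32 variant
//   core_util_atomic_store(&counter, v + 1u);          // dispatches to the _u32 variant
//   Thing *t = core_util_atomic_load(&shared_thing);   // pointer template, dispatches to _ptr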
1012 #define DO_MBED_ATOMIC_LOAD_TEMPLATE(T, fn_suffix) \
1013 template<> \
1014 inline T core_util_atomic_load(const volatile T *valuePtr) \
1015 { \
1016  return core_util_atomic_load_##fn_suffix(valuePtr); \
1017 } \
1018  \
1019 template<> \
1020 inline T core_util_atomic_load(const T *valuePtr) \
1021 { \
1022  return core_util_atomic_load_##fn_suffix(valuePtr); \
1023 } \
1024  \
1025 template<> \
1026 inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) \
1027 { \
1028  return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
1029 } \
1030  \
1031 template<> \
1032 inline T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) \
1033 { \
1034  return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
1035 }
1036 
1037 template<typename T>
1038 inline T *core_util_atomic_load(T *const volatile *valuePtr)
1039 {
1040  return (T *) core_util_atomic_load_ptr((void *const volatile *) valuePtr);
1041 }
1042 
1043 template<typename T>
1044 inline T *core_util_atomic_load(T *const *valuePtr)
1045 {
1046  return (T *) core_util_atomic_load_ptr((void *const *) valuePtr);
1047 }
1048 
1049 template<typename T>
1050 inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order)
1051 {
1052  return (T *) core_util_atomic_load_explicit_ptr((void *const volatile *) valuePtr, order);
1053 }
1054 
1055 template<typename T>
1056 inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order)
1057 {
1058  return (T *) core_util_atomic_load_explicit_ptr((void *const *) valuePtr, order);
1059 }
1060 
1061 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint8_t, u8)
1062 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint16_t, u16)
1063 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint32_t, u32)
1064 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint64_t, u64)
1065 DO_MBED_ATOMIC_LOAD_TEMPLATE(int8_t, s8)
1066 DO_MBED_ATOMIC_LOAD_TEMPLATE(int16_t, s16)
1067 DO_MBED_ATOMIC_LOAD_TEMPLATE(int32_t, s32)
1068 DO_MBED_ATOMIC_LOAD_TEMPLATE(int64_t, s64)
1069 DO_MBED_ATOMIC_LOAD_TEMPLATE(bool, bool)
1070 
1071 #define DO_MBED_ATOMIC_STORE_TEMPLATE(T, fn_suffix) \
1072 template<> \
1073 inline void core_util_atomic_store(volatile T *valuePtr, T val) \
1074 { \
1075  core_util_atomic_store_##fn_suffix(valuePtr, val); \
1076 } \
1077  \
1078 template<> \
1079 inline void core_util_atomic_store(T *valuePtr, T val) \
1080 { \
1081  core_util_atomic_store_##fn_suffix(valuePtr, val); \
1082 } \
1083  \
1084 template<> \
1085 inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) \
1086 { \
1087  core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
1088 } \
1089  \
1090 template<> \
1091 inline void core_util_atomic_store_explicit(T *valuePtr, T val, mbed_memory_order order) \
1092 { \
1093  core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
1094 }
1095 
1096 template<typename T>
1097 inline void core_util_atomic_store(T *volatile *valuePtr, T *val)
1098 {
1099  core_util_atomic_store_ptr((void *volatile *) valuePtr, val);
1100 }
1101 
1102 template<typename T>
1103 inline void core_util_atomic_store(T **valuePtr, T *val)
1104 {
1105  core_util_atomic_store_ptr((void **) valuePtr, val);
1106 }
1107 
1108 template<typename T>
1109 inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *val, mbed_memory_order order)
1110 {
1111  core_util_atomic_store_explicit_ptr((void *volatile *) valuePtr, val, order);
1112 }
1113 
1114 template<typename T>
1115 inline void core_util_atomic_store_explicit(T **valuePtr, T *val, mbed_memory_order order)
1116 {
1117  core_util_atomic_store_explicit_ptr((void **) valuePtr, val, order);
1118 }
1119 
1120 DO_MBED_ATOMIC_STORE_TEMPLATE(uint8_t, u8)
1121 DO_MBED_ATOMIC_STORE_TEMPLATE(uint16_t, u16)
1122 DO_MBED_ATOMIC_STORE_TEMPLATE(uint32_t, u32)
1123 DO_MBED_ATOMIC_STORE_TEMPLATE(uint64_t, u64)
1124 DO_MBED_ATOMIC_STORE_TEMPLATE(int8_t, s8)
1125 DO_MBED_ATOMIC_STORE_TEMPLATE(int16_t, s16)
1126 DO_MBED_ATOMIC_STORE_TEMPLATE(int32_t, s32)
1127 DO_MBED_ATOMIC_STORE_TEMPLATE(int64_t, s64)
1128 DO_MBED_ATOMIC_STORE_TEMPLATE(bool, bool)
1129 
1130 #define DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, T, fn_suffix) \
1131 template<> inline \
1132 bool core_util_atomic_##tname(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \
1133 { \
1134  return core_util_atomic_##fname##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \
1135 }
1136 
1137 template<typename T>
1138 inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue)
1139 {
1140  return core_util_atomic_cas_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue);
1141 }
1142 
1143 template<typename T>
1144 inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue)
1145 {
1146  return core_util_atomic_compare_exchange_weak_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue);
1147 }
1148 
1149 #define DO_MBED_ATOMIC_CAS_TEMPLATES(tname, fname) \
1150  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint8_t, u8) \
1151  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint16_t, u16) \
1152  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint32_t, u32) \
1153  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, uint64_t, u64) \
1154  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int8_t, s8) \
1155  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int16_t, s16) \
1156  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int32_t, s32) \
1157  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, int64_t, s64) \
1158  DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, bool, bool)
1159 
1160 DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_strong, cas)
1161 DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_weak, compare_exchange_weak)
1162 
1163 #define DO_MBED_ATOMIC_OP_TEMPLATE(name, T, fn_suffix) \
1164 template<> \
1165 inline T core_util_atomic_##name(volatile T *valuePtr, T arg) \
1166 { \
1167  return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \
1168 } \
1169  \
1170 template<> \
1171 inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \
1172  mbed_memory_order order) \
1173 { \
1174  return core_util_atomic_##name##_explicit_##fn_suffix(valuePtr, arg, order); \
1175 }
1176 
1177 
1178 template<>
1179 inline bool core_util_atomic_exchange(volatile bool *valuePtr, bool arg)
1180 {
1181  return core_util_atomic_exchange_bool(valuePtr, arg);
1182 }
1183 
1184 template<>
1185 inline bool core_util_atomic_exchange_explicit(volatile bool *valuePtr, bool arg, mbed_memory_order order)
1186 {
1187  return core_util_atomic_exchange_explicit_bool(valuePtr, arg, order);
1188 }
1189 
1190 template<typename T>
1191 inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *arg)
1192 {
1193  return (T *) core_util_atomic_exchange_ptr((void *volatile *) valuePtr, arg);
1194 }
1195 
1196 template<typename T>
1197 inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *arg, mbed_memory_order order)
1198 {
1199  return (T *) core_util_atomic_exchange_explicit_ptr((void *volatile *) valuePtr, arg, order);
1200 }
1201 
1202 template<typename T>
1203 inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg)
1204 {
1205  return (T *) core_util_atomic_fetch_add_ptr((void *volatile *) valuePtr, arg * sizeof(T));
1206 }
1207 
1208 template<typename T>
1209 inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
1210 {
1211  return (T *) core_util_atomic_fetch_add_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order);
1212 }
1213 
1214 template<typename T>
1215 inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg)
1216 {
1217  return (T *) core_util_atomic_fetch_sub_ptr((void *volatile *) valuePtr, arg * sizeof(T));
1218 }
1219 
1220 template<typename T>
1221 inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
1222 {
1223  return (T *) core_util_atomic_fetch_sub_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order);
1224 }
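/* Note that the pointer templates above scale by the element size, matching
 * ordinary pointer arithmetic (sketch with a hypothetical shared cursor):
 *
 *   uint32_t *volatile tail;                                 // shared cursor
 *   uint32_t *prev = core_util_atomic_fetch_add(&tail, 4);   // advances tail by 4 elements (16 bytes)
 */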
1225 
1226 
1227 #define DO_MBED_ATOMIC_OP_U_TEMPLATES(name) \
1228  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint8_t, u8) \
1229  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint16_t, u16) \
1230  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint32_t, u32) \
1231  DO_MBED_ATOMIC_OP_TEMPLATE(name, uint64_t, u64)
1232 
1233 #define DO_MBED_ATOMIC_OP_S_TEMPLATES(name) \
1234  DO_MBED_ATOMIC_OP_TEMPLATE(name, int8_t, s8) \
1235  DO_MBED_ATOMIC_OP_TEMPLATE(name, int16_t, s16) \
1236  DO_MBED_ATOMIC_OP_TEMPLATE(name, int32_t, s32) \
1237  DO_MBED_ATOMIC_OP_TEMPLATE(name, int64_t, s64)
1238 
1239 DO_MBED_ATOMIC_OP_U_TEMPLATES(exchange)
1240 DO_MBED_ATOMIC_OP_S_TEMPLATES(exchange)
1241 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_add)
1242 DO_MBED_ATOMIC_OP_S_TEMPLATES(fetch_add)
1243 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_sub)
1244 DO_MBED_ATOMIC_OP_S_TEMPLATES(fetch_sub)
1245 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_and)
1246 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_or)
1247 DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_xor)
1248 
1249 #endif // __cplusplus
1250 
1251 #undef MBED_DOP_REG
1252 #undef MBED_CMP_IMM
1253 #undef MBED_SUB3_IMM
1254 #undef DO_MBED_LOCKFREE_EXCHG_ASM
1255 #undef DO_MBED_LOCKFREE_3OP_ASM
1256 #undef DO_MBED_LOCKFREE_2OP_ASM
1257 #undef DO_MBED_LOCKFREE_CAS_WEAK_ASM
1258 #undef DO_MBED_LOCKFREE_CAS_STRONG_ASM
1259 #undef DO_MBED_LOCKFREE_LOADSTORE
1260 #undef DO_MBED_LOCKFREE_EXCHG_OP
1261 #undef DO_MBED_LOCKFREE_CAS_WEAK_OP
1262 #undef DO_MBED_LOCKFREE_CAS_STRONG_OP
1263 #undef DO_MBED_LOCKFREE_2OP
1264 #undef DO_MBED_LOCKFREE_3OP
1265 #undef DO_MBED_LOCKFREE_EXCHG_OPS
1266 #undef DO_MBED_LOCKFREE_2OPS
1267 #undef DO_MBED_LOCKFREE_3OPS
1268 #undef DO_MBED_LOCKFREE_CAS_WEAK_OPS
1269 #undef DO_MBED_LOCKFREE_CAS_STRONG_OPS
1270 #undef DO_MBED_SIGNED_CAS_OP
1271 #undef DO_MBED_SIGNED_CAS_OPS
1272 #undef DO_MBED_SIGNED_FETCH_OP
1273 #undef DO_MBED_SIGNED_EXPLICIT_FETCH_OP
1274 #undef DO_MBED_SIGNED_FETCH_OPS
1275 #undef DO_MBED_SIGNED_EXPLICIT_FETCH_OPS
1276 #undef DO_MBED_LOCKED_FETCH_OP_ORDERINGS
1277 #undef DO_MBED_LOCKED_CAS_ORDERINGS
1278 #undef MBED_ACQUIRE_BARRIER
1279 #undef MBED_RELEASE_BARRIER
1280 #undef MBED_SEQ_CST_BARRIER
1281 #undef DO_MBED_ATOMIC_LOAD_TEMPLATE
1282 #undef DO_MBED_ATOMIC_STORE_TEMPLATE
1283 #undef DO_MBED_ATOMIC_EXCHANGE_TEMPLATE
1284 #undef DO_MBED_ATOMIC_CAS_TEMPLATE
1285 #undef DO_MBED_ATOMIC_CAS_TEMPLATES
1286 #undef DO_MBED_ATOMIC_FETCH_TEMPLATE
1287 #undef DO_MBED_ATOMIC_FETCH_U_TEMPLATES
1288 #undef DO_MBED_ATOMIC_FETCH_S_TEMPLATES
1289 
1290 #endif