rtx_core_cm.h
/*
 * Copyright (c) 2013-2018 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * -----------------------------------------------------------------------------
 *
 * Project: CMSIS-RTOS RTX
 * Title: Cortex-M Core definitions
 *
 * -----------------------------------------------------------------------------
 */

#ifndef RTX_CORE_CM_H_
#define RTX_CORE_CM_H_

#ifndef RTX_CORE_C_H_
#include "RTE_Components.h"
#include CMSIS_device_header
#endif

#include <stdbool.h>
typedef bool bool_t;
#define FALSE (0)
#define TRUE (1)

#ifdef RTE_CMSIS_RTOS2_RTX5_ARMV8M_NS
#define DOMAIN_NS 1
#endif

#ifndef DOMAIN_NS
#define DOMAIN_NS 0
#endif

#if (DOMAIN_NS == 1)
#if ((!defined(__ARM_ARCH_8M_BASE__) || (__ARM_ARCH_8M_BASE__ == 0)) && \
     (!defined(__ARM_ARCH_8M_MAIN__) || (__ARM_ARCH_8M_MAIN__ == 0)))
#error "Non-secure domain requires ARMv8-M Architecture!"
#endif
#endif

#ifndef EXCLUSIVE_ACCESS
#if ((defined(__ARM_ARCH_7M__) && (__ARM_ARCH_7M__ != 0)) || \
     (defined(__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ != 0)) || \
     (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0)) || \
     (defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ != 0)))
#define EXCLUSIVE_ACCESS 1
#else
#define EXCLUSIVE_ACCESS 0
#endif
#endif

#define OS_TICK_HANDLER SysTick_Handler

/// xPSR_Initialization Value
/// \param[in] privileged true=privileged, false=unprivileged
/// \param[in] thumb true=Thumb, false=ARM
/// \return xPSR Init Value
__STATIC_INLINE uint32_t xPSR_InitVal (bool_t privileged, bool_t thumb) {
  (void)privileged;
  (void)thumb;
  return (0x01000000U);
}
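
// Note: 0x01000000U sets only the EPSR T (Thumb) bit of the initial xPSR.
// Cortex-M cores execute Thumb code exclusively, which is why the privileged
// and thumb arguments are accepted but intentionally unused here.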

// Stack Frame:
// - Extended: S16-S31, R4-R11, R0-R3, R12, LR, PC, xPSR, S0-S15, FPSCR
// - Basic: R4-R11, R0-R3, R12, LR, PC, xPSR

/// Stack Frame Initialization Value (EXC_RETURN[7..0])
#if (DOMAIN_NS == 1)
#define STACK_FRAME_INIT_VAL 0xBCU
#else
#define STACK_FRAME_INIT_VAL 0xFDU
#endif

/// Stack Offset of Register R0
/// \param[in] stack_frame Stack Frame (EXC_RETURN[7..0])
/// \return R0 Offset
__STATIC_INLINE uint32_t StackOffsetR0 (uint8_t stack_frame) {
#if (__FPU_USED == 1U)
  return (((stack_frame & 0x10U) == 0U) ? ((16U+8U)*4U) : (8U*4U));
#else
  (void)stack_frame;
  return (8U*4U);
#endif
}
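
// For example, with the FPU in use and an extended frame (EXC_RETURN bit 4
// clear), R0 sits above the software-saved S16-S31 and R4-R11, i.e. at
// (16+8)*4 = 96 bytes from the stack pointer; with a basic frame only R4-R11
// lie below it, giving 8*4 = 32 bytes.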


// ==== Core functions ====

//lint -sem(__get_CONTROL, pure)
//lint -sem(__get_IPSR, pure)
//lint -sem(__get_PRIMASK, pure)
//lint -sem(__get_BASEPRI, pure)

/// Check if running Privileged
/// \return true=privileged, false=unprivileged
__STATIC_INLINE bool_t IsPrivileged (void) {
  return ((__get_CONTROL() & 1U) == 0U);
}

/// Check if in IRQ Mode
/// \return true=IRQ, false=thread
__STATIC_INLINE bool_t IsIrqMode (void) {
  return (__get_IPSR() != 0U);
}

/// Check if IRQ is Masked
/// \return true=masked, false=not masked
__STATIC_INLINE bool_t IsIrqMasked (void) {
#if ((defined(__ARM_ARCH_7M__) && (__ARM_ARCH_7M__ != 0)) || \
     (defined(__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ != 0)) || \
     (defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ != 0)))
  return ((__get_PRIMASK() != 0U) || (__get_BASEPRI() != 0U));
#else
  return (__get_PRIMASK() != 0U);
#endif
}
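
// Illustrative use (not part of this header): RTOS service wrappers typically
// reject calls made from interrupt context or with interrupts masked, e.g.
//   if (IsIrqMode() || IsIrqMasked()) { return osErrorISR; }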


// ==== Core Peripherals functions ====

/// Setup SVC and PendSV System Service Calls
__STATIC_INLINE void SVC_Setup (void) {
#if ((defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ != 0)) || \
     (defined(__CORTEX_M) && (__CORTEX_M == 7U)))
  uint32_t p, n;

  SCB->SHPR[10] = 0xFFU;
  n = 32U - (uint32_t)__CLZ(~(SCB->SHPR[10] | 0xFFFFFF00U));
  p = NVIC_GetPriorityGrouping();
  if (p >= n) {
    n = p + 1U;
  }
  SCB->SHPR[7] = (uint8_t)(0xFEU << n);
#elif (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
  uint32_t n;

  SCB->SHPR[1] |= 0x00FF0000U;
  n = SCB->SHPR[1];
  SCB->SHPR[0] |= (n << (8+1)) & 0xFC000000U;
#elif ((defined(__ARM_ARCH_7M__) && (__ARM_ARCH_7M__ != 0)) || \
       (defined(__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ != 0)))
  uint32_t p, n;

  SCB->SHP[10] = 0xFFU;
  n = 32U - (uint32_t)__CLZ(~(SCB->SHP[10] | 0xFFFFFF00U));
  p = NVIC_GetPriorityGrouping();
  if (p >= n) {
    n = p + 1U;
  }
  SCB->SHP[7] = (uint8_t)(0xFEU << n);
#elif (defined(__ARM_ARCH_6M__) && (__ARM_ARCH_6M__ != 0))
  uint32_t n;

  SCB->SHP[1] |= 0x00FF0000U;
  n = SCB->SHP[1];
  SCB->SHP[0] |= (n << (8+1)) & 0xFC000000U;
#endif
}
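
// SVC_Setup() assigns PendSV the lowest implemented priority and SVCall a
// priority one level above it, so the context-switch exception (PendSV) never
// preempts a service call (SVC) handler.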

/// Get Pending SV (Service Call) Flag
/// \return Pending SV Flag
__STATIC_INLINE uint8_t GetPendSV (void) {
  return ((uint8_t)((SCB->ICSR & (SCB_ICSR_PENDSVSET_Msk)) >> 24));
}

/// Clear Pending SV (Service Call) Flag
__STATIC_INLINE void ClrPendSV (void) {
  SCB->ICSR = SCB_ICSR_PENDSVCLR_Msk;
}

/// Set Pending SV (Service Call) Flag
__STATIC_INLINE void SetPendSV (void) {
  SCB->ICSR = SCB_ICSR_PENDSVSET_Msk;
}


// ==== Service Calls definitions ====

//lint -save -e9023 -e9024 -e9026 "Function-like macros using '#/##'" [MISRA Note 10]

#if defined(__CC_ARM)

#if ((defined(__ARM_ARCH_7M__) && (__ARM_ARCH_7M__ != 0)) || \
     (defined(__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ != 0)) || \
     (defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ != 0)))
#define __SVC_INDIRECT(n) __svc_indirect(n)
#elif ((defined(__ARM_ARCH_6M__) && (__ARM_ARCH_6M__ != 0)) || \
       (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0)))
#define __SVC_INDIRECT(n) __svc_indirect_r7(n)
#endif

#define SVC0_0N(f,t) \
__SVC_INDIRECT(0) t svc##f (t(*)()); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (void) { \
  svc##f(svcRtx##f); \
}

#define SVC0_0(f,t) \
__SVC_INDIRECT(0) t svc##f (t(*)()); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (void) { \
  return svc##f(svcRtx##f); \
}

#define SVC0_1N(f,t,t1) \
__SVC_INDIRECT(0) t svc##f (t(*)(t1),t1); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1) { \
  svc##f(svcRtx##f,a1); \
}

#define SVC0_1(f,t,t1) \
__SVC_INDIRECT(0) t svc##f (t(*)(t1),t1); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1) { \
  return svc##f(svcRtx##f,a1); \
}

#define SVC0_2(f,t,t1,t2) \
__SVC_INDIRECT(0) t svc##f (t(*)(t1,t2),t1,t2); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \
  return svc##f(svcRtx##f,a1,a2); \
}

#define SVC0_3(f,t,t1,t2,t3) \
__SVC_INDIRECT(0) t svc##f (t(*)(t1,t2,t3),t1,t2,t3); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) { \
  return svc##f(svcRtx##f,a1,a2,a3); \
}

#define SVC0_4(f,t,t1,t2,t3,t4) \
__SVC_INDIRECT(0) t svc##f (t(*)(t1,t2,t3,t4),t1,t2,t3,t4); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \
  return svc##f(svcRtx##f,a1,a2,a3,a4); \
}

#elif defined(__ICCARM__)

#if ((defined(__ARM_ARCH_7M__) && (__ARM_ARCH_7M__ != 0)) || \
     (defined(__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ != 0)) || \
     (defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ != 0)))
#define SVC_ArgF(f) \
  __asm( \
    "mov r12,%0\n" \
    :: "r"(&f): "r12" \
  );
#elif ((defined(__ARM_ARCH_6M__) && (__ARM_ARCH_6M__ != 0)) || \
       (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0)))
#define SVC_ArgF(f) \
  __asm( \
    "mov r7,%0\n" \
    :: "r"(&f): "r7" \
  );
#endif

#define STRINGIFY(a) #a
#define __SVC_INDIRECT(n) _Pragma(STRINGIFY(swi_number = n)) __swi

#define SVC0_0N(f,t) \
__SVC_INDIRECT(0) t svc##f (); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (void) { \
  SVC_ArgF(svcRtx##f); \
  svc##f(); \
}

#define SVC0_0(f,t) \
__SVC_INDIRECT(0) t svc##f (); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (void) { \
  SVC_ArgF(svcRtx##f); \
  return svc##f(); \
}

#define SVC0_1N(f,t,t1) \
__SVC_INDIRECT(0) t svc##f (t1 a1); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1) { \
  SVC_ArgF(svcRtx##f); \
  svc##f(a1); \
}

#define SVC0_1(f,t,t1) \
__SVC_INDIRECT(0) t svc##f (t1 a1); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1) { \
  SVC_ArgF(svcRtx##f); \
  return svc##f(a1); \
}

#define SVC0_2(f,t,t1,t2) \
__SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \
  SVC_ArgF(svcRtx##f); \
  return svc##f(a1,a2); \
}

#define SVC0_3(f,t,t1,t2,t3) \
__SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2, t3 a3); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) { \
  SVC_ArgF(svcRtx##f); \
  return svc##f(a1,a2,a3); \
}

#define SVC0_4(f,t,t1,t2,t3,t4) \
__SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2, t3 a3, t4 a4); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \
  SVC_ArgF(svcRtx##f); \
  return svc##f(a1,a2,a3,a4); \
}

#else // !(defined(__CC_ARM) || defined(__ICCARM__))

//lint -esym(522,__svc*) "Functions '__svc*' are impure (side-effects)"

#if ((defined(__ARM_ARCH_7M__) && (__ARM_ARCH_7M__ != 0)) || \
     (defined(__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ != 0)) || \
     (defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ != 0)))
#define SVC_RegF "r12"
#elif ((defined(__ARM_ARCH_6M__) && (__ARM_ARCH_6M__ != 0)) || \
       (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0)))
#define SVC_RegF "r7"
#endif

#define SVC_ArgN(n) \
register uint32_t __r##n __ASM("r"#n)

#define SVC_ArgR(n,a) \
register uint32_t __r##n __ASM("r"#n) = (uint32_t)a

#define SVC_ArgF(f) \
register uint32_t __rf __ASM(SVC_RegF) = (uint32_t)f

#define SVC_In0 "r"(__rf)
#define SVC_In1 "r"(__rf),"r"(__r0)
#define SVC_In2 "r"(__rf),"r"(__r0),"r"(__r1)
#define SVC_In3 "r"(__rf),"r"(__r0),"r"(__r1),"r"(__r2)
#define SVC_In4 "r"(__rf),"r"(__r0),"r"(__r1),"r"(__r2),"r"(__r3)

#define SVC_Out0
#define SVC_Out1 "=r"(__r0)

#define SVC_CL0
#define SVC_CL1 "r1"
#define SVC_CL2 "r0","r1"

#define SVC_Call0(in, out, cl) \
  __ASM volatile ("svc 0" : out : in : cl)

#define SVC0_0N(f,t) \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (void) { \
  SVC_ArgF(svcRtx##f); \
  SVC_Call0(SVC_In0, SVC_Out0, SVC_CL2); \
}

#define SVC0_0(f,t) \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (void) { \
  SVC_ArgN(0); \
  SVC_ArgF(svcRtx##f); \
  SVC_Call0(SVC_In0, SVC_Out1, SVC_CL1); \
  return (t) __r0; \
}

#define SVC0_1N(f,t,t1) \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1) { \
  SVC_ArgR(0,a1); \
  SVC_ArgF(svcRtx##f); \
  SVC_Call0(SVC_In1, SVC_Out0, SVC_CL1); \
}

#define SVC0_1(f,t,t1) \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1) { \
  SVC_ArgR(0,a1); \
  SVC_ArgF(svcRtx##f); \
  SVC_Call0(SVC_In1, SVC_Out1, SVC_CL1); \
  return (t) __r0; \
}

#define SVC0_2(f,t,t1,t2) \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \
  SVC_ArgR(0,a1); \
  SVC_ArgR(1,a2); \
  SVC_ArgF(svcRtx##f); \
  SVC_Call0(SVC_In2, SVC_Out1, SVC_CL0); \
  return (t) __r0; \
}

#define SVC0_3(f,t,t1,t2,t3) \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) { \
  SVC_ArgR(0,a1); \
  SVC_ArgR(1,a2); \
  SVC_ArgR(2,a3); \
  SVC_ArgF(svcRtx##f); \
  SVC_Call0(SVC_In3, SVC_Out1, SVC_CL0); \
  return (t) __r0; \
}

#define SVC0_4(f,t,t1,t2,t3,t4) \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \
  SVC_ArgR(0,a1); \
  SVC_ArgR(1,a2); \
  SVC_ArgR(2,a3); \
  SVC_ArgR(3,a4); \
  SVC_ArgF(svcRtx##f); \
  SVC_Call0(SVC_In4, SVC_Out1, SVC_CL0); \
  return (t) __r0; \
}

#endif

//lint -restore [MISRA Note 10]
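
// Usage sketch (hypothetical function name): a declaration such as
//   SVC0_1(Example, uint32_t, uint32_t)
// generates __svcExample(), which places the address of svcRtxExample in R12
// (or R7 on ARMv6-M/ARMv8-M Baseline), passes the argument in R0, issues
// "SVC 0", and returns the value left in R0 by the SVC handler, which uses
// the function-address register to dispatch the requested service.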


// ==== Exclusive Access Operation ====

#if (EXCLUSIVE_ACCESS == 1)

//lint ++flb "Library Begin" [MISRA Note 12]

/// Atomic Access Operation: Write (8-bit)
/// \param[in] mem Memory address
/// \param[in] val Value to write
/// \return Previous value
#if defined(__CC_ARM)
static __asm uint8_t atomic_wr8 (uint8_t *mem, uint8_t val) {
  mov r2,r0
1
  ldrexb r0,[r2]
  strexb r3,r1,[r2]
  cbz r3,%F2
  b %B1
2
  bx lr
}
#else
__STATIC_INLINE uint8_t atomic_wr8 (uint8_t *mem, uint8_t val) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint8_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrexb %[ret],[%[mem]]\n\t"
  "strexb %[res],%[val],[%[mem]]\n\t"
  "cbz %[res],2f\n\t"
  "b 1b\n"
  "2:"
  : [ret] "=&l" (ret),
    [res] "=&l" (res)
  : [mem] "l" (mem),
    [val] "l" (val)
  : "memory"
  );

  return ret;
}
#endif
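
// All atomic operations below follow the same LDREX/STREX pattern: the value
// is loaded exclusively, modified, and stored back; if the exclusive store
// fails because another access intervened, the sequence is retried until it
// succeeds, yielding a lock-free read-modify-write.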

/// Atomic Access Operation: Set bits (32-bit)
/// \param[in] mem Memory address
/// \param[in] bits Bit mask
/// \return New value
#if defined(__CC_ARM)
static __asm uint32_t atomic_set32 (uint32_t *mem, uint32_t bits) {
  mov r2,r0
1
  ldrex r0,[r2]
  orr r0,r0,r1
  strex r3,r0,[r2]
  cbz r3,%F2
  b %B1
2
  bx lr
}
#else
__STATIC_INLINE uint32_t atomic_set32 (uint32_t *mem, uint32_t bits) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrex %[val],[%[mem]]\n\t"
#if (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
  "mov %[ret],%[val]\n\t"
  "orrs %[ret],%[bits]\n\t"
#else
  "orr %[ret],%[val],%[bits]\n\t"
#endif
  "strex %[res],%[ret],[%[mem]]\n\t"
  "cbz %[res],2f\n\t"
  "b 1b\n"
  "2:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l" (mem),
    [bits] "l" (bits)
#if (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
  : "memory", "cc"
#else
  : "memory"
#endif
  );

  return ret;
}
#endif

/// Atomic Access Operation: Clear bits (32-bit)
/// \param[in] mem Memory address
/// \param[in] bits Bit mask
/// \return Previous value
#if defined(__CC_ARM)
static __asm uint32_t atomic_clr32 (uint32_t *mem, uint32_t bits) {
  push {r4,lr}
  mov r2,r0
1
  ldrex r0,[r2]
  bic r4,r0,r1
  strex r3,r4,[r2]
  cbz r3,%F2
  b %B1
2
  pop {r4,pc}
}
#else
__STATIC_INLINE uint32_t atomic_clr32 (uint32_t *mem, uint32_t bits) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrex %[ret],[%[mem]]\n\t"
#if (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
  "mov %[val],%[ret]\n\t"
  "bics %[val],%[bits]\n\t"
#else
  "bic %[val],%[ret],%[bits]\n\t"
#endif
  "strex %[res],%[val],[%[mem]]\n\t"
  "cbz %[res],2f\n\t"
  "b 1b\n"
  "2:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l" (mem),
    [bits] "l" (bits)
#if (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
  : "memory", "cc"
#else
  : "memory"
#endif
  );

  return ret;
}
#endif

/// Atomic Access Operation: Check if all specified bits (32-bit) are active and clear them
/// \param[in] mem Memory address
/// \param[in] bits Bit mask
/// \return Active bits before clearing or 0 if not active
#if defined(__CC_ARM)
static __asm uint32_t atomic_chk32_all (uint32_t *mem, uint32_t bits) {
  push {r4,lr}
  mov r2,r0
1
  ldrex r0,[r2]
  and r4,r0,r1
  cmp r4,r1
  beq %F2
  clrex
  movs r0,#0
  pop {r4,pc}
2
  bic r4,r0,r1
  strex r3,r4,[r2]
  cbz r3,%F3
  b %B1
3
  pop {r4,pc}
}
#else
__STATIC_INLINE uint32_t atomic_chk32_all (uint32_t *mem, uint32_t bits) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrex %[ret],[%[mem]]\n\t"
#if (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
  "mov %[val],%[ret]\n\t"
  "ands %[val],%[bits]\n\t"
#else
  "and %[val],%[ret],%[bits]\n\t"
#endif
  "cmp %[val],%[bits]\n\t"
  "beq 2f\n\t"
  "clrex\n\t"
  "movs %[ret],#0\n\t"
  "b 3f\n"
  "2:\n\t"
#if (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
  "mov %[val],%[ret]\n\t"
  "bics %[val],%[bits]\n\t"
#else
  "bic %[val],%[ret],%[bits]\n\t"
#endif
  "strex %[res],%[val],[%[mem]]\n\t"
  "cbz %[res],3f\n\t"
  "b 1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l" (mem),
    [bits] "l" (bits)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Check if any specified bits (32-bit) are active and clear them
/// \param[in] mem Memory address
/// \param[in] bits Bit mask
/// \return Active bits before clearing or 0 if not active
#if defined(__CC_ARM)
static __asm uint32_t atomic_chk32_any (uint32_t *mem, uint32_t bits) {
  push {r4,lr}
  mov r2,r0
1
  ldrex r0,[r2]
  tst r0,r1
  bne %F2
  clrex
  movs r0,#0
  pop {r4,pc}
2
  bic r4,r0,r1
  strex r3,r4,[r2]
  cbz r3,%F3
  b %B1
3
  pop {r4,pc}
}
#else
__STATIC_INLINE uint32_t atomic_chk32_any (uint32_t *mem, uint32_t bits) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrex %[ret],[%[mem]]\n\t"
  "tst %[ret],%[bits]\n\t"
  "bne 2f\n\t"
  "clrex\n\t"
  "movs %[ret],#0\n\t"
  "b 3f\n"
  "2:\n\t"
#if (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
  "mov %[val],%[ret]\n\t"
  "bics %[val],%[bits]\n\t"
#else
  "bic %[val],%[ret],%[bits]\n\t"
#endif
  "strex %[res],%[val],[%[mem]]\n\t"
  "cbz %[res],3f\n\t"
  "b 1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l" (mem),
    [bits] "l" (bits)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Increment (32-bit)
/// \param[in] mem Memory address
/// \return Previous value
#if defined(__CC_ARM)
static __asm uint32_t atomic_inc32 (uint32_t *mem) {
  mov r2,r0
1
  ldrex r0,[r2]
  adds r1,r0,#1
  strex r3,r1,[r2]
  cbz r3,%F2
  b %B1
2
  bx lr
}
#else
__STATIC_INLINE uint32_t atomic_inc32 (uint32_t *mem) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrex %[ret],[%[mem]]\n\t"
  "adds %[val],%[ret],#1\n\t"
  "strex %[res],%[val],[%[mem]]\n\t"
  "cbz %[res],2f\n\t"
  "b 1b\n"
  "2:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l" (mem)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Increment (16-bit) if Less Than
/// \param[in] mem Memory address
/// \param[in] max Maximum value
/// \return Previous value
#if defined(__CC_ARM)
static __asm uint16_t atomic_inc16_lt (uint16_t *mem, uint16_t max) {
  push {r4,lr}
  mov r2,r0
1
  ldrexh r0,[r2]
  cmp r1,r0
  bhi %F2
  clrex
  pop {r4,pc}
2
  adds r4,r0,#1
  strexh r3,r4,[r2]
  cbz r3,%F3
  b %B1
3
  pop {r4,pc}
}
#else
__STATIC_INLINE uint16_t atomic_inc16_lt (uint16_t *mem, uint16_t max) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint16_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrexh %[ret],[%[mem]]\n\t"
  "cmp %[max],%[ret]\n\t"
  "bhi 2f\n\t"
  "clrex\n\t"
  "b 3f\n"
  "2:\n\t"
  "adds %[val],%[ret],#1\n\t"
  "strexh %[res],%[val],[%[mem]]\n\t"
  "cbz %[res],3f\n\t"
  "b 1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l" (mem),
    [max] "l" (max)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Increment (16-bit) and clear on Limit
/// \param[in] mem Memory address
/// \param[in] lim Limit value
/// \return Previous value
#if defined(__CC_ARM)
static __asm uint16_t atomic_inc16_lim (uint16_t *mem, uint16_t lim) {
  push {r4,lr}
  mov r2,r0
1
  ldrexh r0,[r2]
  adds r4,r0,#1
  cmp r1,r4
  bhi %F2
  movs r4,#0
2
  strexh r3,r4,[r2]
  cbz r3,%F3
  b %B1
3
  pop {r4,pc}
}
#else
__STATIC_INLINE uint16_t atomic_inc16_lim (uint16_t *mem, uint16_t lim) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint16_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrexh %[ret],[%[mem]]\n\t"
  "adds %[val],%[ret],#1\n\t"
  "cmp %[lim],%[val]\n\t"
  "bhi 2f\n\t"
  "movs %[val],#0\n"
  "2:\n\t"
  "strexh %[res],%[val],[%[mem]]\n\t"
  "cbz %[res],3f\n\t"
  "b 1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l" (mem),
    [lim] "l" (lim)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Decrement (32-bit)
/// \param[in] mem Memory address
/// \return Previous value
#if defined(__CC_ARM)
static __asm uint32_t atomic_dec32 (uint32_t *mem) {
  mov r2,r0
1
  ldrex r0,[r2]
  subs r1,r0,#1
  strex r3,r1,[r2]
  cbz r3,%F2
  b %B1
2
  bx lr
}
#else
__STATIC_INLINE uint32_t atomic_dec32 (uint32_t *mem) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrex %[ret],[%[mem]]\n\t"
  "subs %[val],%[ret],#1\n\t"
  "strex %[res],%[val],[%[mem]]\n\t"
  "cbz %[res],2f\n\t"
  "b 1b\n"
  "2:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l" (mem)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Decrement (32-bit) if Not Zero
/// \param[in] mem Memory address
/// \return Previous value
#if defined(__CC_ARM)
static __asm uint32_t atomic_dec32_nz (uint32_t *mem) {
  mov r2,r0
1
  ldrex r0,[r2]
  cbnz r0,%F2
  clrex
  bx lr
2
  subs r1,r0,#1
  strex r3,r1,[r2]
  cbz r3,%F3
  b %B1
3
  bx lr
}
#else
__STATIC_INLINE uint32_t atomic_dec32_nz (uint32_t *mem) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrex %[ret],[%[mem]]\n\t"
  "cbnz %[ret],2f\n\t"
  "clrex\n\t"
  "b 3f\n"
  "2:\n\t"
  "subs %[val],%[ret],#1\n\t"
  "strex %[res],%[val],[%[mem]]\n\t"
  "cbz %[res],3f\n\t"
  "b 1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l" (mem)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Decrement (16-bit) if Not Zero
/// \param[in] mem Memory address
/// \return Previous value
#if defined(__CC_ARM)
static __asm uint16_t atomic_dec16_nz (uint16_t *mem) {
  mov r2,r0
1
  ldrexh r0,[r2]
  cbnz r0,%F2
  clrex
  bx lr
2
  subs r1,r0,#1
  strexh r3,r1,[r2]
  cbz r3,%F3
  b %B1
3
  bx lr
}
#else
__STATIC_INLINE uint16_t atomic_dec16_nz (uint16_t *mem) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint16_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrexh %[ret],[%[mem]]\n\t"
  "cbnz %[ret],2f\n\t"
  "clrex\n\t"
  "b 3f\n"
  "2:\n\t"
  "subs %[val],%[ret],#1\n\t"
  "strexh %[res],%[val],[%[mem]]\n\t"
  "cbz %[res],3f\n\t"
  "b 1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l" (mem)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Link Get
/// \param[in] root Root address
/// \return Link
#if defined(__CC_ARM)
static __asm void *atomic_link_get (void **root) {
  mov r2,r0
1
  ldrex r0,[r2]
  cbnz r0,%F2
  clrex
  bx lr
2
  ldr r1,[r0]
  strex r3,r1,[r2]
  cbz r3,%F3
  b %B1
3
  bx lr
}
#else
__STATIC_INLINE void *atomic_link_get (void **root) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register void *ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrex %[ret],[%[root]]\n\t"
  "cbnz %[ret],2f\n\t"
  "clrex\n\t"
  "b 3f\n"
  "2:\n\t"
  "ldr %[val],[%[ret]]\n\t"
  "strex %[res],%[val],[%[root]]\n\t"
  "cbz %[res],3f\n\t"
  "b 1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [root] "l" (root)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Link Put
/// \param[in] root Root address
/// \param[in] link Link
#if defined(__CC_ARM)
static __asm void atomic_link_put (void **root, void *link) {
1
  ldr r2,[r0]
  str r2,[r1]
  dmb
  ldrex r2,[r0]
  ldr r3,[r1]
  cmp r3,r2
  bne %B1
  strex r3,r1,[r0]
  cbz r3,%F2
  b %B1
2
  bx lr
}
#else
__STATIC_INLINE void atomic_link_put (void **root, void *link) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val1, val2, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldr %[val1],[%[root]]\n\t"
  "str %[val1],[%[link]]\n\t"
  "dmb\n\t"
  "ldrex %[val1],[%[root]]\n\t"
  "ldr %[val2],[%[link]]\n\t"
  "cmp %[val2],%[val1]\n\t"
  "bne 1b\n\t"
  "strex %[res],%[link],[%[root]]\n\t"
  "cbz %[res],2f\n\t"
  "b 1b\n"
  "2:"
  : [val1] "=&l" (val1),
    [val2] "=&l" (val2),
    [res] "=&l" (res)
  : [root] "l" (root),
    [link] "l" (link)
  : "cc", "memory"
  );
}
#endif
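
// atomic_link_get()/atomic_link_put() maintain a lock-free LIFO singly linked
// list: put() stores the current root in the first word of the new element and
// makes that element the new root; get() removes the head element and advances
// the root to the element it pointed to, returning NULL when the list is empty.
// Illustrative use (hypothetical list variable):
//   void *blk = atomic_link_get(&free_list);  // pop head, NULL if empty
//   atomic_link_put(&free_list, blk);         // push the element back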

//lint --flb "Library End" [MISRA Note 12]

#endif // (EXCLUSIVE_ACCESS == 1)


#endif // RTX_CORE_CM_H_