rtx_core_ca.h
/*
 * Copyright (c) 2013-2019 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * -----------------------------------------------------------------------------
 *
 * Project:     CMSIS-RTOS RTX
 * Title:       Cortex-A Core definitions
 *
 * -----------------------------------------------------------------------------
 */

#ifndef RTX_CORE_CA_H_
#define RTX_CORE_CA_H_

#ifndef RTX_CORE_C_H_
#include "RTE_Components.h"
#include CMSIS_device_header
#endif

#include <stdbool.h>
typedef bool bool_t;
#define FALSE                   ((bool_t)0)
#define TRUE                    ((bool_t)1)

#define DOMAIN_NS               0
#define EXCLUSIVE_ACCESS        1

#define OS_TICK_HANDLER         osRtxTick_Handler

// CPSR bit definitions
#define CPSR_T_BIT              0x20U
#define CPSR_I_BIT              0x80U
#define CPSR_F_BIT              0x40U

// CPSR mode bitmasks
#define CPSR_MODE_USER          0x10U
#define CPSR_MODE_SYSTEM        0x1FU

/// xPSR_Initialization Value
/// \param[in]  privileged      true=privileged, false=unprivileged
/// \param[in]  thumb           true=Thumb, false=Arm
/// \return                     xPSR Init Value
__STATIC_INLINE uint32_t xPSR_InitVal (bool_t privileged, bool_t thumb) {
  uint32_t psr;

  if (privileged) {
    if (thumb) {
      psr = CPSR_MODE_SYSTEM | CPSR_T_BIT;
    } else {
      psr = CPSR_MODE_SYSTEM;
    }
  } else {
    if (thumb) {
      psr = CPSR_MODE_USER | CPSR_T_BIT;
    } else {
      psr = CPSR_MODE_USER;
    }
  }

  return psr;
}
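
// Example values produced by xPSR_InitVal (illustrative):
//   xPSR_InitVal(TRUE,  TRUE)  -> 0x3FU  (System mode, Thumb state)
//   xPSR_InitVal(TRUE,  FALSE) -> 0x1FU  (System mode, Arm state)
//   xPSR_InitVal(FALSE, FALSE) -> 0x10U  (User mode,   Arm state)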

// Stack Frame:
//  - VFP-D32: D16-31, D0-D15, FPSCR, Reserved, R4-R11, R0-R3, R12, LR, PC, CPSR
//  - VFP-D16:         D0-D15, FPSCR, Reserved, R4-R11, R0-R3, R12, LR, PC, CPSR
//  - Basic:                                    R4-R11, R0-R3, R12, LR, PC, CPSR

/// Stack Frame Initialization Value
#define STACK_FRAME_INIT_VAL    0x00U

/// Stack Offset of Register R0
/// \param[in]  stack_frame     Stack Frame
/// \return                     R0 Offset
__STATIC_INLINE uint32_t StackOffsetR0 (uint8_t stack_frame) {
  uint32_t offset;

  if        ((stack_frame & 0x04U) != 0U) {
    offset = (32U*8U) + (2U*4U) + (8U*4U);
  } else if ((stack_frame & 0x02U) != 0U) {
    offset = (16U*8U) + (2U*4U) + (8U*4U);
  } else {
    offset =                      (8U*4U);
  }
  return offset;
}
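
// Worked example of the R0 offsets above (in bytes):
//   VFP-D32: 32 double regs (32*8) + FPSCR/Reserved (2*4) + R4-R11 (8*4) = 296
//   VFP-D16: 16 double regs (16*8) + FPSCR/Reserved (2*4) + R4-R11 (8*4) = 168
//   Basic:                                                  R4-R11 (8*4) =  32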


// ==== Emulated Cortex-M functions ====

/// Get PSP Register - emulate M profile: SP_usr - (8*4)
/// \return     PSP Register value
#if defined(__CC_ARM)
#pragma push
#pragma arm
static __asm uint32_t __get_PSP (void) {
  sub   sp, sp, #4
  stm   sp, {sp}^
  pop   {r0}
  sub   r0, r0, #32
  bx    lr
}
#pragma pop
#else
#ifdef __ICCARM__
__arm
#else
__attribute__((target("arm")))
#endif
__STATIC_INLINE uint32_t __get_PSP (void) {
  register uint32_t ret;

  __ASM volatile (
    "sub  sp,sp,#4\n\t"
    "stm  sp,{sp}^\n\t"
    "pop  {%[ret]}\n\t"
    "sub  %[ret],%[ret],#32\n\t"
    : [ret] "=&l" (ret)
    :
    : "memory"
  );

  return ret;
}
#endif
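
// Note on the emulation above: "stm sp,{sp}^" stores the banked User/System mode
// SP into the scratch word reserved on the current stack; that value is popped
// into the result register and 32 bytes (8 words: R0-R3, R12, LR, PC, xPSR) are
// subtracted, so the result matches the PSP an M-profile core would report after
// exception stacking.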

/// Set Control Register - not needed for A profile
/// \param[in]  control         Control Register value to set
__STATIC_INLINE void __set_CONTROL(uint32_t control) {
  (void)control;
}


// ==== Core functions ====

/// Check if running Privileged
/// \return     true=privileged, false=unprivileged
__STATIC_INLINE bool_t IsPrivileged (void) {
  return (__get_mode() != CPSR_MODE_USER);
}

/// Check if in IRQ Mode
/// \return     true=IRQ, false=thread
__STATIC_INLINE bool_t IsIrqMode (void) {
  return ((__get_mode() != CPSR_MODE_USER) && (__get_mode() != CPSR_MODE_SYSTEM));
}

/// Check if IRQ is Masked
/// \return     true=masked, false=not masked
__STATIC_INLINE bool_t IsIrqMasked (void) {
  return FALSE;
}


// ==== Core Peripherals functions ====

extern uint8_t IRQ_PendSV;

/// Setup SVC and PendSV System Service Calls (not needed on Cortex-A)
__STATIC_INLINE void SVC_Setup (void) {
}

/// Get Pending SV (Service Call) Flag
/// \return     Pending SV Flag
__STATIC_INLINE uint8_t GetPendSV (void) {
  return (IRQ_PendSV);
}

/// Clear Pending SV (Service Call) Flag
__STATIC_INLINE void ClrPendSV (void) {
  IRQ_PendSV = 0U;
}

/// Set Pending SV (Service Call) Flag
__STATIC_INLINE void SetPendSV (void) {
  IRQ_PendSV = 1U;
}
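
// Usage sketch (illustrative; the surrounding handler code is hypothetical):
// Cortex-A has no PendSV exception, so the kernel pends a context switch by
// setting the IRQ_PendSV software flag, which interrupt handling code polls:
//
//   if (GetPendSV() != 0U) {
//     ClrPendSV();
//     /* perform the deferred context switch here */
//   }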


// ==== Service Calls definitions ====

#if defined(__CC_ARM)

#define __SVC_INDIRECT(n) __svc_indirect(n)

#define SVC0_0N(f,t) \
__SVC_INDIRECT(0) t svc##f (t(*)()); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (void) { \
  svc##f(svcRtx##f); \
}

#define SVC0_0(f,t) \
__SVC_INDIRECT(0) t svc##f (t(*)()); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (void) { \
  return svc##f(svcRtx##f); \
}

#define SVC0_1N(f,t,t1) \
__SVC_INDIRECT(0) t svc##f (t(*)(t1),t1); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1) { \
  svc##f(svcRtx##f,a1); \
}

#define SVC0_1(f,t,t1) \
__SVC_INDIRECT(0) t svc##f (t(*)(t1),t1); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1) { \
  return svc##f(svcRtx##f,a1); \
}

#define SVC0_2(f,t,t1,t2) \
__SVC_INDIRECT(0) t svc##f (t(*)(t1,t2),t1,t2); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \
  return svc##f(svcRtx##f,a1,a2); \
}

#define SVC0_3(f,t,t1,t2,t3) \
__SVC_INDIRECT(0) t svc##f (t(*)(t1,t2,t3),t1,t2,t3); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) { \
  return svc##f(svcRtx##f,a1,a2,a3); \
}

#define SVC0_4(f,t,t1,t2,t3,t4) \
__SVC_INDIRECT(0) t svc##f (t(*)(t1,t2,t3,t4),t1,t2,t3,t4); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \
  return svc##f(svcRtx##f,a1,a2,a3,a4); \
}

#elif defined(__ICCARM__)

#define SVC_ArgF(f) \
  __asm( \
    "mov r12,%0\n" \
    :: "r"(&f): "r12" \
  );

#define STRINGIFY(a) #a
#define __SVC_INDIRECT(n) _Pragma(STRINGIFY(swi_number = n)) __swi

#define SVC0_0N(f,t) \
__SVC_INDIRECT(0) t svc##f (); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (void) { \
  SVC_ArgF(svcRtx##f); \
  svc##f(); \
}

#define SVC0_0(f,t) \
__SVC_INDIRECT(0) t svc##f (); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (void) { \
  SVC_ArgF(svcRtx##f); \
  return svc##f(); \
}

#define SVC0_1N(f,t,t1) \
__SVC_INDIRECT(0) t svc##f (t1 a1); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1) { \
  SVC_ArgF(svcRtx##f); \
  svc##f(a1); \
}

#define SVC0_1(f,t,t1) \
__SVC_INDIRECT(0) t svc##f (t1 a1); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1) { \
  SVC_ArgF(svcRtx##f); \
  return svc##f(a1); \
}

#define SVC0_2(f,t,t1,t2) \
__SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \
  SVC_ArgF(svcRtx##f); \
  return svc##f(a1,a2); \
}

#define SVC0_3(f,t,t1,t2,t3) \
__SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2, t3 a3); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) { \
  SVC_ArgF(svcRtx##f); \
  return svc##f(a1,a2,a3); \
}

#define SVC0_4(f,t,t1,t2,t3,t4) \
__SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2, t3 a3, t4 a4); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \
  SVC_ArgF(svcRtx##f); \
  return svc##f(a1,a2,a3,a4); \
}

#else // !(defined(__CC_ARM) || defined(__ICCARM__))

#define SVC_RegF "r12"

#define SVC_ArgN(n) \
register uint32_t __r##n __ASM("r"#n)

#define SVC_ArgR(n,a) \
register uint32_t __r##n __ASM("r"#n) = (uint32_t)a

#define SVC_ArgF(f) \
register uint32_t __rf   __ASM(SVC_RegF) = (uint32_t)f

#define SVC_In0 "r"(__rf)
#define SVC_In1 "r"(__rf),"r"(__r0)
#define SVC_In2 "r"(__rf),"r"(__r0),"r"(__r1)
#define SVC_In3 "r"(__rf),"r"(__r0),"r"(__r1),"r"(__r2)
#define SVC_In4 "r"(__rf),"r"(__r0),"r"(__r1),"r"(__r2),"r"(__r3)

#define SVC_Out0
#define SVC_Out1 "=r"(__r0)

#define SVC_CL0
#define SVC_CL1 "r1"
#define SVC_CL2 "r0","r1"

#define SVC_Call0(in, out, cl) \
  __ASM volatile ("svc 0" : out : in : cl)

#define SVC0_0N(f,t) \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (void) { \
  SVC_ArgF(svcRtx##f); \
  SVC_Call0(SVC_In0, SVC_Out0, SVC_CL2); \
}

#define SVC0_0(f,t) \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (void) { \
  SVC_ArgN(0); \
  SVC_ArgF(svcRtx##f); \
  SVC_Call0(SVC_In0, SVC_Out1, SVC_CL1); \
  return (t) __r0; \
}

#define SVC0_1N(f,t,t1) \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1) { \
  SVC_ArgR(0,a1); \
  SVC_ArgF(svcRtx##f); \
  SVC_Call0(SVC_In1, SVC_Out0, SVC_CL1); \
}

#define SVC0_1(f,t,t1) \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1) { \
  SVC_ArgR(0,a1); \
  SVC_ArgF(svcRtx##f); \
  SVC_Call0(SVC_In1, SVC_Out1, SVC_CL1); \
  return (t) __r0; \
}

#define SVC0_2(f,t,t1,t2) \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \
  SVC_ArgR(0,a1); \
  SVC_ArgR(1,a2); \
  SVC_ArgF(svcRtx##f); \
  SVC_Call0(SVC_In2, SVC_Out1, SVC_CL0); \
  return (t) __r0; \
}

#define SVC0_3(f,t,t1,t2,t3) \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) { \
  SVC_ArgR(0,a1); \
  SVC_ArgR(1,a2); \
  SVC_ArgR(2,a3); \
  SVC_ArgF(svcRtx##f); \
  SVC_Call0(SVC_In3, SVC_Out1, SVC_CL0); \
  return (t) __r0; \
}

#define SVC0_4(f,t,t1,t2,t3,t4) \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \
  SVC_ArgR(0,a1); \
  SVC_ArgR(1,a2); \
  SVC_ArgR(2,a3); \
  SVC_ArgR(3,a4); \
  SVC_ArgF(svcRtx##f); \
  SVC_Call0(SVC_In4, SVC_Out1, SVC_CL0); \
  return (t) __r0; \
}

#endif
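
// Expansion example (illustrative; "Example"/"svcRtxExample" are hypothetical
// names): with the GCC-style macros above, SVC0_1(Example, uint32_t, uint32_t)
// produces roughly:
//
//   __attribute__((always_inline))
//   __STATIC_INLINE uint32_t __svcExample (uint32_t a1) {
//     register uint32_t __r0 __ASM("r0")  = (uint32_t)a1;
//     register uint32_t __rf __ASM("r12") = (uint32_t)svcRtxExample;
//     __ASM volatile ("svc 0" : "=r"(__r0) : "r"(__rf),"r"(__r0) : "r1");
//     return (uint32_t) __r0;
//   }
//
// i.e. the address of the kernel service is passed in R12 and the SVC handler
// dispatches the call indirectly.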


// ==== Exclusive Access Operation ====

#if (EXCLUSIVE_ACCESS == 1)

/// Atomic Access Operation: Write (8-bit)
/// \param[in]  mem             Memory address
/// \param[in]  val             Value to write
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm uint8_t atomic_wr8 (uint8_t *mem, uint8_t val) {
  mov    r2,r0
1
  ldrexb r0,[r2]
  strexb r3,r1,[r2]
  cmp    r3,#0
  bne    %B1
  bx     lr
}
#else
__STATIC_INLINE uint8_t atomic_wr8 (uint8_t *mem, uint8_t val) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint8_t  ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrexb %[ret],[%[mem]]\n\t"
  "strexb %[res],%[val],[%[mem]]\n\t"
  "cmp    %[res],#0\n\t"
  "bne    1b\n\t"
  : [ret] "=&l" (ret),
    [res] "=&l" (res)
  : [mem] "l"   (mem),
    [val] "l"   (val)
  : "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Set bits (32-bit)
/// \param[in]  mem             Memory address
/// \param[in]  bits            Bit mask
/// \return                     New value
#if defined(__CC_ARM)
static __asm uint32_t atomic_set32 (uint32_t *mem, uint32_t bits) {
  mov   r2,r0
1
  ldrex r0,[r2]
  orr   r0,r0,r1
  strex r3,r0,[r2]
  cmp   r3,#0
  bne   %B1
  bx    lr
}
#else
__STATIC_INLINE uint32_t atomic_set32 (uint32_t *mem, uint32_t bits) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrex %[val],[%[mem]]\n\t"
  "orr   %[ret],%[val],%[bits]\n\t"
  "strex %[res],%[ret],[%[mem]]\n\t"
  "cmp   %[res],#0\n\t"
  "bne   1b\n"
  : [ret]  "=&l" (ret),
    [val]  "=&l" (val),
    [res]  "=&l" (res)
  : [mem]  "l"   (mem),
    [bits] "l"   (bits)
  : "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Clear bits (32-bit)
/// \param[in]  mem             Memory address
/// \param[in]  bits            Bit mask
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm uint32_t atomic_clr32 (uint32_t *mem, uint32_t bits) {
  push  {r4,lr}
  mov   r2,r0
1
  ldrex r0,[r2]
  bic   r4,r0,r1
  strex r3,r4,[r2]
  cmp   r3,#0
  bne   %B1
  pop   {r4,pc}
}
#else
__STATIC_INLINE uint32_t atomic_clr32 (uint32_t *mem, uint32_t bits) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrex %[ret],[%[mem]]\n\t"
  "bic   %[val],%[ret],%[bits]\n\t"
  "strex %[res],%[val],[%[mem]]\n\t"
  "cmp   %[res],#0\n\t"
  "bne   1b\n"
  : [ret]  "=&l" (ret),
    [val]  "=&l" (val),
    [res]  "=&l" (res)
  : [mem]  "l"   (mem),
    [bits] "l"   (bits)
  : "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Check if all specified bits (32-bit) are active and clear them
/// \param[in]  mem             Memory address
/// \param[in]  bits            Bit mask
/// \return                     Active bits before clearing or 0 if not active
#if defined(__CC_ARM)
static __asm uint32_t atomic_chk32_all (uint32_t *mem, uint32_t bits) {
  push  {r4,lr}
  mov   r2,r0
1
  ldrex r0,[r2]
  and   r4,r0,r1
  cmp   r4,r1
  beq   %F2
  clrex
  movs  r0,#0
  pop   {r4,pc}
2
  bic   r4,r0,r1
  strex r3,r4,[r2]
  cmp   r3,#0
  bne   %B1
  pop   {r4,pc}
}
#else
__STATIC_INLINE uint32_t atomic_chk32_all (uint32_t *mem, uint32_t bits) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrex %[ret],[%[mem]]\n\t"
  "and   %[val],%[ret],%[bits]\n\t"
  "cmp   %[val],%[bits]\n\t"
  "beq   2f\n\t"
  "clrex\n\t"
  "movs  %[ret],#0\n\t"
  "b     3f\n"
  "2:\n\t"
  "bic   %[val],%[ret],%[bits]\n\t"
  "strex %[res],%[val],[%[mem]]\n\t"
  "cmp   %[res],#0\n\t"
  "bne   1b\n"
  "3:"
  : [ret]  "=&l" (ret),
    [val]  "=&l" (val),
    [res]  "=&l" (res)
  : [mem]  "l"   (mem),
    [bits] "l"   (bits)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Check if any specified bits (32-bit) are active and clear them
/// \param[in]  mem             Memory address
/// \param[in]  bits            Bit mask
/// \return                     Active bits before clearing or 0 if not active
#if defined(__CC_ARM)
static __asm uint32_t atomic_chk32_any (uint32_t *mem, uint32_t bits) {
  push  {r4,lr}
  mov   r2,r0
1
  ldrex r0,[r2]
  tst   r0,r1
  bne   %F2
  clrex
  movs  r0,#0
  pop   {r4,pc}
2
  bic   r4,r0,r1
  strex r3,r4,[r2]
  cmp   r3,#0
  bne   %B1
  pop   {r4,pc}
}
#else
__STATIC_INLINE uint32_t atomic_chk32_any (uint32_t *mem, uint32_t bits) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrex %[ret],[%[mem]]\n\t"
  "tst   %[ret],%[bits]\n\t"
  "bne   2f\n\t"
  "clrex\n\t"
  "movs  %[ret],#0\n\t"
  "b     3f\n"
  "2:\n\t"
  "bic   %[val],%[ret],%[bits]\n\t"
  "strex %[res],%[val],[%[mem]]\n\t"
  "cmp   %[res],#0\n\t"
  "bne   1b\n"
  "3:"
  : [ret]  "=&l" (ret),
    [val]  "=&l" (val),
    [res]  "=&l" (res)
  : [mem]  "l"   (mem),
    [bits] "l"   (bits)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Increment (32-bit)
/// \param[in]  mem             Memory address
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm uint32_t atomic_inc32 (uint32_t *mem) {
  mov   r2,r0
1
  ldrex r0,[r2]
  adds  r1,r0,#1
  strex r3,r1,[r2]
  cmp   r3,#0
  bne   %B1
  bx    lr
}
#else
__STATIC_INLINE uint32_t atomic_inc32 (uint32_t *mem) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrex %[ret],[%[mem]]\n\t"
  "adds  %[val],%[ret],#1\n\t"
  "strex %[res],%[val],[%[mem]]\n\t"
  "cmp   %[res],#0\n\t"
  "bne   1b\n"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Increment (16-bit) if Less Than
/// \param[in]  mem             Memory address
/// \param[in]  max             Maximum value
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm uint16_t atomic_inc16_lt (uint16_t *mem, uint16_t max) {
  push   {r4,lr}
  mov    r2,r0
1
  ldrexh r0,[r2]
  cmp    r1,r0
  bhi    %F2
  clrex
  pop    {r4,pc}
2
  adds   r4,r0,#1
  strexh r3,r4,[r2]
  cmp    r3,#0
  bne    %B1
  pop    {r4,pc}
}
#else
__STATIC_INLINE uint16_t atomic_inc16_lt (uint16_t *mem, uint16_t max) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint16_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrexh %[ret],[%[mem]]\n\t"
  "cmp    %[max],%[ret]\n\t"
  "bhi    2f\n\t"
  "clrex\n\t"
  "b      3f\n"
  "2:\n\t"
  "adds   %[val],%[ret],#1\n\t"
  "strexh %[res],%[val],[%[mem]]\n\t"
  "cmp    %[res],#0\n\t"
  "bne    1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem),
    [max] "l"   (max)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Increment (16-bit) and clear on Limit
/// \param[in]  mem             Memory address
/// \param[in]  lim             Limit value
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm uint16_t atomic_inc16_lim (uint16_t *mem, uint16_t lim) {
  push   {r4,lr}
  mov    r2,r0
1
  ldrexh r0,[r2]
  adds   r4,r0,#1
  cmp    r1,r4
  bhi    %F2
  movs   r4,#0
2
  strexh r3,r4,[r2]
  cmp    r3,#0
  bne    %B1
  pop    {r4,pc}
}
#else
__STATIC_INLINE uint16_t atomic_inc16_lim (uint16_t *mem, uint16_t lim) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint16_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrexh %[ret],[%[mem]]\n\t"
  "adds   %[val],%[ret],#1\n\t"
  "cmp    %[lim],%[val]\n\t"
  "bhi    2f\n\t"
  "movs   %[val],#0\n"
  "2:\n\t"
  "strexh %[res],%[val],[%[mem]]\n\t"
  "cmp    %[res],#0\n\t"
  "bne    1b\n"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem),
    [lim] "l"   (lim)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Decrement (32-bit)
/// \param[in]  mem             Memory address
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm uint32_t atomic_dec32 (uint32_t *mem) {
  mov   r2,r0
1
  ldrex r0,[r2]
  subs  r1,r0,#1
  strex r3,r1,[r2]
  cmp   r3,#0
  bne   %B1
  bx    lr
}
#else
__STATIC_INLINE uint32_t atomic_dec32 (uint32_t *mem) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrex %[ret],[%[mem]]\n\t"
  "subs  %[val],%[ret],#1\n\t"
  "strex %[res],%[val],[%[mem]]\n\t"
  "cmp   %[res],#0\n\t"
  "bne   1b\n"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Decrement (32-bit) if Not Zero
/// \param[in]  mem             Memory address
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm uint32_t atomic_dec32_nz (uint32_t *mem) {
  mov   r2,r0
1
  ldrex r0,[r2]
  cmp   r0,#0
  bne   %F2
  clrex
  bx    lr
2
  subs  r1,r0,#1
  strex r3,r1,[r2]
  cmp   r3,#0
  bne   %B1
  bx    lr
}
#else
__STATIC_INLINE uint32_t atomic_dec32_nz (uint32_t *mem) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrex %[ret],[%[mem]]\n\t"
  "cmp   %[ret],#0\n\t"
  "bne   2f\n"
  "clrex\n\t"
  "b     3f\n"
  "2:\n\t"
  "subs  %[val],%[ret],#1\n\t"
  "strex %[res],%[val],[%[mem]]\n\t"
  "cmp   %[res],#0\n\t"
  "bne   1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Decrement (16-bit) if Not Zero
/// \param[in]  mem             Memory address
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm uint16_t atomic_dec16_nz (uint16_t *mem) {
  mov    r2,r0
1
  ldrexh r0,[r2]
  cmp    r0,#0
  bne    %F2
  clrex
  bx     lr
2
  subs   r1,r0,#1
  strexh r3,r1,[r2]
  cmp    r3,#0
  bne    %B1
  bx     lr
}
#else
__STATIC_INLINE uint16_t atomic_dec16_nz (uint16_t *mem) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint16_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrexh %[ret],[%[mem]]\n\t"
  "cmp    %[ret],#0\n\t"
  "bne    2f\n\t"
  "clrex\n\t"
  "b      3f\n"
  "2:\n\t"
  "subs   %[val],%[ret],#1\n\t"
  "strexh %[res],%[val],[%[mem]]\n\t"
  "cmp    %[res],#0\n\t"
  "bne    1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Link Get
/// \param[in]  root            Root address
/// \return                     Link
#if defined(__CC_ARM)
static __asm void *atomic_link_get (void **root) {
  mov   r2,r0
1
  ldrex r0,[r2]
  cmp   r0,#0
  bne   %F2
  clrex
  bx    lr
2
  ldr   r1,[r0]
  strex r3,r1,[r2]
  cmp   r3,#0
  bne   %B1
  bx    lr
}
#else
__STATIC_INLINE void *atomic_link_get (void **root) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif
  register void    *ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldrex %[ret],[%[root]]\n\t"
  "cmp   %[ret],#0\n\t"
  "bne   2f\n\t"
  "clrex\n\t"
  "b     3f\n"
  "2:\n\t"
  "ldr   %[val],[%[ret]]\n\t"
  "strex %[res],%[val],[%[root]]\n\t"
  "cmp   %[res],#0\n\t"
  "bne   1b\n"
  "3:"
  : [ret]  "=&l" (ret),
    [val]  "=&l" (val),
    [res]  "=&l" (res)
  : [root] "l"   (root)
  : "cc", "memory"
  );

  return ret;
}
#endif

/// Atomic Access Operation: Link Put
/// \param[in]  root            Root address
/// \param[in]  link            Link
#if defined(__CC_ARM)
static __asm void atomic_link_put (void **root, void *link) {
1
  ldr   r2,[r0]
  str   r2,[r1]
  dmb
  ldrex r2,[r0]
  ldr   r3,[r1]
  cmp   r3,r2
  bne   %B1
  strex r3,r1,[r0]
  cmp   r3,#0
  bne   %B1
  bx    lr
}
#else
__STATIC_INLINE void atomic_link_put (void **root, void *link) {
#ifdef __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val1, val2, res;
#ifdef __ICCARM__
#pragma diag_default=Pe550
#endif

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
  "ldr   %[val1],[%[root]]\n\t"
  "str   %[val1],[%[link]]\n\t"
  "dmb\n\t"
  "ldrex %[val1],[%[root]]\n\t"
  "ldr   %[val2],[%[link]]\n\t"
  "cmp   %[val2],%[val1]\n\t"
  "bne   1b\n\t"
  "strex %[res],%[link],[%[root]]\n\t"
  "cmp   %[res],#0\n\t"
  "bne   1b\n"
  : [val1] "=&l" (val1),
    [val2] "=&l" (val2),
    [res]  "=&l" (res)
  : [root] "l"   (root),
    [link] "l"   (link)
  : "cc", "memory"
  );
}
#endif
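
// Usage sketch (illustrative; "free_list" and the node layout are hypothetical):
// atomic_link_put/atomic_link_get maintain a lock-free LIFO list whose first
// word of each element is the link pointer:
//
//   static void *free_list = NULL;                  // list root
//   atomic_link_put(&free_list, node);              // push: node links to old head
//   void *head = atomic_link_get(&free_list);       // pop head, NULL if empty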

#endif  // (EXCLUSIVE_ACCESS == 1)


#endif  // RTX_CORE_CA_H_