#ifndef RTX_CORE_CM_H_
#define RTX_CORE_CM_H_

#include "RTE_Components.h"
#include CMSIS_device_header

#include <stdbool.h>
typedef bool bool_t;

#ifdef RTE_CMSIS_RTOS2_RTX5_ARMV8M_NS
#define DOMAIN_NS             1
#endif

#ifndef DOMAIN_NS
#define DOMAIN_NS             0
#endif

#if (DOMAIN_NS == 1)
#if ((!defined(__ARM_ARCH_8M_BASE__) || (__ARM_ARCH_8M_BASE__ == 0)) && \
     (!defined(__ARM_ARCH_8M_MAIN__) || (__ARM_ARCH_8M_MAIN__ == 0)))
#error "Non-secure domain requires ARMv8-M Architecture!"
#endif
#endif

#ifndef EXCLUSIVE_ACCESS
#if ((defined(__ARM_ARCH_7M__)      && (__ARM_ARCH_7M__      != 0)) || \
     (defined(__ARM_ARCH_7EM__)     && (__ARM_ARCH_7EM__     != 0)) || \
     (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0)) || \
     (defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ != 0)))
#define EXCLUSIVE_ACCESS      1
#else
#define EXCLUSIVE_ACCESS      0
#endif
#endif

#define OS_TICK_HANDLER       SysTick_Handler

/// xPSR Initialization Value
/// \param[in]  privileged      true=privileged, false=unprivileged
/// \param[in]  thumb           true=Thumb, false=ARM
/// \return                     xPSR Init Value
__STATIC_INLINE uint32_t xPSR_InitVal (bool_t privileged, bool_t thumb) {
  (void)privileged;
  (void)thumb;
  // Only the T-bit (bit 24) is set: Cortex-M always executes in Thumb state.
  return (0x01000000U);
}
// Stack Frame:
//  - Extended: S16-S31, R4-R11, R0-R3, R12, LR, PC, xPSR, S0-S15, FPSCR
//  - Basic:             R4-R11, R0-R3, R12, LR, PC, xPSR

/// Stack Frame Initialization Value (EXC_RETURN[7..0])
#if (DOMAIN_NS == 1)
#define STACK_FRAME_INIT_VAL  0xBCU
#else
#define STACK_FRAME_INIT_VAL  0xFDU
#endif

/// Stack Offset of Register R0
/// \param[in]  stack_frame     Stack Frame (EXC_RETURN[7..0])
/// \return                     R0 Offset
__STATIC_INLINE uint32_t StackOffsetR0 (uint8_t stack_frame) {
#if (__FPU_USED == 1U)
  // Bit 4 of EXC_RETURN is clear for an extended (FP) stack frame.
  return (((stack_frame & 0x10U) == 0U) ? ((16U+8U)*4U) : (8U*4U));
#else
  (void)stack_frame;
  return (8U*4U);
#endif
}
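//  For illustration (not part of the original header): with the FPU in use,
//  an extended frame stores S16-S31 below R4-R11, so R0 ends up 96 bytes
//  above the saved stack pointer, versus 32 bytes for a basic frame:
//
//    // uint32_t *r0 = (uint32_t *)(sp + StackOffsetR0(stack_frame));
//    // (sp is a hypothetical saved thread stack pointer)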
//  ==== Core functions ====

/// Check if running Privileged
/// \return     true=privileged, false=unprivileged
__STATIC_INLINE bool_t IsPrivileged (void) {
  // CONTROL.nPRIV (bit 0) is 0 in privileged Thread mode.
  return ((__get_CONTROL() & 1U) == 0U);
}
/// Check if in IRQ Mode
/// \return     true=IRQ, false=thread
__STATIC_INLINE bool_t IsIrqMode (void) {
  // IPSR holds the active exception number; 0 means Thread mode.
  return (__get_IPSR() != 0U);
}
/// Check if IRQ is Masked
/// \return     true=masked, false=not masked
__STATIC_INLINE bool_t IsIrqMasked (void) {
#if ((defined(__ARM_ARCH_7M__)      && (__ARM_ARCH_7M__      != 0)) || \
     (defined(__ARM_ARCH_7EM__)     && (__ARM_ARCH_7EM__     != 0)) || \
     (defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ != 0)))
  return ((__get_PRIMASK() != 0U) || (__get_BASEPRI() != 0U));
#else
  return (__get_PRIMASK() != 0U);
#endif
}
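//  These predicates let API wrappers pick a dispatch path (sketch only;
//  osFoo, svcRtxFoo and __svcFoo are placeholder names following the RTX
//  naming pattern, not definitions from this header):
//
//    // uint32_t osFoo (uint32_t arg) {
//    //   if (IsIrqMode() || IsIrqMasked()) {
//    //     return svcRtxFoo(arg);      // call the service routine directly
//    //   } else {
//    //     return __svcFoo(arg);       // trap through SVC
//    //   }
//    // }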
//  ==== Core Peripherals functions ====

/// Setup SVC and PendSV System Service Calls
__STATIC_INLINE void SVC_Setup (void) {
#if ((defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ != 0)) || \
     (defined(__CORTEX_M)           && (__CORTEX_M == 7U)))
  uint32_t p, n;

  SCB->SHPR[10] = 0xFFU;
  n = 32U - (uint32_t)__CLZ(~(SCB->SHPR[10] | 0xFFFFFF00U));
  p = NVIC_GetPriorityGrouping();
  if (p >= n) {
    n = p + 1U;
  }
  SCB->SHPR[7] = (uint8_t)(0xFEU << n);
#elif (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
  uint32_t n;

  SCB->SHPR[1] |= 0x00FF0000U;
  n = SCB->SHPR[1];
  SCB->SHPR[0] |= (n << (8+1)) & 0xFC000000U;
#elif ((defined(__ARM_ARCH_7M__)  && (__ARM_ARCH_7M__  != 0)) || \
       (defined(__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ != 0)))
  uint32_t p, n;

  SCB->SHP[10] = 0xFFU;
  n = 32U - (uint32_t)__CLZ(~(SCB->SHP[10] | 0xFFFFFF00U));
  p = NVIC_GetPriorityGrouping();
  if (p >= n) {
    n = p + 1U;
  }
  SCB->SHP[7] = (uint8_t)(0xFEU << n);
#elif (defined(__ARM_ARCH_6M__)   && (__ARM_ARCH_6M__  != 0))
  uint32_t n;

  SCB->SHP[1] |= 0x00FF0000U;
  n = SCB->SHP[1];
  SCB->SHP[0] |= (n << (8+1)) & 0xFC000000U;
#endif
}
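//  Worked example (illustrative, assuming 3 implemented priority bits and a
//  priority grouping below 5): writing 0xFF to PendSV's priority reads back
//  as 0xE0, so n = 32 - CLZ(0x1F) = 5. PendSV keeps the lowest priority
//  (0xE0) while SVCall gets (uint8_t)(0xFE << 5) = 0xC0, exactly one
//  implemented level higher, so SVC handling preempts a pending context
//  switch but nothing else.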
/// Get Pending SV (Service Call) Flag
/// \return     Pending SV Flag
__STATIC_INLINE uint8_t GetPendSV (void) {
  return ((uint8_t)((SCB->ICSR & (SCB_ICSR_PENDSVSET_Msk)) >> 24));
}
/// Clear Pending SV (Service Call) Flag
__STATIC_INLINE void ClrPendSV (void) {
  SCB->ICSR = SCB_ICSR_PENDSVCLR_Msk;
}
/// Set Pending SV (Service Call) Flag
__STATIC_INLINE void SetPendSV (void) {
  SCB->ICSR = SCB_ICSR_PENDSVSET_Msk;
}
//  ==== Service Calls definitions ====

#if defined(__CC_ARM)

#if   ((defined(__ARM_ARCH_7M__)      && (__ARM_ARCH_7M__      != 0)) || \
       (defined(__ARM_ARCH_7EM__)     && (__ARM_ARCH_7EM__     != 0)) || \
       (defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ != 0)))
#define __SVC_INDIRECT(n)  __svc_indirect(n)
#elif ((defined(__ARM_ARCH_6M__)      && (__ARM_ARCH_6M__      != 0)) || \
       (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0)))
#define __SVC_INDIRECT(n)  __svc_indirect_r7(n)
#endif

#define SVC0_0N(f,t)                                      \
__SVC_INDIRECT(0) t    svc##f (t(*)());                   \
__attribute__((always_inline))                            \
__STATIC_INLINE   t  __svc##f (void) {                    \
  svc##f(svcRtx##f);                                      \
}

#define SVC0_0(f,t)                                       \
__SVC_INDIRECT(0) t    svc##f (t(*)());                   \
__attribute__((always_inline))                            \
__STATIC_INLINE   t  __svc##f (void) {                    \
  return svc##f(svcRtx##f);                               \
}

#define SVC0_1N(f,t,t1)                                   \
__SVC_INDIRECT(0) t    svc##f (t(*)(t1),t1);              \
__attribute__((always_inline))                            \
__STATIC_INLINE   t  __svc##f (t1 a1) {                   \
  svc##f(svcRtx##f,a1);                                   \
}

#define SVC0_1(f,t,t1)                                    \
__SVC_INDIRECT(0) t    svc##f (t(*)(t1),t1);              \
__attribute__((always_inline))                            \
__STATIC_INLINE   t  __svc##f (t1 a1) {                   \
  return svc##f(svcRtx##f,a1);                            \
}

#define SVC0_2(f,t,t1,t2)                                 \
__SVC_INDIRECT(0) t    svc##f (t(*)(t1,t2),t1,t2);        \
__attribute__((always_inline))                            \
__STATIC_INLINE   t  __svc##f (t1 a1, t2 a2) {            \
  return svc##f(svcRtx##f,a1,a2);                         \
}

#define SVC0_3(f,t,t1,t2,t3)                              \
__SVC_INDIRECT(0) t    svc##f (t(*)(t1,t2,t3),t1,t2,t3);  \
__attribute__((always_inline))                            \
__STATIC_INLINE   t  __svc##f (t1 a1, t2 a2, t3 a3) {     \
  return svc##f(svcRtx##f,a1,a2,a3);                      \
}

#define SVC0_4(f,t,t1,t2,t3,t4)                           \
__SVC_INDIRECT(0) t    svc##f (t(*)(t1,t2,t3,t4),t1,t2,t3,t4); \
__attribute__((always_inline))                            \
__STATIC_INLINE   t  __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \
  return svc##f(svcRtx##f,a1,a2,a3,a4);                   \
}

#elif defined(__ICCARM__)

#if   ((defined(__ARM_ARCH_7M__)      && (__ARM_ARCH_7M__      != 0)) || \
       (defined(__ARM_ARCH_7EM__)     && (__ARM_ARCH_7EM__     != 0)) || \
       (defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ != 0)))
#define SVC_ArgF(f)                                       \
  __asm(                                                  \
    "mov r12,%0\n"                                        \
    :: "r"(&f): "r12"                                     \
  );
#elif ((defined(__ARM_ARCH_6M__)      && (__ARM_ARCH_6M__      != 0)) || \
       (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0)))
#define SVC_ArgF(f)                                       \
  __asm(                                                  \
    "mov r7,%0\n"                                         \
    :: "r"(&f): "r7"                                      \
  );
#endif

#define STRINGIFY(a)       #a
#define __SVC_INDIRECT(n)  _Pragma(STRINGIFY(swi_number = n)) __swi

#define SVC0_0N(f,t)                                      \
__SVC_INDIRECT(0) t    svc##f ();                         \
__attribute__((always_inline))                            \
__STATIC_INLINE   t  __svc##f (void) {                    \
  SVC_ArgF(svcRtx##f);                                    \
  svc##f();                                               \
}

#define SVC0_0(f,t)                                       \
__SVC_INDIRECT(0) t    svc##f ();                         \
__attribute__((always_inline))                            \
__STATIC_INLINE   t  __svc##f (void) {                    \
  SVC_ArgF(svcRtx##f);                                    \
  return svc##f();                                        \
}

#define SVC0_1N(f,t,t1)                                   \
__SVC_INDIRECT(0) t    svc##f (t1 a1);                    \
__attribute__((always_inline))                            \
__STATIC_INLINE   t  __svc##f (t1 a1) {                   \
  SVC_ArgF(svcRtx##f);                                    \
  svc##f(a1);                                             \
}

#define SVC0_1(f,t,t1)                                    \
__SVC_INDIRECT(0) t    svc##f (t1 a1);                    \
__attribute__((always_inline))                            \
__STATIC_INLINE   t  __svc##f (t1 a1) {                   \
  SVC_ArgF(svcRtx##f);                                    \
  return svc##f(a1);                                      \
}

#define SVC0_2(f,t,t1,t2)                                 \
__SVC_INDIRECT(0) t    svc##f (t1 a1, t2 a2);             \
__attribute__((always_inline))                            \
__STATIC_INLINE   t  __svc##f (t1 a1, t2 a2) {            \
  SVC_ArgF(svcRtx##f);                                    \
  return svc##f(a1,a2);                                   \
}

#define SVC0_3(f,t,t1,t2,t3)                              \
__SVC_INDIRECT(0) t    svc##f (t1 a1, t2 a2, t3 a3);      \
__attribute__((always_inline))                            \
__STATIC_INLINE   t  __svc##f (t1 a1, t2 a2, t3 a3) {     \
  SVC_ArgF(svcRtx##f);                                    \
  return svc##f(a1,a2,a3);                                \
}

#define SVC0_4(f,t,t1,t2,t3,t4)                           \
__SVC_INDIRECT(0) t    svc##f (t1 a1, t2 a2, t3 a3, t4 a4); \
__attribute__((always_inline))                            \
__STATIC_INLINE   t  __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \
  SVC_ArgF(svcRtx##f);                                    \
  return svc##f(a1,a2,a3,a4);                             \
}

#else   // !(defined(__CC_ARM) || defined(__ICCARM__))

// Function register: carries the address of the service routine
// (svcRtx##f) that the SVC handler dispatches to.
#if   ((defined(__ARM_ARCH_7M__)      && (__ARM_ARCH_7M__      != 0)) || \
       (defined(__ARM_ARCH_7EM__)     && (__ARM_ARCH_7EM__     != 0)) || \
       (defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ != 0)))
#define SVC_RegF "r12"
#elif ((defined(__ARM_ARCH_6M__)      && (__ARM_ARCH_6M__      != 0)) || \
       (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0)))
#define SVC_RegF "r7"
#endif

#define SVC_ArgN(n) \
register uint32_t __r##n __ASM("r"#n)

#define SVC_ArgR(n,a) \
register uint32_t __r##n __ASM("r"#n) = (uint32_t)a

#define SVC_ArgF(f) \
register uint32_t __rf   __ASM(SVC_RegF) = (uint32_t)f

#define SVC_In0 "r"(__rf)
#define SVC_In1 "r"(__rf),"r"(__r0)
#define SVC_In2 "r"(__rf),"r"(__r0),"r"(__r1)
#define SVC_In3 "r"(__rf),"r"(__r0),"r"(__r1),"r"(__r2)
#define SVC_In4 "r"(__rf),"r"(__r0),"r"(__r1),"r"(__r2),"r"(__r3)

#define SVC_Out0
#define SVC_Out1 "=r"(__r0)

#define SVC_CL0
#define SVC_CL1 "r1"
#define SVC_CL2 "r0","r1"

#define SVC_Call0(in, out, cl) \
  __ASM volatile ("svc 0" : out : in : cl)

#define SVC0_0N(f,t)                                      \
__attribute__((always_inline))                            \
__STATIC_INLINE t __svc##f (void) {                       \
  SVC_ArgF(svcRtx##f);                                    \
  SVC_Call0(SVC_In0, SVC_Out0, SVC_CL2);                  \
}

#define SVC0_0(f,t)                                       \
__attribute__((always_inline))                            \
__STATIC_INLINE t __svc##f (void) {                       \
  SVC_ArgN(0);                                            \
  SVC_ArgF(svcRtx##f);                                    \
  SVC_Call0(SVC_In0, SVC_Out1, SVC_CL1);                  \
  return (t) __r0;                                        \
}

#define SVC0_1N(f,t,t1)                                   \
__attribute__((always_inline))                            \
__STATIC_INLINE t __svc##f (t1 a1) {                      \
  SVC_ArgR(0,a1);                                         \
  SVC_ArgF(svcRtx##f);                                    \
  SVC_Call0(SVC_In1, SVC_Out0, SVC_CL1);                  \
}

#define SVC0_1(f,t,t1)                                    \
__attribute__((always_inline))                            \
__STATIC_INLINE t __svc##f (t1 a1) {                      \
  SVC_ArgR(0,a1);                                         \
  SVC_ArgF(svcRtx##f);                                    \
  SVC_Call0(SVC_In1, SVC_Out1, SVC_CL1);                  \
  return (t) __r0;                                        \
}

#define SVC0_2(f,t,t1,t2)                                 \
__attribute__((always_inline))                            \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2) {               \
  SVC_ArgR(0,a1);                                         \
  SVC_ArgR(1,a2);                                         \
  SVC_ArgF(svcRtx##f);                                    \
  SVC_Call0(SVC_In2, SVC_Out1, SVC_CL0);                  \
  return (t) __r0;                                        \
}

#define SVC0_3(f,t,t1,t2,t3)                              \
__attribute__((always_inline))                            \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) {        \
  SVC_ArgR(0,a1);                                         \
  SVC_ArgR(1,a2);                                         \
  SVC_ArgR(2,a3);                                         \
  SVC_ArgF(svcRtx##f);                                    \
  SVC_Call0(SVC_In3, SVC_Out1, SVC_CL0);                  \
  return (t) __r0;                                        \
}

#define SVC0_4(f,t,t1,t2,t3,t4)                           \
__attribute__((always_inline))                            \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \
  SVC_ArgR(0,a1);                                         \
  SVC_ArgR(1,a2);                                         \
  SVC_ArgR(2,a3);                                         \
  SVC_ArgR(3,a4);                                         \
  SVC_ArgF(svcRtx##f);                                    \
  SVC_Call0(SVC_In4, SVC_Out1, SVC_CL0);                  \
  return (t) __r0;                                        \
}

#endif
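//  Example use (illustrative; RTX sources define their wrappers this way,
//  e.g. in rtx_delay.c):
//
//    // SVC0_1(Delay, osStatus_t, uint32_t)
//
//  expands to an always-inlined __svcDelay() that loads the address of
//  svcRtxDelay into the function register ("r12", or "r7" on ARMv6-M and
//  ARMv8-M Baseline), issues "svc 0", and returns the result the service
//  routine leaves in R0.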
//  ==== Exclusive Access Operation ====

#if (EXCLUSIVE_ACCESS == 1)

/// Atomic Access Operation: Write (8-bit)
/// \param[in]  mem             Memory address
/// \param[in]  val             Value to write
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint8_t atomic_wr8 (uint8_t *mem, uint8_t val) {
  mov    r2,r0
1
  ldrexb r0,[r2]
  strexb r3,r1,[r2]
  cbz    r3,%F2
  b      %B1
2
  bx     lr
}
#else
__STATIC_INLINE uint8_t atomic_wr8 (uint8_t *mem, uint8_t val) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint8_t  ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrexb %[ret],[%[mem]]\n\t"
    "strexb %[res],%[val],[%[mem]]\n\t"
    "cbz    %[res],2f\n\t"
    "b       1b\n"
  "2:"
  : [ret] "=&l" (ret),
    [res] "=&l" (res)
  : [mem] "l"   (mem),
    [val] "l"   (val)
  : "memory"
  );

  return ret;
}
#endif
/// Atomic Access Operation: Set bits (32-bit)
/// \param[in]  mem             Memory address
/// \param[in]  bits            Bit mask
/// \return                     New value
#if defined(__CC_ARM)
static __asm    uint32_t atomic_set32 (uint32_t *mem, uint32_t bits) {
  mov    r2,r0
1
  ldrex  r0,[r2]
  orr    r0,r0,r1
  strex  r3,r0,[r2]
  cbz    r3,%F2
  b      %B1
2
  bx     lr
}
#else
__STATIC_INLINE uint32_t atomic_set32 (uint32_t *mem, uint32_t bits) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[val],[%[mem]]\n\t"
#if (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
    "mov   %[ret],%[val]\n\t"
    "orrs  %[ret],%[bits]\n\t"
#else
    "orr   %[ret],%[val],%[bits]\n\t"
#endif
    "strex %[res],%[ret],[%[mem]]\n\t"
    "cbz   %[res],2f\n\t"
    "b      1b\n"
  "2:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem),
#if (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
    [bits] "l"  (bits)
#else
    [bits] "r"  (bits)
#endif
  : "cc", "memory"
  );

  return ret;
}
#endif
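//  Note: ARMv8-M Baseline (like ARMv6-M) has no three-register data-processing
//  encodings, so the new value is built with MOV plus flag-setting ORRS and
//  the operands must sit in low registers (the "l" constraint). Mainline and
//  ARMv7-M cores use a single ORR Rd,Rn,Rm and can take "bits" in any
//  register; the "cc" clobber covers the flag-setting forms.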
/// Atomic Access Operation: Clear bits (32-bit)
/// \param[in]  mem             Memory address
/// \param[in]  bits            Bit mask
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint32_t atomic_clr32 (uint32_t *mem, uint32_t bits) {
  push   {r4,lr}
  mov    r2,r0
1
  ldrex  r0,[r2]
  bic    r4,r0,r1
  strex  r3,r4,[r2]
  cbz    r3,%F2
  b      %B1
2
  pop    {r4,pc}
}
#else
__STATIC_INLINE uint32_t atomic_clr32 (uint32_t *mem, uint32_t bits) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[ret],[%[mem]]\n\t"
#if (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
    "mov   %[val],%[ret]\n\t"
    "bics  %[val],%[bits]\n\t"
#else
    "bic   %[val],%[ret],%[bits]\n\t"
#endif
    "strex %[res],%[val],[%[mem]]\n\t"
    "cbz   %[res],2f\n\t"
    "b      1b\n"
  "2:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem),
#if (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
    [bits] "l"  (bits)
#else
    [bits] "r"  (bits)
#endif
  : "cc", "memory"
  );

  return ret;
}
#endif
/// Atomic Access Operation: Check if all specified bits (32-bit) are active and clear them
/// \param[in]  mem             Memory address
/// \param[in]  bits            Bits to check/clear
/// \return                     Previous value if all bits were active (and are now cleared), 0 otherwise
#if defined(__CC_ARM)
static __asm    uint32_t atomic_chk32_all (uint32_t *mem, uint32_t bits) {
  push   {r4,lr}
  mov    r2,r0
1
  ldrex  r0,[r2]
  and    r4,r0,r1
  cmp    r4,r1
  beq    %F2
  clrex
  movs   r0,#0
  pop    {r4,pc}
2
  bic    r4,r0,r1
  strex  r3,r4,[r2]
  cbz    r3,%F3
  b      %B1
3
  pop    {r4,pc}
}
#else
__STATIC_INLINE uint32_t atomic_chk32_all (uint32_t *mem, uint32_t bits) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[ret],[%[mem]]\n\t"
#if (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
    "mov   %[val],%[ret]\n\t"
    "ands  %[val],%[bits]\n\t"
#else
    "and   %[val],%[ret],%[bits]\n\t"
#endif
    "cmp   %[val],%[bits]\n\t"
    "beq    2f\n\t"
    "clrex\n\t"
    "movs  %[ret],#0\n\t"
    "b      3f\n"
  "2:\n\t"
#if (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
    "mov   %[val],%[ret]\n\t"
    "bics  %[val],%[bits]\n\t"
#else
    "bic   %[val],%[ret],%[bits]\n\t"
#endif
    "strex %[res],%[val],[%[mem]]\n\t"
    "cbz   %[res],3f\n\t"
    "b      1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem),
    [bits] "l"  (bits)
  : "cc", "memory"
  );

  return ret;
}
#endif
/// Atomic Access Operation: Check if any specified bits (32-bit) are active and clear them
/// \param[in]  mem             Memory address
/// \param[in]  bits            Bits to check/clear
/// \return                     Previous value if any bit was active (active bits are now cleared), 0 otherwise
#if defined(__CC_ARM)
static __asm    uint32_t atomic_chk32_any (uint32_t *mem, uint32_t bits) {
  push   {r4,lr}
  mov    r2,r0
1
  ldrex  r0,[r2]
  tst    r0,r1
  bne    %F2
  clrex
  movs   r0,#0
  pop    {r4,pc}
2
  bic    r4,r0,r1
  strex  r3,r4,[r2]
  cbz    r3,%F3
  b      %B1
3
  pop    {r4,pc}
}
#else
__STATIC_INLINE uint32_t atomic_chk32_any (uint32_t *mem, uint32_t bits) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[ret],[%[mem]]\n\t"
    "tst   %[ret],%[bits]\n\t"
    "bne    2f\n\t"
    "clrex\n\t"
    "movs  %[ret],#0\n\t"
    "b      3f\n"
  "2:\n\t"
#if (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))
    "mov   %[val],%[ret]\n\t"
    "bics  %[val],%[bits]\n\t"
#else
    "bic   %[val],%[ret],%[bits]\n\t"
#endif
    "strex %[res],%[val],[%[mem]]\n\t"
    "cbz   %[res],3f\n\t"
    "b      1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem),
    [bits] "l"  (bits)
  : "cc", "memory"
  );

  return ret;
}
#endif
/// Atomic Access Operation: Increment (32-bit)
/// \param[in]  mem             Memory address
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint32_t atomic_inc32 (uint32_t *mem) {
  mov    r2,r0
1
  ldrex  r0,[r2]
  adds   r1,r0,#1
  strex  r3,r1,[r2]
  cbz    r3,%F2
  b      %B1
2
  bx     lr
}
#else
__STATIC_INLINE uint32_t atomic_inc32 (uint32_t *mem) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[ret],[%[mem]]\n\t"
    "adds  %[val],%[ret],#1\n\t"
    "strex %[res],%[val],[%[mem]]\n\t"
    "cbz   %[res],2f\n\t"
    "b      1b\n"
  "2:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem)
  : "cc", "memory"
  );

  return ret;
}
#endif
/// Atomic Access Operation: Increment (16-bit) if Less Than
/// \param[in]  mem             Memory address
/// \param[in]  max             Maximum value
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint16_t atomic_inc16_lt (uint16_t *mem, uint16_t max) {
  push   {r4,lr}
  mov    r2,r0
1
  ldrexh r0,[r2]
  cmp    r1,r0
  bhi    %F2
  clrex
  pop    {r4,pc}
2
  adds   r4,r0,#1
  strexh r3,r4,[r2]
  cbz    r3,%F3
  b      %B1
3
  pop    {r4,pc}
}
#else
__STATIC_INLINE uint16_t atomic_inc16_lt (uint16_t *mem, uint16_t max) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint16_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrexh %[ret],[%[mem]]\n\t"
    "cmp    %[max],%[ret]\n\t"
    "bhi    2f\n\t"
    "clrex\n\t"
    "b       3f\n"
  "2:\n\t"
    "adds   %[val],%[ret],#1\n\t"
    "strexh %[res],%[val],[%[mem]]\n\t"
    "cbz    %[res],3f\n\t"
    "b       1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem),
    [max] "l"   (max)
  : "cc", "memory"
  );

  return ret;
}
#endif
/// Atomic Access Operation: Increment (16-bit) with Limit (wraps to 0 at lim)
/// \param[in]  mem             Memory address
/// \param[in]  lim             Limit value
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint16_t atomic_inc16_lim (uint16_t *mem, uint16_t lim) {
  push   {r4,lr}
  mov    r2,r0
1
  ldrexh r0,[r2]
  adds   r4,r0,#1
  cmp    r1,r4
  bhs    %F2
  movs   r4,#0
2
  strexh r3,r4,[r2]
  cbz    r3,%F3
  b      %B1
3
  pop    {r4,pc}
}
#else
__STATIC_INLINE uint16_t atomic_inc16_lim (uint16_t *mem, uint16_t lim) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint16_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrexh %[ret],[%[mem]]\n\t"
    "adds   %[val],%[ret],#1\n\t"
    "cmp    %[lim],%[val]\n\t"
    "bhs    2f\n\t"
    "movs   %[val],#0\n"
  "2:\n\t"
    "strexh %[res],%[val],[%[mem]]\n\t"
    "cbz    %[res],3f\n\t"
    "b       1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem),
    [lim] "l"   (lim)
  : "cc", "memory"
  );

  return ret;
}
#endif
/// Atomic Access Operation: Decrement (32-bit)
/// \param[in]  mem             Memory address
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint32_t atomic_dec32 (uint32_t *mem) {
  mov    r2,r0
1
  ldrex  r0,[r2]
  subs   r1,r0,#1
  strex  r3,r1,[r2]
  cbz    r3,%F2
  b      %B1
2
  bx     lr
}
#else
__STATIC_INLINE uint32_t atomic_dec32 (uint32_t *mem) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[ret],[%[mem]]\n\t"
    "subs  %[val],%[ret],#1\n\t"
    "strex %[res],%[val],[%[mem]]\n\t"
    "cbz   %[res],2f\n\t"
    "b      1b\n"
  "2:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem)
  : "cc", "memory"
  );

  return ret;
}
#endif
/// Atomic Access Operation: Decrement (32-bit) if Not Zero
/// \param[in]  mem             Memory address
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint32_t atomic_dec32_nz (uint32_t *mem) {
  mov    r2,r0
1
  ldrex  r0,[r2]
  cbnz   r0,%F2
  clrex
  bx     lr
2
  subs   r1,r0,#1
  strex  r3,r1,[r2]
  cbz    r3,%F3
  b      %B1
3
  bx     lr
}
#else
__STATIC_INLINE uint32_t atomic_dec32_nz (uint32_t *mem) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint32_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[ret],[%[mem]]\n\t"
    "cbnz  %[ret],2f\n\t"
    "clrex\n\t"
    "b      3f\n"
  "2:\n\t"
    "subs  %[val],%[ret],#1\n\t"
    "strex %[res],%[val],[%[mem]]\n\t"
    "cbz   %[res],3f\n\t"
    "b      1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem)
  : "cc", "memory"
  );

  return ret;
}
#endif
/// Atomic Access Operation: Decrement (16-bit) if Not Zero
/// \param[in]  mem             Memory address
/// \return                     Previous value
#if defined(__CC_ARM)
static __asm    uint16_t atomic_dec16_nz (uint16_t *mem) {
  mov    r2,r0
1
  ldrexh r0,[r2]
  cbnz   r0,%F2
  clrex
  bx     lr
2
  subs   r1,r0,#1
  strexh r3,r1,[r2]
  cbz    r3,%F3
  b      %B1
3
  bx     lr
}
#else
__STATIC_INLINE uint16_t atomic_dec16_nz (uint16_t *mem) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register uint16_t ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrexh %[ret],[%[mem]]\n\t"
    "cbnz   %[ret],2f\n\t"
    "clrex\n\t"
    "b       3f\n"
  "2:\n\t"
    "subs   %[val],%[ret],#1\n\t"
    "strexh %[res],%[val],[%[mem]]\n\t"
    "cbz    %[res],3f\n\t"
    "b       1b\n"
  "3:"
  : [ret] "=&l" (ret),
    [val] "=&l" (val),
    [res] "=&l" (res)
  : [mem] "l"   (mem)
  : "cc", "memory"
  );

  return ret;
}
#endif
/// Atomic Access Operation: Link Get
/// \param[in]  root            Root address
/// \return                     Link (head element removed from the list, NULL if empty)
#if defined(__CC_ARM)
static __asm    void *atomic_link_get (void **root) {
  mov    r2,r0
1
  ldrex  r0,[r2]
  cbnz   r0,%F2
  clrex
  bx     lr
2
  ldr    r1,[r0]
  strex  r3,r1,[r2]
  cbz    r3,%F3
  b      %B1
3
  bx     lr
}
#else
__STATIC_INLINE void *atomic_link_get (void **root) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif
  register void    *ret;

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldrex %[ret],[%[root]]\n\t"
    "cbnz  %[ret],2f\n\t"
    "clrex\n\t"
    "b      3f\n"
  "2:\n\t"
    "ldr   %[val],[%[ret]]\n\t"
    "strex %[res],%[val],[%[root]]\n\t"
    "cbz   %[res],3f\n\t"
    "b      1b\n"
  "3:"
  : [ret]  "=&l" (ret),
    [val]  "=&l" (val),
    [res]  "=&l" (res)
  : [root] "l"   (root)
  : "memory"
  );

  return ret;
}
#endif
/// Atomic Access Operation: Link Put
/// \param[in]  root            Root address
/// \param[in]  link            Link (element pushed onto the list)
#if defined(__CC_ARM)
static __asm    void atomic_link_put (void **root, void *link) {
1
  ldr    r2,[r0]
  str    r2,[r1]
  dmb
  ldrex  r2,[r0]
  ldr    r3,[r1]
  cmp    r3,r2
  bne    %B1
  strex  r3,r1,[r0]
  cbz    r3,%F2
  b      %B1
2
  bx     lr
}
#else
__STATIC_INLINE void atomic_link_put (void **root, void *link) {
#ifdef  __ICCARM__
#pragma diag_suppress=Pe550
#endif
  register uint32_t val1, val2, res;
#ifdef  __ICCARM__
#pragma diag_default=Pe550
#endif

  __ASM volatile (
#ifndef __ICCARM__
  ".syntax unified\n\t"
#endif
  "1:\n\t"
    "ldr   %[val1],[%[root]]\n\t"
    "str   %[val1],[%[link]]\n\t"
    "dmb\n\t"
    "ldrex %[val1],[%[root]]\n\t"
    "ldr   %[val2],[%[link]]\n\t"
    "cmp   %[val2],%[val1]\n\t"
    "bne    1b\n\t"
    "strex %[res],%[link],[%[root]]\n\t"
    "cbz   %[res],2f\n\t"
    "b      1b\n"
  "2:"
  : [val1] "=&l" (val1),
    [val2] "=&l" (val2),
    [res]  "=&l" (res)
  : [root] "l"   (root),
    [link] "l"   (link)
  : "cc", "memory"
  );
}
#endif
#endif  // (EXCLUSIVE_ACCESS == 1)

#endif  // RTX_CORE_CM_H_