The official Mbed 2 C/C++ SDK provides the software platform and libraries to build your applications.
mbed 2
This is the mbed 2 library. If you'd like to learn about Mbed OS, please see the mbed-os docs.
Diff: TARGET_LPC1768/cmsis_iccarm.h
Revision: 169:a7c7b631e539
Parent:   160:5571c4ff569f
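This revision updates the bundled CMSIS IAR compiler header from V5.0.3 to V5.0.5. Most of the changes in the diff below are mechanical (braces moved onto their own lines, unsigned literal suffixes, a corrected __REVSH signature), plus guarded MSPLIM/PSPLIM accessors for Armv8-M targets without the Main Extension and new __FORCEINLINE / __STATIC_FORCEINLINE helper macros. As a rough sketch of how the new macro is typically consumed (the helper name and the non-IAR fallbacks are hypothetical, not part of this diff):

#include <stdint.h>

/* Fallbacks so this sketch also builds outside IAR; cmsis_iccarm.h itself
   provides the real definitions for ICCARM. */
#ifndef   __STATIC_INLINE
  #define __STATIC_INLINE       static inline
#endif
#ifndef   __FORCEINLINE
  #define __FORCEINLINE
#endif
#ifndef   __STATIC_FORCEINLINE
  #define __STATIC_FORCEINLINE  __FORCEINLINE __STATIC_INLINE
#endif

/* Hypothetical helper: the same software count-leading-zeros loop used as
   the __CLZ fallback for M0-class cores further down in the diff. */
__STATIC_FORCEINLINE uint32_t clz_sw(uint32_t data)
{
  uint32_t count = 0U;
  uint32_t mask  = 0x80000000U;

  if (data == 0U) { return 32U; }
  while ((data & mask) == 0U)
  {
    count += 1U;
    mask  >>= 1U;
  }
  return count;
}

On IAR, __STATIC_FORCEINLINE expands to _Pragma("inline=forced") static inline, which asks the compiler to always inline the helper; with the fallback above it degrades to a plain static inline.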
--- a/TARGET_LPC1768/cmsis_iccarm.h	Thu May 24 15:35:55 2018 +0100
+++ b/TARGET_LPC1768/cmsis_iccarm.h	Fri Jun 22 15:38:59 2018 +0100
@@ -1,13 +1,13 @@
 /**************************************************************************//**
  * @file     cmsis_iccarm.h
- * @brief    CMSIS compiler ICCARM (IAR compiler) header file
- * @version  V5.0.3
- * @date     29. August 2017
+ * @brief    CMSIS compiler ICCARM (IAR Compiler for Arm) header file
+ * @version  V5.0.5
+ * @date     10. January 2018
  ******************************************************************************/
 
 //------------------------------------------------------------------------------
 //
-// Copyright (c) 2017 IAR Systems
+// Copyright (c) 2017-2018 IAR Systems
 //
 // Licensed under the Apache License, Version 2.0 (the "License")
 // you may not use this file except in compliance with the License.
@@ -116,7 +116,7 @@
 #ifndef __NO_RETURN
   #if __ICCARM_V8
-    #define __NO_RETURN __attribute__((noreturn))
+    #define __NO_RETURN __attribute__((__noreturn__))
   #else
     #define __NO_RETURN _Pragma("object_attribute=__noreturn")
   #endif
 #endif
@@ -150,18 +150,26 @@
 #endif
 
 #ifndef   __RESTRICT
-  #define __RESTRICT        restrict
+  #define __RESTRICT            restrict
+#endif
+
+#ifndef   __STATIC_INLINE
+  #define __STATIC_INLINE       static inline
 #endif
 
+#ifndef   __FORCEINLINE
+  #define __FORCEINLINE         _Pragma("inline=forced")
+#endif
 
-#ifndef   __STATIC_INLINE
-  #define __STATIC_INLINE       static inline
+#ifndef   __STATIC_FORCEINLINE
+  #define __STATIC_FORCEINLINE  __FORCEINLINE __STATIC_INLINE
 #endif
 
 #ifndef __UNALIGNED_UINT16_READ
 #pragma language=save
 #pragma language=extended
-__IAR_FT uint16_t __iar_uint16_read(void const *ptr) {
+__IAR_FT uint16_t __iar_uint16_read(void const *ptr)
+{
   return *(__packed uint16_t*)(ptr);
 }
 #pragma language=restore
@@ -172,7 +180,8 @@
 #ifndef __UNALIGNED_UINT16_WRITE
 #pragma language=save
 #pragma language=extended
-__IAR_FT void __iar_uint16_write(void const *ptr, uint16_t val) {
+__IAR_FT void __iar_uint16_write(void const *ptr, uint16_t val)
+{
   *(__packed uint16_t*)(ptr) = val;;
 }
 #pragma language=restore
@@ -182,7 +191,8 @@
 #ifndef __UNALIGNED_UINT32_READ
 #pragma language=save
 #pragma language=extended
-__IAR_FT uint32_t __iar_uint32_read(void const *ptr) {
+__IAR_FT uint32_t __iar_uint32_read(void const *ptr)
+{
   return *(__packed uint32_t*)(ptr);
 }
 #pragma language=restore
@@ -192,7 +202,8 @@
 #ifndef __UNALIGNED_UINT32_WRITE
 #pragma language=save
 #pragma language=extended
-__IAR_FT void __iar_uint32_write(void const *ptr, uint32_t val) {
+__IAR_FT void __iar_uint32_write(void const *ptr, uint32_t val)
+{
   *(__packed uint32_t*)(ptr) = val;;
 }
 #pragma language=restore
@@ -252,8 +263,8 @@
   #define __disable_irq       __iar_builtin_disable_interrupt
   #define __enable_fault_irq  __iar_builtin_enable_fiq
   #define __enable_irq        __iar_builtin_enable_interrupt
-  #define __arm_rsr    __iar_builtin_rsr
-  #define __arm_wsr    __iar_builtin_wsr
+  #define __arm_rsr           __iar_builtin_rsr
+  #define __arm_wsr           __iar_builtin_wsr
 
   #define __get_APSR()        (__arm_rsr("APSR"))
@@ -272,10 +283,24 @@
   #define __get_IPSR()        (__arm_rsr("IPSR"))
   #define __get_MSP()         (__arm_rsr("MSP"))
-  #define __get_MSPLIM()      (__arm_rsr("MSPLIM"))
+  #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
+       (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
+    // without main extensions, the non-secure MSPLIM is RAZ/WI
+    #define __get_MSPLIM()    (0U)
+  #else
+    #define __get_MSPLIM()    (__arm_rsr("MSPLIM"))
+  #endif
   #define __get_PRIMASK()     (__arm_rsr("PRIMASK"))
   #define __get_PSP()         (__arm_rsr("PSP"))
-  #define __get_PSPLIM()      (__arm_rsr("PSPLIM"))
+
+  #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
+       (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
+    // without main extensions, the non-secure PSPLIM is RAZ/WI
+    #define __get_PSPLIM()    (0U)
+  #else
+    #define __get_PSPLIM()    (__arm_rsr("PSPLIM"))
+  #endif
+
   #define __get_xPSR()        (__arm_rsr("xPSR"))
 
   #define __set_BASEPRI(VALUE) (__arm_wsr("BASEPRI", (VALUE)))
@@ -283,10 +308,23 @@
   #define __set_CONTROL(VALUE)    (__arm_wsr("CONTROL", (VALUE)))
   #define __set_FAULTMASK(VALUE)  (__arm_wsr("FAULTMASK", (VALUE)))
   #define __set_MSP(VALUE)        (__arm_wsr("MSP", (VALUE)))
-  #define __set_MSPLIM(VALUE)     (__arm_wsr("MSPLIM", (VALUE)))
+
+  #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
+       (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
+    // without main extensions, the non-secure MSPLIM is RAZ/WI
+    #define __set_MSPLIM(VALUE)   ((void)(VALUE))
+  #else
+    #define __set_MSPLIM(VALUE)   (__arm_wsr("MSPLIM", (VALUE)))
+  #endif
   #define __set_PRIMASK(VALUE)    (__arm_wsr("PRIMASK", (VALUE)))
   #define __set_PSP(VALUE)        (__arm_wsr("PSP", (VALUE)))
-  #define __set_PSPLIM(VALUE)     (__arm_wsr("PSPLIM", (VALUE)))
+  #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
+       (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
+    // without main extensions, the non-secure PSPLIM is RAZ/WI
+    #define __set_PSPLIM(VALUE)   ((void)(VALUE))
+  #else
+    #define __set_PSPLIM(VALUE)   (__arm_wsr("PSPLIM", (VALUE)))
+  #endif
 
   #define __TZ_get_CONTROL_NS()      (__arm_rsr("CONTROL_NS"))
   #define __TZ_set_CONTROL_NS(VALUE) (__arm_wsr("CONTROL_NS", (VALUE)))
@@ -307,17 +345,14 @@
   #define __TZ_get_MSPLIM_NS()       (__arm_rsr("MSPLIM_NS"))
   #define __TZ_set_MSPLIM_NS(VALUE)  (__arm_wsr("MSPLIM_NS", (VALUE)))
 
-  #define __NOP   __iar_builtin_no_operation
-
-  __IAR_FT uint8_t __CLZ(uint32_t val) {
-    return __iar_builtin_CLZ(val);
-  }
+  #define __NOP     __iar_builtin_no_operation
 
-  #define __CLREX __iar_builtin_CLREX
+  #define __CLZ     __iar_builtin_CLZ
+  #define __CLREX   __iar_builtin_CLREX
 
-  #define __DMB   __iar_builtin_DMB
-  #define __DSB   __iar_builtin_DSB
-  #define __ISB   __iar_builtin_ISB
+  #define __DMB     __iar_builtin_DMB
+  #define __DSB     __iar_builtin_DSB
+  #define __ISB     __iar_builtin_ISB
 
   #define __LDREXB  __iar_builtin_LDREXB
   #define __LDREXH  __iar_builtin_LDREXH
@@ -327,8 +362,9 @@
   #define __REV     __iar_builtin_REV
   #define __REV16   __iar_builtin_REV16
 
-  __IAR_FT int32_t __REVSH(int32_t val) {
-    return __iar_builtin_REVSH((int16_t)val);
+  __IAR_FT int16_t __REVSH(int16_t val)
+  {
+    return (int16_t) __iar_builtin_REVSH(val);
   }
 
   #define __ROR     __iar_builtin_ROR
@@ -434,6 +470,10 @@
     #define __set_FPSCR __cmsis_iar_set_FPSR_not_active
   #endif
 
+  #ifdef __INTRINSICS_INCLUDED
+  #error intrinsics.h is already included previously!
+  #endif
+
   #include <intrinsics.h>
 
   #if __IAR_M0_FAMILY
@@ -444,22 +484,24 @@
     #undef __RBIT
     #undef __get_APSR
 
-    __STATIC_INLINE uint8_t __CLZ(uint32_t data) {
-      if (data == 0u) { return 32u; }
+    __STATIC_INLINE uint8_t __CLZ(uint32_t data)
+    {
+      if (data == 0U) { return 32U; }
 
-      uint32_t count = 0;
-      uint32_t mask = 0x80000000;
+      uint32_t count = 0U;
+      uint32_t mask = 0x80000000U;
 
-      while ((data & mask) == 0)
+      while ((data & mask) == 0U)
       {
-        count += 1u;
-        mask = mask >> 1u;
+        count += 1U;
+        mask = mask >> 1U;
       }
-      return (count);
+      return count;
     }
 
-    __STATIC_INLINE uint32_t __RBIT(uint32_t v) {
-      uint8_t sc = 31;
+    __STATIC_INLINE uint32_t __RBIT(uint32_t v)
+    {
+      uint8_t sc = 31U;
       uint32_t r = v;
       for (v >>= 1U; v; v >>= 1U)
       {
@@ -470,7 +512,8 @@
       return (r << sc);
     }
 
-    __STATIC_INLINE uint32_t __get_APSR(void) {
+    __STATIC_INLINE uint32_t __get_APSR(void)
+    {
       uint32_t res;
       __asm("MRS      %0,APSR" : "=r" (res));
       return res;
@@ -497,11 +540,13 @@
   #if (!defined(__ARM_ARCH_6M__) || __ARM_ARCH_6M__==0)
 
-  __IAR_FT uint32_t __LDREXW(uint32_t volatile *ptr) {
+  __IAR_FT uint32_t __LDREXW(uint32_t volatile *ptr)
+  {
     return __LDREX((unsigned long *)ptr);
   }
 
-  __IAR_FT uint32_t __STREXW(uint32_t value, uint32_t volatile *ptr) {
+  __IAR_FT uint32_t __STREXW(uint32_t value, uint32_t volatile *ptr)
+  {
    return __STREX(value, (unsigned long *)ptr);
   }
   #endif
@@ -510,115 +555,184 @@
   /* __CORTEX_M is defined in core_cm0.h, core_cm3.h and core_cm4.h. */
   #if (__CORTEX_M >= 0x03)
 
-  __IAR_FT uint32_t __RRX(uint32_t value) {
+  __IAR_FT uint32_t __RRX(uint32_t value)
+  {
    uint32_t result;
    __ASM("RRX      %0, %1" : "=r"(result) : "r" (value) : "cc");
    return(result);
   }
 
-  __IAR_FT void __set_BASEPRI_MAX(uint32_t value) {
+  __IAR_FT void __set_BASEPRI_MAX(uint32_t value)
+  {
    __asm volatile("MSR      BASEPRI_MAX,%0"::"r" (value));
   }
 
-  #define __enable_fault_irq __enable_fiq
+  #define __enable_fault_irq  __enable_fiq
   #define __disable_fault_irq __disable_fiq
 
   #endif /* (__CORTEX_M >= 0x03) */
 
-  __IAR_FT uint32_t __ROR(uint32_t op1, uint32_t op2) {
+  __IAR_FT uint32_t __ROR(uint32_t op1, uint32_t op2)
+  {
    return (op1 >> op2) | (op1 << ((sizeof(op1)*8)-op2));
   }
 
   #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
        (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))   )
 
-  __IAR_FT uint32_t __TZ_get_CONTROL_NS(void) {
+  __IAR_FT uint32_t __get_MSPLIM(void)
+  {
+    uint32_t res;
+  #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
+       (!defined (__ARM_FEATURE_CMSE  ) || (__ARM_FEATURE_CMSE   < 3)))
+    // without main extensions, the non-secure MSPLIM is RAZ/WI
+    res = 0U;
+  #else
+    __asm volatile("MRS      %0,MSPLIM" : "=r" (res));
+  #endif
+    return res;
+  }
+
+  __IAR_FT void __set_MSPLIM(uint32_t value)
+  {
+  #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
+       (!defined (__ARM_FEATURE_CMSE  ) || (__ARM_FEATURE_CMSE   < 3)))
+    // without main extensions, the non-secure MSPLIM is RAZ/WI
+    (void)value;
+  #else
+    __asm volatile("MSR      MSPLIM,%0" :: "r" (value));
+  #endif
+  }
+
+  __IAR_FT uint32_t __get_PSPLIM(void)
+  {
+    uint32_t res;
+  #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
+       (!defined (__ARM_FEATURE_CMSE  ) || (__ARM_FEATURE_CMSE   < 3)))
+    // without main extensions, the non-secure PSPLIM is RAZ/WI
+    res = 0U;
+  #else
+    __asm volatile("MRS      %0,PSPLIM" : "=r" (res));
+  #endif
+    return res;
+  }
+
+  __IAR_FT void __set_PSPLIM(uint32_t value)
+  {
+  #if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
+       (!defined (__ARM_FEATURE_CMSE  ) || (__ARM_FEATURE_CMSE   < 3)))
+    // without main extensions, the non-secure PSPLIM is RAZ/WI
+    (void)value;
+  #else
+    __asm volatile("MSR      PSPLIM,%0" :: "r" (value));
+  #endif
+  }
+
+  __IAR_FT uint32_t __TZ_get_CONTROL_NS(void)
+  {
    uint32_t res;
    __asm volatile("MRS      %0,CONTROL_NS" : "=r" (res));
    return res;
   }
 
-  __IAR_FT void __TZ_set_CONTROL_NS(uint32_t value) {
+  __IAR_FT void __TZ_set_CONTROL_NS(uint32_t value)
+  {
    __asm volatile("MSR      CONTROL_NS,%0" :: "r" (value));
   }
 
-  __IAR_FT uint32_t __TZ_get_PSP_NS(void) {
+  __IAR_FT uint32_t __TZ_get_PSP_NS(void)
+  {
    uint32_t res;
    __asm volatile("MRS      %0,PSP_NS" : "=r" (res));
    return res;
   }
 
-  __IAR_FT void __TZ_set_PSP_NS(uint32_t value) {
+  __IAR_FT void __TZ_set_PSP_NS(uint32_t value)
+  {
    __asm volatile("MSR      PSP_NS,%0" :: "r" (value));
   }
 
-  __IAR_FT uint32_t __TZ_get_MSP_NS(void) {
+  __IAR_FT uint32_t __TZ_get_MSP_NS(void)
+  {
    uint32_t res;
    __asm volatile("MRS      %0,MSP_NS" : "=r" (res));
    return res;
   }
 
-  __IAR_FT void __TZ_set_MSP_NS(uint32_t value) {
+  __IAR_FT void __TZ_set_MSP_NS(uint32_t value)
+  {
    __asm volatile("MSR      MSP_NS,%0" :: "r" (value));
   }
 
-  __IAR_FT uint32_t __TZ_get_SP_NS(void) {
+  __IAR_FT uint32_t __TZ_get_SP_NS(void)
+  {
    uint32_t res;
    __asm volatile("MRS      %0,SP_NS" : "=r" (res));
    return res;
   }
-  __IAR_FT void __TZ_set_SP_NS(uint32_t value) {
+  __IAR_FT void __TZ_set_SP_NS(uint32_t value)
+  {
    __asm volatile("MSR      SP_NS,%0" :: "r" (value));
   }
 
-  __IAR_FT uint32_t __TZ_get_PRIMASK_NS(void) {
+  __IAR_FT uint32_t __TZ_get_PRIMASK_NS(void)
+  {
    uint32_t res;
    __asm volatile("MRS      %0,PRIMASK_NS" : "=r" (res));
    return res;
   }
 
-  __IAR_FT void __TZ_set_PRIMASK_NS(uint32_t value) {
+  __IAR_FT void __TZ_set_PRIMASK_NS(uint32_t value)
+  {
    __asm volatile("MSR      PRIMASK_NS,%0" :: "r" (value));
   }
 
-  __IAR_FT uint32_t __TZ_get_BASEPRI_NS(void) {
+  __IAR_FT uint32_t __TZ_get_BASEPRI_NS(void)
+  {
    uint32_t res;
    __asm volatile("MRS      %0,BASEPRI_NS" : "=r" (res));
    return res;
   }
 
-  __IAR_FT void __TZ_set_BASEPRI_NS(uint32_t value) {
+  __IAR_FT void __TZ_set_BASEPRI_NS(uint32_t value)
+  {
    __asm volatile("MSR      BASEPRI_NS,%0" :: "r" (value));
   }
 
-  __IAR_FT uint32_t __TZ_get_FAULTMASK_NS(void) {
+  __IAR_FT uint32_t __TZ_get_FAULTMASK_NS(void)
+  {
    uint32_t res;
    __asm volatile("MRS      %0,FAULTMASK_NS" : "=r" (res));
    return res;
   }
 
-  __IAR_FT void __TZ_set_FAULTMASK_NS(uint32_t value) {
+  __IAR_FT void __TZ_set_FAULTMASK_NS(uint32_t value)
+  {
    __asm volatile("MSR      FAULTMASK_NS,%0" :: "r" (value));
   }
 
-  __IAR_FT uint32_t __TZ_get_PSPLIM_NS(void) {
+  __IAR_FT uint32_t __TZ_get_PSPLIM_NS(void)
+  {
    uint32_t res;
    __asm volatile("MRS      %0,PSPLIM_NS" : "=r" (res));
    return res;
   }
 
-  __IAR_FT void __TZ_set_PSPLIM_NS(uint32_t value) {
+  __IAR_FT void __TZ_set_PSPLIM_NS(uint32_t value)
+  {
    __asm volatile("MSR      PSPLIM_NS,%0" :: "r" (value));
   }
 
-  __IAR_FT uint32_t __TZ_get_MSPLIM_NS(void) {
+  __IAR_FT uint32_t __TZ_get_MSPLIM_NS(void)
+  {
    uint32_t res;
    __asm volatile("MRS      %0,MSPLIM_NS" : "=r" (res));
    return res;
   }
 
-  __IAR_FT void __TZ_set_MSPLIM_NS(uint32_t value) {
+  __IAR_FT void __TZ_set_MSPLIM_NS(uint32_t value)
+  {
    __asm volatile("MSR      MSPLIM_NS,%0" :: "r" (value));
   }
 
@@ -629,25 +743,35 @@
   #define __BKPT(value)    __asm volatile ("BKPT     %0" : : "i"(value))
 
   #if __IAR_M0_FAMILY
-    __STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat) {
-      if ((sat >= 1U) && (sat <= 32U)) {
+    __STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat)
+    {
+      if ((sat >= 1U) && (sat <= 32U))
+      {
        const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U);
        const int32_t min = -1 - max ;
-        if (val > max) {
+        if (val > max)
+        {
          return max;
-        } else if (val < min) {
+        }
+        else if (val < min)
+        {
          return min;
        }
      }
      return val;
    }
 
-    __STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat) {
-      if (sat <= 31U) {
+    __STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat)
+    {
+      if (sat <= 31U)
+      {
        const uint32_t max = ((1U << sat) - 1U);
-        if (val > (int32_t)max) {
+        if (val > (int32_t)max)
+        {
          return max;
-        } else if (val < 0) {
+        }
+        else if (val < 0)
+        {
          return 0U;
        }
      }
@@ -657,33 +781,39 @@
   #if (__CORTEX_M >= 0x03)   /* __CORTEX_M is defined in core_cm0.h, core_cm3.h and core_cm4.h. */
 
-  __IAR_FT uint8_t __LDRBT(volatile uint8_t *addr) {
+  __IAR_FT uint8_t __LDRBT(volatile uint8_t *addr)
+  {
    uint32_t res;
    __ASM("LDRBT %0, [%1]" : "=r" (res) : "r" (addr) : "memory");
    return ((uint8_t)res);
   }
 
-  __IAR_FT uint16_t __LDRHT(volatile uint16_t *addr) {
+  __IAR_FT uint16_t __LDRHT(volatile uint16_t *addr)
+  {
    uint32_t res;
    __ASM("LDRHT %0, [%1]" : "=r" (res) : "r" (addr) : "memory");
    return ((uint16_t)res);
   }
 
-  __IAR_FT uint32_t __LDRT(volatile uint32_t *addr) {
+  __IAR_FT uint32_t __LDRT(volatile uint32_t *addr)
+  {
    uint32_t res;
    __ASM("LDRT %0, [%1]" : "=r" (res) : "r" (addr) : "memory");
    return res;
   }
 
-  __IAR_FT void __STRBT(uint8_t value, volatile uint8_t *addr) {
+  __IAR_FT void __STRBT(uint8_t value, volatile uint8_t *addr)
+  {
    __ASM("STRBT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory");
   }
 
-  __IAR_FT void __STRHT(uint16_t value, volatile uint16_t *addr) {
+  __IAR_FT void __STRHT(uint16_t value, volatile uint16_t *addr)
+  {
    __ASM("STRHT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory");
   }
 
-  __IAR_FT void __STRT(uint32_t value, volatile uint32_t *addr) {
+  __IAR_FT void __STRT(uint32_t value, volatile uint32_t *addr)
+  {
    __ASM("STRT %1, [%0]" : : "r" (addr), "r" (value) : "memory");
   }
 
@@ -693,67 +823,79 @@
        (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1))   )
 
-  __IAR_FT uint8_t __LDAB(volatile uint8_t *ptr) {
+  __IAR_FT uint8_t __LDAB(volatile uint8_t *ptr)
+  {
    uint32_t res;
    __ASM volatile ("LDAB %0, [%1]" : "=r" (res) : "r" (*ptr) : "memory");
    return ((uint8_t)res);
   }
 
-  __IAR_FT uint16_t __LDAH(volatile uint16_t *ptr) {
+  __IAR_FT uint16_t __LDAH(volatile uint16_t *ptr)
+  {
    uint32_t res;
    __ASM volatile ("LDAH %0, [%1]" : "=r" (res) : "r" (*ptr) : "memory");
    return ((uint16_t)res);
  }
 
-  __IAR_FT uint32_t __LDA(volatile uint32_t *ptr) {
+  __IAR_FT uint32_t __LDA(volatile uint32_t *ptr)
+  {
    uint32_t res;
    __ASM volatile ("LDA %0, [%1]" : "=r" (res) : "r" (*ptr) : "memory");
    return res;
  }
 
-  __IAR_FT void __STLB(uint8_t value, volatile uint8_t *ptr) {
+  __IAR_FT void __STLB(uint8_t value, volatile uint8_t *ptr)
+  {
    __ASM volatile ("STLB %1, [%0]" :: "r" (*ptr), "r" (value) : "memory");
  }
 
-  __IAR_FT void __STLH(uint16_t value, volatile uint16_t *ptr) {
+  __IAR_FT void __STLH(uint16_t value, volatile uint16_t *ptr)
+  {
    __ASM volatile ("STLH %1, [%0]" :: "r" (*ptr), "r" (value) : "memory");
  }
 
-  __IAR_FT void __STL(uint32_t value, volatile uint32_t *ptr) {
+  __IAR_FT void __STL(uint32_t value, volatile uint32_t *ptr)
+  {
    __ASM volatile ("STL %1, [%0]" :: "r" (*ptr), "r" (value) : "memory");
  }
 
-  __IAR_FT uint8_t __LDAEXB(volatile uint8_t *ptr) {
+  __IAR_FT uint8_t __LDAEXB(volatile uint8_t *ptr)
+  {
    uint32_t res;
    __ASM volatile ("LDAEXB %0, [%1]" : "=r" (res) : "r" (*ptr) : "memory");
    return ((uint8_t)res);
  }
 
-  __IAR_FT uint16_t __LDAEXH(volatile uint16_t *ptr) {
+  __IAR_FT uint16_t __LDAEXH(volatile uint16_t *ptr)
+  {
    uint32_t res;
    __ASM volatile ("LDAEXH %0, [%1]" : "=r" (res) : "r" (*ptr) : "memory");
    return ((uint16_t)res);
  }
 
-  __IAR_FT uint32_t __LDAEX(volatile uint32_t *ptr) {
+  __IAR_FT uint32_t __LDAEX(volatile uint32_t *ptr)
+  {
    uint32_t res;
    __ASM volatile ("LDAEX %0, [%1]" : "=r" (res) : "r" (*ptr) : "memory");
    return res;
  }
 
-  __IAR_FT uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) {
+  __IAR_FT uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr)
+  {
    uint32_t res;
    __ASM volatile ("STLEXB %0, %2, [%1]" : "=r" (res) : "r" (*ptr), "r" (value) : "memory");
    return res;
  }
 
-  __IAR_FT uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) {
+  __IAR_FT uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr)
+  {
    uint32_t res;
    __ASM volatile ("STLEXH %0, %2, [%1]" : "=r" (res) : "r" (*ptr), "r" (value) : "memory");
    return res;
  }
 
-  __IAR_FT uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) {
+  __IAR_FT uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr)
+  {
    uint32_t res;
    __ASM volatile ("STLEX %0, %2, [%1]" : "=r" (res) : "r" (*ptr), "r" (value) : "memory");
    return res;