mbed.h library with any bug fixes AV finds.

Dependents:   micromouse4_encoder_testing PID_Test Lab1_Test WorkingPID ... more

Committer: aravindsv
Date: Mon Nov 02 02:26:59 2015 +0000
Revision: 0:ba7650f404af
Commit message: Reduced HSE_STARTUP_TIMEOUT to 500 ms, fixed some compiler warnings

Who changed what in which revision?

User | Revision | Line number | New contents of line
aravindsv 0:ba7650f404af 1 /**************************************************************************//**
aravindsv 0:ba7650f404af 2 * @file core_cmInstr.h
aravindsv 0:ba7650f404af 3 * @brief CMSIS Cortex-M Core Instruction Access Header File
aravindsv 0:ba7650f404af 4 * @version V3.20
aravindsv 0:ba7650f404af 5 * @date 05. March 2013
aravindsv 0:ba7650f404af 6 *
aravindsv 0:ba7650f404af 7 * @note
aravindsv 0:ba7650f404af 8 *
aravindsv 0:ba7650f404af 9 ******************************************************************************/
aravindsv 0:ba7650f404af 10 /* Copyright (c) 2009 - 2013 ARM LIMITED
aravindsv 0:ba7650f404af 11
aravindsv 0:ba7650f404af 12 All rights reserved.
aravindsv 0:ba7650f404af 13 Redistribution and use in source and binary forms, with or without
aravindsv 0:ba7650f404af 14 modification, are permitted provided that the following conditions are met:
aravindsv 0:ba7650f404af 15 - Redistributions of source code must retain the above copyright
aravindsv 0:ba7650f404af 16 notice, this list of conditions and the following disclaimer.
aravindsv 0:ba7650f404af 17 - Redistributions in binary form must reproduce the above copyright
aravindsv 0:ba7650f404af 18 notice, this list of conditions and the following disclaimer in the
aravindsv 0:ba7650f404af 19 documentation and/or other materials provided with the distribution.
aravindsv 0:ba7650f404af 20 - Neither the name of ARM nor the names of its contributors may be used
aravindsv 0:ba7650f404af 21 to endorse or promote products derived from this software without
aravindsv 0:ba7650f404af 22 specific prior written permission.
aravindsv 0:ba7650f404af 23 *
aravindsv 0:ba7650f404af 24 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
aravindsv 0:ba7650f404af 25 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
aravindsv 0:ba7650f404af 26 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
aravindsv 0:ba7650f404af 27 ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
aravindsv 0:ba7650f404af 28 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
aravindsv 0:ba7650f404af 29 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
aravindsv 0:ba7650f404af 30 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
aravindsv 0:ba7650f404af 31 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
aravindsv 0:ba7650f404af 32 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
aravindsv 0:ba7650f404af 33 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
aravindsv 0:ba7650f404af 34 POSSIBILITY OF SUCH DAMAGE.
aravindsv 0:ba7650f404af 35 ---------------------------------------------------------------------------*/
aravindsv 0:ba7650f404af 36
aravindsv 0:ba7650f404af 37
aravindsv 0:ba7650f404af 38 #ifndef __CORE_CMINSTR_H
aravindsv 0:ba7650f404af 39 #define __CORE_CMINSTR_H
aravindsv 0:ba7650f404af 40
aravindsv 0:ba7650f404af 41
aravindsv 0:ba7650f404af 42 /* ########################## Core Instruction Access ######################### */
aravindsv 0:ba7650f404af 43 /** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
aravindsv 0:ba7650f404af 44 Access to dedicated instructions
aravindsv 0:ba7650f404af 45 @{
aravindsv 0:ba7650f404af 46 */
aravindsv 0:ba7650f404af 47
aravindsv 0:ba7650f404af 48 #if defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
aravindsv 0:ba7650f404af 49 /* ARM armcc specific functions */
aravindsv 0:ba7650f404af 50
aravindsv 0:ba7650f404af 51 #if (__ARMCC_VERSION < 400677)
aravindsv 0:ba7650f404af 52 #error "Please use ARM Compiler Toolchain V4.0.677 or later!"
aravindsv 0:ba7650f404af 53 #endif
aravindsv 0:ba7650f404af 54
aravindsv 0:ba7650f404af 55
aravindsv 0:ba7650f404af 56 /** \brief No Operation
aravindsv 0:ba7650f404af 57
aravindsv 0:ba7650f404af 58 No Operation does nothing. This instruction can be used for code alignment purposes.
aravindsv 0:ba7650f404af 59 */
aravindsv 0:ba7650f404af 60 #define __NOP __nop
aravindsv 0:ba7650f404af 61
aravindsv 0:ba7650f404af 62
aravindsv 0:ba7650f404af 63 /** \brief Wait For Interrupt
aravindsv 0:ba7650f404af 64
aravindsv 0:ba7650f404af 65 Wait For Interrupt is a hint instruction that suspends execution
aravindsv 0:ba7650f404af 66 until one of a number of events occurs.
aravindsv 0:ba7650f404af 67 */
aravindsv 0:ba7650f404af 68 #define __WFI __wfi
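/* Editor's note (illustrative sketch, not part of the original CMSIS header):
   a typical use of __WFI is an interrupt-driven idle loop. The flag
   `work_pending` is a hypothetical application variable, assumed to be set
   from an interrupt handler. */
#if 0 /* example only */
static volatile uint32_t work_pending;

static void idle_until_work(void)
{
    while (work_pending == 0U) {
        __WFI();                   /* sleep until any enabled interrupt fires */
    }
}
#endif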
aravindsv 0:ba7650f404af 69
aravindsv 0:ba7650f404af 70
aravindsv 0:ba7650f404af 71 /** \brief Wait For Event
aravindsv 0:ba7650f404af 72
aravindsv 0:ba7650f404af 73 Wait For Event is a hint instruction that permits the processor to enter
aravindsv 0:ba7650f404af 74 a low-power state until one of a number of events occurs.
aravindsv 0:ba7650f404af 75 */
aravindsv 0:ba7650f404af 76 #define __WFE __wfe
aravindsv 0:ba7650f404af 77
aravindsv 0:ba7650f404af 78
aravindsv 0:ba7650f404af 79 /** \brief Send Event
aravindsv 0:ba7650f404af 80
aravindsv 0:ba7650f404af 81 Send Event is a hint instruction. It causes an event to be signaled to the CPU.
aravindsv 0:ba7650f404af 82 */
aravindsv 0:ba7650f404af 83 #define __SEV __sev
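/* Editor's note (illustrative sketch, not part of the original CMSIS header):
   __WFE and __SEV are typically paired. `data_ready` is a hypothetical flag;
   on single-core devices the wake-up event usually comes from an interrupt
   rather than from another core executing __SEV. */
#if 0 /* example only */
static volatile uint32_t data_ready;

static void consumer_wait(void)
{
    while (data_ready == 0U) {
        __WFE();                   /* low-power wait until an event arrives */
    }
}

static void producer_signal(void)
{
    data_ready = 1U;
    __SEV();                       /* wake any core waiting in __WFE */
}
#endif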
aravindsv 0:ba7650f404af 84
aravindsv 0:ba7650f404af 85
aravindsv 0:ba7650f404af 86 /** \brief Instruction Synchronization Barrier
aravindsv 0:ba7650f404af 87
aravindsv 0:ba7650f404af 88 Instruction Synchronization Barrier flushes the pipeline in the processor,
aravindsv 0:ba7650f404af 89 so that all instructions following the ISB are fetched from cache or
aravindsv 0:ba7650f404af 90 memory after the ISB instruction has completed.
aravindsv 0:ba7650f404af 91 */
aravindsv 0:ba7650f404af 92 #define __ISB() __isb(0xF)
aravindsv 0:ba7650f404af 93
aravindsv 0:ba7650f404af 94
aravindsv 0:ba7650f404af 95 /** \brief Data Synchronization Barrier
aravindsv 0:ba7650f404af 96
aravindsv 0:ba7650f404af 97 This function acts as a special kind of Data Memory Barrier.
aravindsv 0:ba7650f404af 98 It completes when all explicit memory accesses before this instruction complete.
aravindsv 0:ba7650f404af 99 */
aravindsv 0:ba7650f404af 100 #define __DSB() __dsb(0xF)
aravindsv 0:ba7650f404af 101
aravindsv 0:ba7650f404af 102
aravindsv 0:ba7650f404af 103 /** \brief Data Memory Barrier
aravindsv 0:ba7650f404af 104
aravindsv 0:ba7650f404af 105 This function ensures the apparent order of the explicit memory operations before
aravindsv 0:ba7650f404af 106 and after the instruction, without ensuring their completion.
aravindsv 0:ba7650f404af 107 */
aravindsv 0:ba7650f404af 108 #define __DMB() __dmb(0xF)
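/* Editor's note (illustrative sketch, not part of the original CMSIS header):
   __DMB is typically used to order a data write before the flag that
   publishes it; __DSB/__ISB are used where completion and a pipeline flush
   are required (e.g. after relocating the vector table). The variable names
   below are hypothetical. */
#if 0 /* example only */
static volatile uint32_t shared_value;
static volatile uint32_t shared_valid;

static void publish(uint32_t v)
{
    shared_value = v;
    __DMB();                       /* data must be observable before the flag */
    shared_valid = 1U;
}
#endif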
aravindsv 0:ba7650f404af 109
aravindsv 0:ba7650f404af 110
aravindsv 0:ba7650f404af 111 /** \brief Reverse byte order (32 bit)
aravindsv 0:ba7650f404af 112
aravindsv 0:ba7650f404af 113 This function reverses the byte order in an integer value.
aravindsv 0:ba7650f404af 114
aravindsv 0:ba7650f404af 115 \param [in] value Value to reverse
aravindsv 0:ba7650f404af 116 \return Reversed value
aravindsv 0:ba7650f404af 117 */
aravindsv 0:ba7650f404af 118 #define __REV __rev
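/* Editor's note (illustrative sketch, not part of the original CMSIS header):
   a common use of __REV is endianness conversion, e.g. turning the
   little-endian value 0x12345678 into big-endian (network) order 0x78563412. */
#if 0 /* example only */
static uint32_t to_big_endian32(uint32_t host_value)
{
    return __REV(host_value);      /* compiles to a single REV instruction */
}
#endif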
aravindsv 0:ba7650f404af 119
aravindsv 0:ba7650f404af 120
aravindsv 0:ba7650f404af 121 /** \brief Reverse byte order (16 bit)
aravindsv 0:ba7650f404af 122
aravindsv 0:ba7650f404af 123 This function reverses the byte order in two unsigned short values.
aravindsv 0:ba7650f404af 124
aravindsv 0:ba7650f404af 125 \param [in] value Value to reverse
aravindsv 0:ba7650f404af 126 \return Reversed value
aravindsv 0:ba7650f404af 127 */
aravindsv 0:ba7650f404af 128 #ifndef __NO_EMBEDDED_ASM
aravindsv 0:ba7650f404af 129 __attribute__((section(".rev16_text"))) __STATIC_INLINE __ASM uint32_t __REV16(uint32_t value)
aravindsv 0:ba7650f404af 130 {
aravindsv 0:ba7650f404af 131 rev16 r0, r0
aravindsv 0:ba7650f404af 132 bx lr
aravindsv 0:ba7650f404af 133 }
aravindsv 0:ba7650f404af 134 #endif
aravindsv 0:ba7650f404af 135
aravindsv 0:ba7650f404af 136 /** \brief Reverse byte order in signed short value
aravindsv 0:ba7650f404af 137
aravindsv 0:ba7650f404af 138 This function reverses the byte order in a signed short value with sign extension to integer.
aravindsv 0:ba7650f404af 139
aravindsv 0:ba7650f404af 140 \param [in] value Value to reverse
aravindsv 0:ba7650f404af 141 \return Reversed value
aravindsv 0:ba7650f404af 142 */
aravindsv 0:ba7650f404af 143 #ifndef __NO_EMBEDDED_ASM
aravindsv 0:ba7650f404af 144 __attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int32_t __REVSH(int32_t value)
aravindsv 0:ba7650f404af 145 {
aravindsv 0:ba7650f404af 146 revsh r0, r0
aravindsv 0:ba7650f404af 147 bx lr
aravindsv 0:ba7650f404af 148 }
aravindsv 0:ba7650f404af 149 #endif
aravindsv 0:ba7650f404af 150
aravindsv 0:ba7650f404af 151
aravindsv 0:ba7650f404af 152 /** \brief Rotate Right in unsigned value (32 bit)
aravindsv 0:ba7650f404af 153
aravindsv 0:ba7650f404af 154 This function rotates a 32-bit value right by a variable number of bits and returns the result.
aravindsv 0:ba7650f404af 155
aravindsv 0:ba7650f404af 156 \param [in] value Value to rotate
aravindsv 0:ba7650f404af 157 \param [in] shift Number of bits to rotate
aravindsv 0:ba7650f404af 158 \return Rotated value
aravindsv 0:ba7650f404af 159 */
aravindsv 0:ba7650f404af 160 #define __ROR __ror
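/* Editor's note (illustrative sketch, not part of the original CMSIS header):
   unlike a shift, a rotate feeds the bits that fall off the low end back in
   at the top, e.g. __ROR(0x80000001, 1) yields 0xC0000000. */
#if 0 /* example only */
static uint32_t ror_example(void)
{
    return __ROR(0x80000001U, 1U); /* expected result: 0xC0000000 */
}
#endif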
aravindsv 0:ba7650f404af 161
aravindsv 0:ba7650f404af 162
aravindsv 0:ba7650f404af 163 /** \brief Breakpoint
aravindsv 0:ba7650f404af 164
aravindsv 0:ba7650f404af 165 This function causes the processor to enter Debug state.
aravindsv 0:ba7650f404af 166 Debug tools can use this to investigate system state when the instruction at a particular address is reached.
aravindsv 0:ba7650f404af 167
aravindsv 0:ba7650f404af 168 \param [in] value is ignored by the processor.
aravindsv 0:ba7650f404af 169 If required, a debugger can use it to store additional information about the breakpoint.
aravindsv 0:ba7650f404af 170 */
aravindsv 0:ba7650f404af 171 #define __BKPT(value) __breakpoint(value)
aravindsv 0:ba7650f404af 172
aravindsv 0:ba7650f404af 173
aravindsv 0:ba7650f404af 174 #if (__CORTEX_M >= 0x03)
aravindsv 0:ba7650f404af 175
aravindsv 0:ba7650f404af 176 /** \brief Reverse bit order of value
aravindsv 0:ba7650f404af 177
aravindsv 0:ba7650f404af 178 This function reverses the bit order of the given value.
aravindsv 0:ba7650f404af 179
aravindsv 0:ba7650f404af 180 \param [in] value Value to reverse
aravindsv 0:ba7650f404af 181 \return Reversed value
aravindsv 0:ba7650f404af 182 */
aravindsv 0:ba7650f404af 183 #define __RBIT __rbit
aravindsv 0:ba7650f404af 184
aravindsv 0:ba7650f404af 185
aravindsv 0:ba7650f404af 186 /** \brief LDR Exclusive (8 bit)
aravindsv 0:ba7650f404af 187
aravindsv 0:ba7650f404af 188 This function performs an exclusive LDR command for an 8-bit value.
aravindsv 0:ba7650f404af 189
aravindsv 0:ba7650f404af 190 \param [in] ptr Pointer to data
aravindsv 0:ba7650f404af 191 \return value of type uint8_t at (*ptr)
aravindsv 0:ba7650f404af 192 */
aravindsv 0:ba7650f404af 193 #define __LDREXB(ptr) ((uint8_t ) __ldrex(ptr))
aravindsv 0:ba7650f404af 194
aravindsv 0:ba7650f404af 195
aravindsv 0:ba7650f404af 196 /** \brief LDR Exclusive (16 bit)
aravindsv 0:ba7650f404af 197
aravindsv 0:ba7650f404af 198 This function performs an exclusive LDR command for 16-bit values.
aravindsv 0:ba7650f404af 199
aravindsv 0:ba7650f404af 200 \param [in] ptr Pointer to data
aravindsv 0:ba7650f404af 201 \return value of type uint16_t at (*ptr)
aravindsv 0:ba7650f404af 202 */
aravindsv 0:ba7650f404af 203 #define __LDREXH(ptr) ((uint16_t) __ldrex(ptr))
aravindsv 0:ba7650f404af 204
aravindsv 0:ba7650f404af 205
aravindsv 0:ba7650f404af 206 /** \brief LDR Exclusive (32 bit)
aravindsv 0:ba7650f404af 207
aravindsv 0:ba7650f404af 208 This function performs an exclusive LDR command for 32-bit values.
aravindsv 0:ba7650f404af 209
aravindsv 0:ba7650f404af 210 \param [in] ptr Pointer to data
aravindsv 0:ba7650f404af 211 \return value of type uint32_t at (*ptr)
aravindsv 0:ba7650f404af 212 */
aravindsv 0:ba7650f404af 213 #define __LDREXW(ptr) ((uint32_t ) __ldrex(ptr))
aravindsv 0:ba7650f404af 214
aravindsv 0:ba7650f404af 215
aravindsv 0:ba7650f404af 216 /** \brief STR Exclusive (8 bit)
aravindsv 0:ba7650f404af 217
aravindsv 0:ba7650f404af 218 This function performs an exclusive STR command for 8-bit values.
aravindsv 0:ba7650f404af 219
aravindsv 0:ba7650f404af 220 \param [in] value Value to store
aravindsv 0:ba7650f404af 221 \param [in] ptr Pointer to location
aravindsv 0:ba7650f404af 222 \return 0 Function succeeded
aravindsv 0:ba7650f404af 223 \return 1 Function failed
aravindsv 0:ba7650f404af 224 */
aravindsv 0:ba7650f404af 225 #define __STREXB(value, ptr) __strex(value, ptr)
aravindsv 0:ba7650f404af 226
aravindsv 0:ba7650f404af 227
aravindsv 0:ba7650f404af 228 /** \brief STR Exclusive (16 bit)
aravindsv 0:ba7650f404af 229
aravindsv 0:ba7650f404af 230 This function performs an exclusive STR command for 16-bit values.
aravindsv 0:ba7650f404af 231
aravindsv 0:ba7650f404af 232 \param [in] value Value to store
aravindsv 0:ba7650f404af 233 \param [in] ptr Pointer to location
aravindsv 0:ba7650f404af 234 \return 0 Function succeeded
aravindsv 0:ba7650f404af 235 \return 1 Function failed
aravindsv 0:ba7650f404af 236 */
aravindsv 0:ba7650f404af 237 #define __STREXH(value, ptr) __strex(value, ptr)
aravindsv 0:ba7650f404af 238
aravindsv 0:ba7650f404af 239
aravindsv 0:ba7650f404af 240 /** \brief STR Exclusive (32 bit)
aravindsv 0:ba7650f404af 241
aravindsv 0:ba7650f404af 242 This function performs an exclusive STR command for 32-bit values.
aravindsv 0:ba7650f404af 243
aravindsv 0:ba7650f404af 244 \param [in] value Value to store
aravindsv 0:ba7650f404af 245 \param [in] ptr Pointer to location
aravindsv 0:ba7650f404af 246 \return 0 Function succeeded
aravindsv 0:ba7650f404af 247 \return 1 Function failed
aravindsv 0:ba7650f404af 248 */
aravindsv 0:ba7650f404af 249 #define __STREXW(value, ptr) __strex(value, ptr)
aravindsv 0:ba7650f404af 250
aravindsv 0:ba7650f404af 251
aravindsv 0:ba7650f404af 252 /** \brief Remove the exclusive lock
aravindsv 0:ba7650f404af 253
aravindsv 0:ba7650f404af 254 This function removes the exclusive lock which is created by LDREX.
aravindsv 0:ba7650f404af 255
aravindsv 0:ba7650f404af 256 */
aravindsv 0:ba7650f404af 257 #define __CLREX __clrex
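/* Editor's note (illustrative sketch, not part of the original CMSIS header):
   the usual LDREX/STREX pattern is a retry loop; here it implements an atomic
   increment. It relies only on the intrinsics defined above and is valid only
   inside the (__CORTEX_M >= 0x03) guard. */
#if 0 /* example only */
static void atomic_increment(volatile uint32_t *counter)
{
    uint32_t value;
    do {
        value = __LDREXW(counter) + 1U;       /* load with exclusive monitor set */
    } while (__STREXW(value, counter) != 0U); /* retry if exclusivity was lost */
}
#endif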
aravindsv 0:ba7650f404af 258
aravindsv 0:ba7650f404af 259
aravindsv 0:ba7650f404af 260 /** \brief Signed Saturate
aravindsv 0:ba7650f404af 261
aravindsv 0:ba7650f404af 262 This function saturates a signed value.
aravindsv 0:ba7650f404af 263
aravindsv 0:ba7650f404af 264 \param [in] value Value to be saturated
aravindsv 0:ba7650f404af 265 \param [in] sat Bit position to saturate to (1..32)
aravindsv 0:ba7650f404af 266 \return Saturated value
aravindsv 0:ba7650f404af 267 */
aravindsv 0:ba7650f404af 268 #define __SSAT __ssat
aravindsv 0:ba7650f404af 269
aravindsv 0:ba7650f404af 270
aravindsv 0:ba7650f404af 271 /** \brief Unsigned Saturate
aravindsv 0:ba7650f404af 272
aravindsv 0:ba7650f404af 273 This function saturates an unsigned value.
aravindsv 0:ba7650f404af 274
aravindsv 0:ba7650f404af 275 \param [in] value Value to be saturated
aravindsv 0:ba7650f404af 276 \param [in] sat Bit position to saturate to (0..31)
aravindsv 0:ba7650f404af 277 \return Saturated value
aravindsv 0:ba7650f404af 278 */
aravindsv 0:ba7650f404af 279 #define __USAT __usat
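/* Editor's note (illustrative sketch, not part of the original CMSIS header):
   saturation is typically used to clamp a wide accumulator to a narrower
   sample width without branches; the second argument must be a constant. */
#if 0 /* example only */
static int16_t clamp_to_s16(int32_t acc)
{
    return (int16_t)__SSAT(acc, 16);   /* clamps to -32768..32767 */
}

static uint16_t clamp_to_u16(int32_t acc)
{
    return (uint16_t)__USAT(acc, 16);  /* clamps to 0..65535 */
}
#endif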
aravindsv 0:ba7650f404af 280
aravindsv 0:ba7650f404af 281
aravindsv 0:ba7650f404af 282 /** \brief Count leading zeros
aravindsv 0:ba7650f404af 283
aravindsv 0:ba7650f404af 284 This function counts the number of leading zeros of a data value.
aravindsv 0:ba7650f404af 285
aravindsv 0:ba7650f404af 286 \param [in] value Value to count the leading zeros
aravindsv 0:ba7650f404af 287 \return number of leading zeros in value
aravindsv 0:ba7650f404af 288 */
aravindsv 0:ba7650f404af 289 #define __CLZ __clz
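/* Editor's note (illustrative sketch, not part of the original CMSIS header):
   __CLZ gives a branch-free integer log2: 31 - __CLZ(x) is the index of the
   highest set bit (the caller must exclude x == 0). */
#if 0 /* example only */
static uint32_t highest_set_bit(uint32_t x)   /* precondition: x != 0 */
{
    return 31U - __CLZ(x);
}
#endif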
aravindsv 0:ba7650f404af 290
aravindsv 0:ba7650f404af 291 #endif /* (__CORTEX_M >= 0x03) */
aravindsv 0:ba7650f404af 292
aravindsv 0:ba7650f404af 293
aravindsv 0:ba7650f404af 294
aravindsv 0:ba7650f404af 295 #elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
aravindsv 0:ba7650f404af 296 /* IAR iccarm specific functions */
aravindsv 0:ba7650f404af 297
aravindsv 0:ba7650f404af 298 #include <cmsis_iar.h>
aravindsv 0:ba7650f404af 299
aravindsv 0:ba7650f404af 300
aravindsv 0:ba7650f404af 301 #elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
aravindsv 0:ba7650f404af 302 /* TI CCS specific functions */
aravindsv 0:ba7650f404af 303
aravindsv 0:ba7650f404af 304 #include <cmsis_ccs.h>
aravindsv 0:ba7650f404af 305
aravindsv 0:ba7650f404af 306
aravindsv 0:ba7650f404af 307 #elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
aravindsv 0:ba7650f404af 308 /* GNU gcc specific functions */
aravindsv 0:ba7650f404af 309
aravindsv 0:ba7650f404af 310 /* Define macros for porting to both thumb1 and thumb2.
aravindsv 0:ba7650f404af 311 * For thumb1, use low registers (r0-r7), specified by constraint "l"
aravindsv 0:ba7650f404af 312 * Otherwise, use general registers, specified by constraint "r" */
aravindsv 0:ba7650f404af 313 #if defined (__thumb__) && !defined (__thumb2__)
aravindsv 0:ba7650f404af 314 #define __CMSIS_GCC_OUT_REG(r) "=l" (r)
aravindsv 0:ba7650f404af 315 #define __CMSIS_GCC_USE_REG(r) "l" (r)
aravindsv 0:ba7650f404af 316 #else
aravindsv 0:ba7650f404af 317 #define __CMSIS_GCC_OUT_REG(r) "=r" (r)
aravindsv 0:ba7650f404af 318 #define __CMSIS_GCC_USE_REG(r) "r" (r)
aravindsv 0:ba7650f404af 319 #endif
aravindsv 0:ba7650f404af 320
aravindsv 0:ba7650f404af 321 /** \brief No Operation
aravindsv 0:ba7650f404af 322
aravindsv 0:ba7650f404af 323 No Operation does nothing. This instruction can be used for code alignment purposes.
aravindsv 0:ba7650f404af 324 */
aravindsv 0:ba7650f404af 325 __attribute__( ( always_inline ) ) __STATIC_INLINE void __NOP(void)
aravindsv 0:ba7650f404af 326 {
aravindsv 0:ba7650f404af 327 __ASM volatile ("nop");
aravindsv 0:ba7650f404af 328 }
aravindsv 0:ba7650f404af 329
aravindsv 0:ba7650f404af 330
aravindsv 0:ba7650f404af 331 /** \brief Wait For Interrupt
aravindsv 0:ba7650f404af 332
aravindsv 0:ba7650f404af 333 Wait For Interrupt is a hint instruction that suspends execution
aravindsv 0:ba7650f404af 334 until one of a number of events occurs.
aravindsv 0:ba7650f404af 335 */
aravindsv 0:ba7650f404af 336 __attribute__( ( always_inline ) ) __STATIC_INLINE void __WFI(void)
aravindsv 0:ba7650f404af 337 {
aravindsv 0:ba7650f404af 338 __ASM volatile ("wfi");
aravindsv 0:ba7650f404af 339 }
aravindsv 0:ba7650f404af 340
aravindsv 0:ba7650f404af 341
aravindsv 0:ba7650f404af 342 /** \brief Wait For Event
aravindsv 0:ba7650f404af 343
aravindsv 0:ba7650f404af 344 Wait For Event is a hint instruction that permits the processor to enter
aravindsv 0:ba7650f404af 345 a low-power state until one of a number of events occurs.
aravindsv 0:ba7650f404af 346 */
aravindsv 0:ba7650f404af 347 __attribute__( ( always_inline ) ) __STATIC_INLINE void __WFE(void)
aravindsv 0:ba7650f404af 348 {
aravindsv 0:ba7650f404af 349 __ASM volatile ("wfe");
aravindsv 0:ba7650f404af 350 }
aravindsv 0:ba7650f404af 351
aravindsv 0:ba7650f404af 352
aravindsv 0:ba7650f404af 353 /** \brief Send Event
aravindsv 0:ba7650f404af 354
aravindsv 0:ba7650f404af 355 Send Event is a hint instruction. It causes an event to be signaled to the CPU.
aravindsv 0:ba7650f404af 356 */
aravindsv 0:ba7650f404af 357 __attribute__( ( always_inline ) ) __STATIC_INLINE void __SEV(void)
aravindsv 0:ba7650f404af 358 {
aravindsv 0:ba7650f404af 359 __ASM volatile ("sev");
aravindsv 0:ba7650f404af 360 }
aravindsv 0:ba7650f404af 361
aravindsv 0:ba7650f404af 362
aravindsv 0:ba7650f404af 363 /** \brief Instruction Synchronization Barrier
aravindsv 0:ba7650f404af 364
aravindsv 0:ba7650f404af 365 Instruction Synchronization Barrier flushes the pipeline in the processor,
aravindsv 0:ba7650f404af 366 so that all instructions following the ISB are fetched from cache or
aravindsv 0:ba7650f404af 367 memory after the ISB instruction has completed.
aravindsv 0:ba7650f404af 368 */
aravindsv 0:ba7650f404af 369 __attribute__( ( always_inline ) ) __STATIC_INLINE void __ISB(void)
aravindsv 0:ba7650f404af 370 {
aravindsv 0:ba7650f404af 371 __ASM volatile ("isb");
aravindsv 0:ba7650f404af 372 }
aravindsv 0:ba7650f404af 373
aravindsv 0:ba7650f404af 374
aravindsv 0:ba7650f404af 375 /** \brief Data Synchronization Barrier
aravindsv 0:ba7650f404af 376
aravindsv 0:ba7650f404af 377 This function acts as a special kind of Data Memory Barrier.
aravindsv 0:ba7650f404af 378 It completes when all explicit memory accesses before this instruction complete.
aravindsv 0:ba7650f404af 379 */
aravindsv 0:ba7650f404af 380 __attribute__( ( always_inline ) ) __STATIC_INLINE void __DSB(void)
aravindsv 0:ba7650f404af 381 {
aravindsv 0:ba7650f404af 382 __ASM volatile ("dsb");
aravindsv 0:ba7650f404af 383 }
aravindsv 0:ba7650f404af 384
aravindsv 0:ba7650f404af 385
aravindsv 0:ba7650f404af 386 /** \brief Data Memory Barrier
aravindsv 0:ba7650f404af 387
aravindsv 0:ba7650f404af 388 This function ensures the apparent order of the explicit memory operations before
aravindsv 0:ba7650f404af 389 and after the instruction, without ensuring their completion.
aravindsv 0:ba7650f404af 390 */
aravindsv 0:ba7650f404af 391 __attribute__( ( always_inline ) ) __STATIC_INLINE void __DMB(void)
aravindsv 0:ba7650f404af 392 {
aravindsv 0:ba7650f404af 393 __ASM volatile ("dmb");
aravindsv 0:ba7650f404af 394 }
aravindsv 0:ba7650f404af 395
aravindsv 0:ba7650f404af 396
aravindsv 0:ba7650f404af 397 /** \brief Reverse byte order (32 bit)
aravindsv 0:ba7650f404af 398
aravindsv 0:ba7650f404af 399 This function reverses the byte order in an integer value.
aravindsv 0:ba7650f404af 400
aravindsv 0:ba7650f404af 401 \param [in] value Value to reverse
aravindsv 0:ba7650f404af 402 \return Reversed value
aravindsv 0:ba7650f404af 403 */
aravindsv 0:ba7650f404af 404 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __REV(uint32_t value)
aravindsv 0:ba7650f404af 405 {
aravindsv 0:ba7650f404af 406 #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
aravindsv 0:ba7650f404af 407 return __builtin_bswap32(value);
aravindsv 0:ba7650f404af 408 #else
aravindsv 0:ba7650f404af 409 uint32_t result;
aravindsv 0:ba7650f404af 410
aravindsv 0:ba7650f404af 411 __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
aravindsv 0:ba7650f404af 412 return(result);
aravindsv 0:ba7650f404af 413 #endif
aravindsv 0:ba7650f404af 414 }
aravindsv 0:ba7650f404af 415
aravindsv 0:ba7650f404af 416
aravindsv 0:ba7650f404af 417 /** \brief Reverse byte order (16 bit)
aravindsv 0:ba7650f404af 418
aravindsv 0:ba7650f404af 419 This function reverses the byte order in two unsigned short values.
aravindsv 0:ba7650f404af 420
aravindsv 0:ba7650f404af 421 \param [in] value Value to reverse
aravindsv 0:ba7650f404af 422 \return Reversed value
aravindsv 0:ba7650f404af 423 */
aravindsv 0:ba7650f404af 424 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __REV16(uint32_t value)
aravindsv 0:ba7650f404af 425 {
aravindsv 0:ba7650f404af 426 uint32_t result;
aravindsv 0:ba7650f404af 427
aravindsv 0:ba7650f404af 428 __ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
aravindsv 0:ba7650f404af 429 return(result);
aravindsv 0:ba7650f404af 430 }
aravindsv 0:ba7650f404af 431
aravindsv 0:ba7650f404af 432
aravindsv 0:ba7650f404af 433 /** \brief Reverse byte order in signed short value
aravindsv 0:ba7650f404af 434
aravindsv 0:ba7650f404af 435 This function reverses the byte order in a signed short value with sign extension to integer.
aravindsv 0:ba7650f404af 436
aravindsv 0:ba7650f404af 437 \param [in] value Value to reverse
aravindsv 0:ba7650f404af 438 \return Reversed value
aravindsv 0:ba7650f404af 439 */
aravindsv 0:ba7650f404af 440 __attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __REVSH(int32_t value)
aravindsv 0:ba7650f404af 441 {
aravindsv 0:ba7650f404af 442 #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
aravindsv 0:ba7650f404af 443 return (short)__builtin_bswap16(value);
aravindsv 0:ba7650f404af 444 #else
aravindsv 0:ba7650f404af 445 uint32_t result;
aravindsv 0:ba7650f404af 446
aravindsv 0:ba7650f404af 447 __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
aravindsv 0:ba7650f404af 448 return(result);
aravindsv 0:ba7650f404af 449 #endif
aravindsv 0:ba7650f404af 450 }
aravindsv 0:ba7650f404af 451
aravindsv 0:ba7650f404af 452
aravindsv 0:ba7650f404af 453 /** \brief Rotate Right in unsigned value (32 bit)
aravindsv 0:ba7650f404af 454
aravindsv 0:ba7650f404af 455 This function rotates a 32-bit value right by a variable number of bits and returns the result.
aravindsv 0:ba7650f404af 456
aravindsv 0:ba7650f404af 457 \param [in] op1 Value to rotate
aravindsv 0:ba7650f404af 458 \param [in] op2 Number of bits to rotate
aravindsv 0:ba7650f404af 459 \return Rotated value
aravindsv 0:ba7650f404af 460 */
aravindsv 0:ba7650f404af 461 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
aravindsv 0:ba7650f404af 462 {
aravindsv 0:ba7650f404af 463 return (op1 >> op2) | (op1 << (32 - op2));
aravindsv 0:ba7650f404af 464 }
aravindsv 0:ba7650f404af 465
aravindsv 0:ba7650f404af 466
aravindsv 0:ba7650f404af 467 /** \brief Breakpoint
aravindsv 0:ba7650f404af 468
aravindsv 0:ba7650f404af 469 This function causes the processor to enter Debug state.
aravindsv 0:ba7650f404af 470 Debug tools can use this to investigate system state when the instruction at a particular address is reached.
aravindsv 0:ba7650f404af 471
aravindsv 0:ba7650f404af 472 \param [in] value is ignored by the processor.
aravindsv 0:ba7650f404af 473 If required, a debugger can use it to store additional information about the breakpoint.
aravindsv 0:ba7650f404af 474 */
aravindsv 0:ba7650f404af 475 #define __BKPT(value) __ASM volatile ("bkpt "#value)
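/* Editor's note (illustrative sketch, not part of the original CMSIS header):
   __BKPT is often wrapped in a debug-only assert; the macro name DEBUG_ASSERT
   is hypothetical. The immediate (here 0) is ignored by the core but can be
   read by a debugger. */
#if 0 /* example only */
#define DEBUG_ASSERT(cond)   do { if (!(cond)) { __BKPT(0); } } while (0)
#endif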
aravindsv 0:ba7650f404af 476
aravindsv 0:ba7650f404af 477
aravindsv 0:ba7650f404af 478 #if (__CORTEX_M >= 0x03)
aravindsv 0:ba7650f404af 479
aravindsv 0:ba7650f404af 480 /** \brief Reverse bit order of value
aravindsv 0:ba7650f404af 481
aravindsv 0:ba7650f404af 482 This function reverses the bit order of the given value.
aravindsv 0:ba7650f404af 483
aravindsv 0:ba7650f404af 484 \param [in] value Value to reverse
aravindsv 0:ba7650f404af 485 \return Reversed value
aravindsv 0:ba7650f404af 486 */
aravindsv 0:ba7650f404af 487 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __RBIT(uint32_t value)
aravindsv 0:ba7650f404af 488 {
aravindsv 0:ba7650f404af 489 uint32_t result;
aravindsv 0:ba7650f404af 490
aravindsv 0:ba7650f404af 491 __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
aravindsv 0:ba7650f404af 492 return(result);
aravindsv 0:ba7650f404af 493 }
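/* Editor's note (illustrative sketch, not part of the original CMSIS header):
   __RBIT reverses all 32 bits; combined with a shift it reflects narrower
   fields, e.g. the 8-bit reflection used by some CRC algorithms. */
#if 0 /* example only */
static uint32_t reflect8(uint32_t byte)
{
    return __RBIT(byte) >> 24;     /* keep only the reversed low 8 bits */
}
#endif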
aravindsv 0:ba7650f404af 494
aravindsv 0:ba7650f404af 495
aravindsv 0:ba7650f404af 496 /** \brief LDR Exclusive (8 bit)
aravindsv 0:ba7650f404af 497
aravindsv 0:ba7650f404af 498 This function performs an exclusive LDR command for an 8-bit value.
aravindsv 0:ba7650f404af 499
aravindsv 0:ba7650f404af 500 \param [in] ptr Pointer to data
aravindsv 0:ba7650f404af 501 \return value of type uint8_t at (*ptr)
aravindsv 0:ba7650f404af 502 */
aravindsv 0:ba7650f404af 503 __attribute__( ( always_inline ) ) __STATIC_INLINE uint8_t __LDREXB(volatile uint8_t *addr)
aravindsv 0:ba7650f404af 504 {
aravindsv 0:ba7650f404af 505 uint32_t result;
aravindsv 0:ba7650f404af 506
aravindsv 0:ba7650f404af 507 #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
aravindsv 0:ba7650f404af 508 __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
aravindsv 0:ba7650f404af 509 #else
aravindsv 0:ba7650f404af 510 /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
aravindsv 0:ba7650f404af 511 accepted by the assembler, so the following less efficient pattern
aravindsv 0:ba7650f404af 512 has to be used. */
aravindsv 0:ba7650f404af 513 __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
aravindsv 0:ba7650f404af 514 #endif
aravindsv 0:ba7650f404af 515 return(result);
aravindsv 0:ba7650f404af 516 }
aravindsv 0:ba7650f404af 517
aravindsv 0:ba7650f404af 518
aravindsv 0:ba7650f404af 519 /** \brief LDR Exclusive (16 bit)
aravindsv 0:ba7650f404af 520
aravindsv 0:ba7650f404af 521 This function performs an exclusive LDR command for 16-bit values.
aravindsv 0:ba7650f404af 522
aravindsv 0:ba7650f404af 523 \param [in] ptr Pointer to data
aravindsv 0:ba7650f404af 524 \return value of type uint16_t at (*ptr)
aravindsv 0:ba7650f404af 525 */
aravindsv 0:ba7650f404af 526 __attribute__( ( always_inline ) ) __STATIC_INLINE uint16_t __LDREXH(volatile uint16_t *addr)
aravindsv 0:ba7650f404af 527 {
aravindsv 0:ba7650f404af 528 uint32_t result;
aravindsv 0:ba7650f404af 529
aravindsv 0:ba7650f404af 530 #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
aravindsv 0:ba7650f404af 531 __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
aravindsv 0:ba7650f404af 532 #else
aravindsv 0:ba7650f404af 533 /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
aravindsv 0:ba7650f404af 534 accepted by the assembler, so the following less efficient pattern
aravindsv 0:ba7650f404af 535 has to be used. */
aravindsv 0:ba7650f404af 536 __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
aravindsv 0:ba7650f404af 537 #endif
aravindsv 0:ba7650f404af 538 return(result);
aravindsv 0:ba7650f404af 539 }
aravindsv 0:ba7650f404af 540
aravindsv 0:ba7650f404af 541
aravindsv 0:ba7650f404af 542 /** \brief LDR Exclusive (32 bit)
aravindsv 0:ba7650f404af 543
aravindsv 0:ba7650f404af 544 This function performs an exclusive LDR command for 32-bit values.
aravindsv 0:ba7650f404af 545
aravindsv 0:ba7650f404af 546 \param [in] ptr Pointer to data
aravindsv 0:ba7650f404af 547 \return value of type uint32_t at (*ptr)
aravindsv 0:ba7650f404af 548 */
aravindsv 0:ba7650f404af 549 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __LDREXW(volatile uint32_t *addr)
aravindsv 0:ba7650f404af 550 {
aravindsv 0:ba7650f404af 551 uint32_t result;
aravindsv 0:ba7650f404af 552
aravindsv 0:ba7650f404af 553 __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
aravindsv 0:ba7650f404af 554 return(result);
aravindsv 0:ba7650f404af 555 }
aravindsv 0:ba7650f404af 556
aravindsv 0:ba7650f404af 557
aravindsv 0:ba7650f404af 558 /** \brief STR Exclusive (8 bit)
aravindsv 0:ba7650f404af 559
aravindsv 0:ba7650f404af 560 This function performs an exclusive STR command for 8-bit values.
aravindsv 0:ba7650f404af 561
aravindsv 0:ba7650f404af 562 \param [in] value Value to store
aravindsv 0:ba7650f404af 563 \param [in] ptr Pointer to location
aravindsv 0:ba7650f404af 564 \return 0 Function succeeded
aravindsv 0:ba7650f404af 565 \return 1 Function failed
aravindsv 0:ba7650f404af 566 */
aravindsv 0:ba7650f404af 567 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
aravindsv 0:ba7650f404af 568 {
aravindsv 0:ba7650f404af 569 uint32_t result;
aravindsv 0:ba7650f404af 570
aravindsv 0:ba7650f404af 571 __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
aravindsv 0:ba7650f404af 572 return(result);
aravindsv 0:ba7650f404af 573 }
aravindsv 0:ba7650f404af 574
aravindsv 0:ba7650f404af 575
aravindsv 0:ba7650f404af 576 /** \brief STR Exclusive (16 bit)
aravindsv 0:ba7650f404af 577
aravindsv 0:ba7650f404af 578 This function performs an exclusive STR command for 16-bit values.
aravindsv 0:ba7650f404af 579
aravindsv 0:ba7650f404af 580 \param [in] value Value to store
aravindsv 0:ba7650f404af 581 \param [in] ptr Pointer to location
aravindsv 0:ba7650f404af 582 \return 0 Function succeeded
aravindsv 0:ba7650f404af 583 \return 1 Function failed
aravindsv 0:ba7650f404af 584 */
aravindsv 0:ba7650f404af 585 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
aravindsv 0:ba7650f404af 586 {
aravindsv 0:ba7650f404af 587 uint32_t result;
aravindsv 0:ba7650f404af 588
aravindsv 0:ba7650f404af 589 __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
aravindsv 0:ba7650f404af 590 return(result);
aravindsv 0:ba7650f404af 591 }
aravindsv 0:ba7650f404af 592
aravindsv 0:ba7650f404af 593
aravindsv 0:ba7650f404af 594 /** \brief STR Exclusive (32 bit)
aravindsv 0:ba7650f404af 595
aravindsv 0:ba7650f404af 596 This function performs an exclusive STR command for 32-bit values.
aravindsv 0:ba7650f404af 597
aravindsv 0:ba7650f404af 598 \param [in] value Value to store
aravindsv 0:ba7650f404af 599 \param [in] ptr Pointer to location
aravindsv 0:ba7650f404af 600 \return 0 Function succeeded
aravindsv 0:ba7650f404af 601 \return 1 Function failed
aravindsv 0:ba7650f404af 602 */
aravindsv 0:ba7650f404af 603 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
aravindsv 0:ba7650f404af 604 {
aravindsv 0:ba7650f404af 605 uint32_t result;
aravindsv 0:ba7650f404af 606
aravindsv 0:ba7650f404af 607 __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
aravindsv 0:ba7650f404af 608 return(result);
aravindsv 0:ba7650f404af 609 }
aravindsv 0:ba7650f404af 610
aravindsv 0:ba7650f404af 611
aravindsv 0:ba7650f404af 612 /** \brief Remove the exclusive lock
aravindsv 0:ba7650f404af 613
aravindsv 0:ba7650f404af 614 This function removes the exclusive lock which is created by LDREX.
aravindsv 0:ba7650f404af 615
aravindsv 0:ba7650f404af 616 */
aravindsv 0:ba7650f404af 617 __attribute__( ( always_inline ) ) __STATIC_INLINE void __CLREX(void)
aravindsv 0:ba7650f404af 618 {
aravindsv 0:ba7650f404af 619 __ASM volatile ("clrex" ::: "memory");
aravindsv 0:ba7650f404af 620 }
aravindsv 0:ba7650f404af 621
aravindsv 0:ba7650f404af 622
aravindsv 0:ba7650f404af 623 /** \brief Signed Saturate
aravindsv 0:ba7650f404af 624
aravindsv 0:ba7650f404af 625 This function saturates a signed value.
aravindsv 0:ba7650f404af 626
aravindsv 0:ba7650f404af 627 \param [in] value Value to be saturated
aravindsv 0:ba7650f404af 628 \param [in] sat Bit position to saturate to (1..32)
aravindsv 0:ba7650f404af 629 \return Saturated value
aravindsv 0:ba7650f404af 630 */
aravindsv 0:ba7650f404af 631 #define __SSAT(ARG1,ARG2) \
aravindsv 0:ba7650f404af 632 ({ \
aravindsv 0:ba7650f404af 633 uint32_t __RES, __ARG1 = (ARG1); \
aravindsv 0:ba7650f404af 634 __ASM ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
aravindsv 0:ba7650f404af 635 __RES; \
aravindsv 0:ba7650f404af 636 })
aravindsv 0:ba7650f404af 637
aravindsv 0:ba7650f404af 638
aravindsv 0:ba7650f404af 639 /** \brief Unsigned Saturate
aravindsv 0:ba7650f404af 640
aravindsv 0:ba7650f404af 641 This function saturates an unsigned value.
aravindsv 0:ba7650f404af 642
aravindsv 0:ba7650f404af 643 \param [in] value Value to be saturated
aravindsv 0:ba7650f404af 644 \param [in] sat Bit position to saturate to (0..31)
aravindsv 0:ba7650f404af 645 \return Saturated value
aravindsv 0:ba7650f404af 646 */
aravindsv 0:ba7650f404af 647 #define __USAT(ARG1,ARG2) \
aravindsv 0:ba7650f404af 648 ({ \
aravindsv 0:ba7650f404af 649 uint32_t __RES, __ARG1 = (ARG1); \
aravindsv 0:ba7650f404af 650 __ASM ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
aravindsv 0:ba7650f404af 651 __RES; \
aravindsv 0:ba7650f404af 652 })
aravindsv 0:ba7650f404af 653
aravindsv 0:ba7650f404af 654
aravindsv 0:ba7650f404af 655 /** \brief Count leading zeros
aravindsv 0:ba7650f404af 656
aravindsv 0:ba7650f404af 657 This function counts the number of leading zeros of a data value.
aravindsv 0:ba7650f404af 658
aravindsv 0:ba7650f404af 659 \param [in] value Value to count the leading zeros
aravindsv 0:ba7650f404af 660 \return number of leading zeros in value
aravindsv 0:ba7650f404af 661 */
aravindsv 0:ba7650f404af 662 __attribute__( ( always_inline ) ) __STATIC_INLINE uint8_t __CLZ(uint32_t value)
aravindsv 0:ba7650f404af 663 {
aravindsv 0:ba7650f404af 664 uint32_t result;
aravindsv 0:ba7650f404af 665
aravindsv 0:ba7650f404af 666 __ASM volatile ("clz %0, %1" : "=r" (result) : "r" (value) );
aravindsv 0:ba7650f404af 667 return(result);
aravindsv 0:ba7650f404af 668 }
aravindsv 0:ba7650f404af 669
aravindsv 0:ba7650f404af 670 #endif /* (__CORTEX_M >= 0x03) */
aravindsv 0:ba7650f404af 671
aravindsv 0:ba7650f404af 672
aravindsv 0:ba7650f404af 673
aravindsv 0:ba7650f404af 674
aravindsv 0:ba7650f404af 675 #elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
aravindsv 0:ba7650f404af 676 /* TASKING carm specific functions */
aravindsv 0:ba7650f404af 677
aravindsv 0:ba7650f404af 678 /*
aravindsv 0:ba7650f404af 679 * The CMSIS functions have been implemented as intrinsics in the compiler.
aravindsv 0:ba7650f404af 680 * Please use "carm -?i" to get an up to date list of all intrinsics,
aravindsv 0:ba7650f404af 681 * Including the CMSIS ones.
aravindsv 0:ba7650f404af 682 */
aravindsv 0:ba7650f404af 683
aravindsv 0:ba7650f404af 684 #endif
aravindsv 0:ba7650f404af 685
aravindsv 0:ba7650f404af 686 /*@}*/ /* end of group CMSIS_Core_InstructionInterface */
aravindsv 0:ba7650f404af 687
aravindsv 0:ba7650f404af 688 #endif /* __CORE_CMINSTR_H */