V4.0.1 of the ARM CMSIS DSP libraries. Note that arm_bitreversal2.s, arm_cfft_f32.c and arm_rfft_fast_f32.c had to be removed. arm_bitreversal2.s will not assemble with the online tools. So, the fast f32 FFT functions are not yet available. All the other FFT functions are available.

Dependents:   MPU9150_Example fir_f32 fir_f32 MPU9150_nucleo_noni2cdev ... more

Embed: (wiki syntax)

« Back to documentation index

Show/hide line numbers core_cmInstr.h Source File

core_cmInstr.h

Go to the documentation of this file.
00001 /**************************************************************************//**
00002  * @file     core_cmInstr.h
00003  * @brief    CMSIS Cortex-M Core Instruction Access Header File
00004  * @version  V3.30
00005  * @date     17. February 2014
00006  *
00007  * @note
00008  *
00009  ******************************************************************************/
00010 /* Copyright (c) 2009 - 2014 ARM LIMITED
00011 
00012    All rights reserved.
00013    Redistribution and use in source and binary forms, with or without
00014    modification, are permitted provided that the following conditions are met:
00015    - Redistributions of source code must retain the above copyright
00016      notice, this list of conditions and the following disclaimer.
00017    - Redistributions in binary form must reproduce the above copyright
00018      notice, this list of conditions and the following disclaimer in the
00019      documentation and/or other materials provided with the distribution.
00020    - Neither the name of ARM nor the names of its contributors may be used
00021      to endorse or promote products derived from this software without
00022      specific prior written permission.
00023    *
00024    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
00025    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
00026    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
00027    ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
00028    LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
00029    CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
00030    SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
00031    INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
00032    CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
00033    ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
00034    POSSIBILITY OF SUCH DAMAGE.
00035    ---------------------------------------------------------------------------*/
00036 
00037 
00038 #ifndef __CORE_CMINSTR_H
00039 #define __CORE_CMINSTR_H
00040 
00041 
00042 /* ##########################  Core Instruction Access  ######################### */
00043 /** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
00044   Access to dedicated instructions
00045   @{
00046 */
00047 
00048 #if   defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
00049 /* ARM armcc specific functions */
00050 
00051 #if (__ARMCC_VERSION < 400677)
00052   #error "Please use ARM Compiler Toolchain V4.0.677 or later!"
00053 #endif
00054 
00055 
00056 /** \brief  No Operation
00057 
00058     No Operation does nothing. This instruction can be used for code alignment purposes.
00059  */
00060 #define __NOP                             __nop
00061 
00062 
00063 /** \brief  Wait For Interrupt
00064 
00065     Wait For Interrupt is a hint instruction that suspends execution
00066     until one of a number of events occurs.
00067  */
00068 #define __WFI                             __wfi
00069 
00070 
00071 /** \brief  Wait For Event
00072 
00073     Wait For Event is a hint instruction that permits the processor to enter
00074     a low-power state until one of a number of events occurs.
00075  */
00076 #define __WFE                             __wfe
00077 
00078 
00079 /** \brief  Send Event
00080 
00081     Send Event is a hint instruction. It causes an event to be signaled to the CPU.
00082  */
00083 #define __SEV                             __sev
00084 
00085 
00086 /** \brief  Instruction Synchronization Barrier
00087 
00088     Instruction Synchronization Barrier flushes the pipeline in the processor,
00089     so that all instructions following the ISB are fetched from cache or
00090     memory, after the instruction has been completed. (The 0xF option selects the full-system barrier scope.)
00091  */
00092 #define __ISB()                           __isb(0xF)
00093 
00094 
00095 /** \brief  Data Synchronization Barrier
00096 
00097     This function acts as a special kind of Data Memory Barrier.
00098     It completes when all explicit memory accesses before this instruction complete. (The 0xF option selects the full-system barrier scope.)
00099  */
00100 #define __DSB()                           __dsb(0xF)
00101 
00102 
00103 /** \brief  Data Memory Barrier
00104 
00105     This function ensures the apparent order of the explicit memory operations before
00106     and after the instruction, without ensuring their completion. (The 0xF option selects the full-system barrier scope.)
00107  */
00108 #define __DMB()                           __dmb(0xF)
00109 
00110 
00111 /** \brief  Reverse byte order (32 bit)
00112 
00113     This function reverses the byte order in an integer value.
00114 
00115     \param [in]    value  Value to reverse
00116     \return               Reversed value
00117  */
00118 #define __REV                             __rev
00119 
00120 
00121 /** \brief  Reverse byte order (16 bit)
00122 
00123     This function reverses the byte order in two unsigned short values.
00124     (Embedded assembly: per the AAPCS the argument arrives in r0 and the result is returned in r0.)
00125     \param [in]    value  Value to reverse
00126     \return               Reversed value
00127  */
00128 #ifndef __NO_EMBEDDED_ASM
00129 __attribute__((section(".rev16_text"))) __STATIC_INLINE __ASM uint32_t __REV16(uint32_t value)
00130 {
00131   rev16 r0, r0
00132   bx lr
00133 }
00134 #endif
00135 
00136 /** \brief  Reverse byte order in signed short value
00137 
00138     This function reverses the byte order in a signed short value with sign extension to integer.
00139     (Embedded assembly: per the AAPCS the argument arrives in r0 and the result is returned in r0.)
00140     \param [in]    value  Value to reverse
00141     \return               Reversed value
00142  */
00143 #ifndef __NO_EMBEDDED_ASM
00144 __attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int32_t __REVSH(int32_t value)
00145 {
00146   revsh r0, r0
00147   bx lr
00148 }
00149 #endif
00150 
00151 
00152 /** \brief  Rotate Right in unsigned value (32 bit)
00153 
00154     This function rotates a 32-bit unsigned value right by the given number of bits.
00155 
00156     \param [in]    value  Value to rotate
00157     \param [in]    shift  Number of bits to rotate by
00158     \return               Rotated value
00159  */
00160 #define __ROR                             __ror
00161 
00162 
00163 /** \brief  Breakpoint
00164 
00165     This function causes the processor to enter Debug state.
00166     Debug tools can use this to investigate system state when the instruction at a particular address is reached.
00167 
00168     \param [in]    value  is ignored by the processor.
00169                    If required, a debugger can use it to store additional information about the breakpoint.
00170  */
00171 #define __BKPT(value)                       __breakpoint(value)
00172 
00173 
00174 #if       (__CORTEX_M >= 0x03)
00175 
00176 /** \brief  Reverse bit order of value
00177 
00178     This function reverses the bit order of the given value.
00179 
00180     \param [in]    value  Value to reverse
00181     \return               Reversed value
00182  */
00183 #define __RBIT                            __rbit
00184 
00185 
00186 /** \brief  LDR Exclusive (8 bit)
00187 
00188     This function performs an exclusive LDR command for 8 bit values.
00189 
00190     \param [in]    ptr  Pointer to data
00191     \return             value of type uint8_t at (*ptr)
00192  */
00193 #define __LDREXB(ptr)                     ((uint8_t ) __ldrex(ptr))
00194 
00195 
00196 /** \brief  LDR Exclusive (16 bit)
00197 
00198     This function performs an exclusive LDR command for 16 bit values.
00199 
00200     \param [in]    ptr  Pointer to data
00201     \return        value of type uint16_t at (*ptr)
00202  */
00203 #define __LDREXH(ptr)                     ((uint16_t) __ldrex(ptr))
00204 
00205 
00206 /** \brief  LDR Exclusive (32 bit)
00207 
00208     This function performs an exclusive LDR command for 32 bit values.
00209 
00210     \param [in]    ptr  Pointer to data
00211     \return        value of type uint32_t at (*ptr)
00212  */
00213 #define __LDREXW(ptr)                     ((uint32_t ) __ldrex(ptr))
00214 
00215 
00216 /** \brief  STR Exclusive (8 bit)
00217 
00218     This function performs an exclusive STR command for 8 bit values.
00219 
00220     \param [in]  value  Value to store
00221     \param [in]    ptr  Pointer to location
00222     \return          0  Function succeeded
00223     \return          1  Function failed
00224  */
00225 #define __STREXB(value, ptr)              __strex(value, ptr)
00226 
00227 
00228 /** \brief  STR Exclusive (16 bit)
00229 
00230     This function performs an exclusive STR command for 16 bit values.
00231 
00232     \param [in]  value  Value to store
00233     \param [in]    ptr  Pointer to location
00234     \return          0  Function succeeded
00235     \return          1  Function failed
00236  */
00237 #define __STREXH(value, ptr)              __strex(value, ptr)
00238 
00239 
00240 /** \brief  STR Exclusive (32 bit)
00241 
00242     This function performs an exclusive STR command for 32 bit values.
00243 
00244     \param [in]  value  Value to store
00245     \param [in]    ptr  Pointer to location
00246     \return          0  Function succeeded
00247     \return          1  Function failed
00248  */
00249 #define __STREXW(value, ptr)              __strex(value, ptr)
00250 
00251 
00252 /** \brief  Remove the exclusive lock
00253 
00254     This function removes the exclusive lock which is created by LDREX.
00255 
00256  */
00257 #define __CLREX                           __clrex
00258 
00259 
00260 /** \brief  Signed Saturate
00261 
00262     This function saturates a signed value.
00263 
00264     \param [in]  value  Value to be saturated
00265     \param [in]    sat  Bit position to saturate to (1..32)
00266     \return             Saturated value
00267  */
00268 #define __SSAT                            __ssat
00269 
00270 
00271 /** \brief  Unsigned Saturate
00272 
00273     This function saturates an unsigned value.
00274 
00275     \param [in]  value  Value to be saturated
00276     \param [in]    sat  Bit position to saturate to (0..31)
00277     \return             Saturated value
00278  */
00279 #define __USAT                            __usat
00280 
00281 
00282 /** \brief  Count leading zeros
00283 
00284     This function counts the number of leading zeros of a data value.
00285 
00286     \param [in]  value  Value to count the leading zeros
00287     \return             number of leading zeros in value (32 when value is 0)
00288  */
00289 #define __CLZ                             __clz
00290 
00291 #endif /* (__CORTEX_M >= 0x03) */
00292 
00293 
00294 #elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
00295 /* GNU gcc specific functions */
00296 
00297 /* Define macros for porting to both thumb1 and thumb2.
00298  * For thumb1, use low registers (r0-r7), specified by constraint "l".
00299  * Otherwise, use general registers, specified by constraint "r". */
00300 #if defined (__thumb__) && !defined (__thumb2__)
00301 #define __CMSIS_GCC_OUT_REG(r) "=l" (r)
00302 #define __CMSIS_GCC_USE_REG(r) "l" (r)
00303 #else
00304 #define __CMSIS_GCC_OUT_REG(r) "=r" (r)
00305 #define __CMSIS_GCC_USE_REG(r) "r" (r)
00306 #endif
00307 
00308 /** \brief  No Operation
00309 
00310     No Operation does nothing. This instruction can be used for code alignment purposes. (The volatile inline assembly keeps the compiler from optimising the instruction away.)
00311  */
00312 __attribute__( ( always_inline ) ) __STATIC_INLINE void __NOP(void)
00313 {
00314   __ASM volatile ("nop");
00315 }
00316 
00317 
00318 /** \brief  Wait For Interrupt
00319 
00320     Wait For Interrupt is a hint instruction that suspends execution
00321     until one of a number of events occurs.
00322  */
00323 __attribute__( ( always_inline ) ) __STATIC_INLINE void __WFI(void)
00324 {
00325   __ASM volatile ("wfi");
00326 }
00327 
00328 
00329 /** \brief  Wait For Event
00330 
00331     Wait For Event is a hint instruction that permits the processor to enter
00332     a low-power state until one of a number of events occurs.
00333  */
00334 __attribute__( ( always_inline ) ) __STATIC_INLINE void __WFE(void)
00335 {
00336   __ASM volatile ("wfe");
00337 }
00338 
00339 
00340 /** \brief  Send Event
00341 
00342     Send Event is a hint instruction. It causes an event to be signaled to the CPU.
00343  */
00344 __attribute__( ( always_inline ) ) __STATIC_INLINE void __SEV(void)
00345 {
00346   __ASM volatile ("sev");
00347 }
00348 
00349 
00350 /** \brief  Instruction Synchronization Barrier
00351 
00352     Instruction Synchronization Barrier flushes the pipeline in the processor,
00353     so that all instructions following the ISB are fetched from cache or
00354     memory, after the instruction has been completed.
00355  */
00356 __attribute__( ( always_inline ) ) __STATIC_INLINE void __ISB(void)
00357 {
00358   __ASM volatile ("isb");
00359 }
00360 
00361 
00362 /** \brief  Data Synchronization Barrier
00363 
00364     This function acts as a special kind of Data Memory Barrier.
00365     It completes when all explicit memory accesses before this instruction complete. (Issued without an option field, i.e. full-system scope.)
00366  */
00367 __attribute__( ( always_inline ) ) __STATIC_INLINE void __DSB(void)
00368 {
00369   __ASM volatile ("dsb");
00370 }
00371 
00372 
00373 /** \brief  Data Memory Barrier
00374 
00375     This function ensures the apparent order of the explicit memory operations before
00376     and after the instruction, without ensuring their completion. (Issued without an option field, i.e. full-system scope.)
00377  */
00378 __attribute__( ( always_inline ) ) __STATIC_INLINE void __DMB(void)
00379 {
00380   __ASM volatile ("dmb");
00381 }
00382 
00383 
00384 /** \brief  Reverse byte order (32 bit)
00385 
00386     This function reverses the byte order in an integer value.
00387     (GCC 4.5 and later use __builtin_bswap32; older versions fall back to inline assembly.)
00388     \param [in]    value  Value to reverse
00389     \return               Reversed value
00390  */
00391 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __REV(uint32_t value)
00392 {
00393 #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
00394   return __builtin_bswap32(value);
00395 #else
00396   uint32_t result;
00397 
00398   __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
00399   return(result);
00400 #endif
00401 }
00402 
00403 
00404 /** \brief  Reverse byte order (16 bit)
00405 
00406     This function reverses the byte order in two unsigned short values.
00407 
00408     \param [in]    value  Value to reverse
00409     \return               Reversed value
00410  */
00411 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __REV16(uint32_t value)
00412 {
00413   uint32_t result;
00414 
00415   __ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
00416   return(result);
00417 }
00418 
00419 
00420 /** \brief  Reverse byte order in signed short value
00421 
00422     This function reverses the byte order in a signed short value with sign extension to integer.
00423     (GCC 4.8 and later use __builtin_bswap16; older versions fall back to inline assembly.)
00424     \param [in]    value  Value to reverse
00425     \return               Reversed value
00426  */
00427 __attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __REVSH(int32_t value)
00428 {
00429 #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
00430   return (short)__builtin_bswap16(value);
00431 #else
00432   uint32_t result;
00433 
00434   __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
00435   return(result);
00436 #endif
00437 }
00438 
00439 
00440 /** \brief  Rotate Right in unsigned value (32 bit)
00441 
00442     This function rotates a 32-bit unsigned value right by the given number of bits.
00443     The rotate count is taken modulo 32; a count of 0 returns the value unchanged,
00444     avoiding the undefined behaviour of a 32-bit left shift in the rotate expression.
00445 
00446     \param [in]    op1  Value to rotate
00447     \param [in]    op2  Number of bits to rotate by
00448     \return             Rotated value
00449  */
00450 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
00451 {
00452   op2 &= 31U;              /* rotation is modulo 32 */
00453   if (op2 == 0U) {
00454     return op1;            /* (op1 << 32) would be undefined behaviour */
00455   }
00456   return (op1 >> op2) | (op1 << (32U - op2));
00457 }
00452 
00453 
00454 /** \brief  Breakpoint
00455 
00456     This function causes the processor to enter Debug state.
00457     Debug tools can use this to investigate system state when the instruction at a particular address is reached.
00458 
00459     \param [in]    value  is ignored by the processor.
00460                    If required, a debugger can use it to store additional information about the breakpoint. (value is stringized into the instruction via #value, so it must be an integer literal, not a variable.)
00461  */
00462 #define __BKPT(value)                       __ASM volatile ("bkpt "#value)
00463 
00464 
00465 #if       (__CORTEX_M >= 0x03)
00466 
00467 /** \brief  Reverse bit order of value
00468 
00469     This function reverses the bit order of the given value.
00470 
00471     \param [in]    value  Value to reverse
00472     \return               Reversed value
00473  */
00474 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __RBIT(uint32_t value)
00475 {
00476   uint32_t result;
00477 
00478    __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
00479    return(result);
00480 }
00481 
00482 
00483 /** \brief  LDR Exclusive (8 bit)
00484 
00485     This function performs an exclusive LDR command for 8 bit values.
00486 
00487     \param [in]    ptr  Pointer to data
00488     \return             value of type uint8_t at (*ptr)
00489  */
00490 __attribute__( ( always_inline ) ) __STATIC_INLINE uint8_t __LDREXB(volatile uint8_t *addr)
00491 {
00492     uint32_t result;
00493 
00494 #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
00495    __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
00496 #else
00497     /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
00498        accepted by assembler. So has to use following less efficient pattern.
00499     */
00500    __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
00501 #endif
00502    return ((uint8_t) result);    /* Add explicit type cast here */
00503 }
00504 
00505 
00506 /** \brief  LDR Exclusive (16 bit)
00507 
00508     This function performs an exclusive LDR command for 16 bit values.
00509 
00510     \param [in]    ptr  Pointer to data
00511     \return        value of type uint16_t at (*ptr)
00512  */
00513 __attribute__( ( always_inline ) ) __STATIC_INLINE uint16_t __LDREXH(volatile uint16_t *addr)
00514 {
00515     uint32_t result;
00516 
00517 #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
00518    __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
00519 #else
00520     /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
00521        accepted by assembler. So has to use following less efficient pattern.
00522     */
00523    __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
00524 #endif
00525    return ((uint16_t) result);    /* Add explicit type cast here */
00526 }
00527 
00528 
00529 /** \brief  LDR Exclusive (32 bit)
00530 
00531     This function performs an exclusive LDR command for 32 bit values.
00532 
00533     \param [in]    ptr  Pointer to data
00534     \return        value of type uint32_t at (*ptr)
00535  */
00536 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __LDREXW(volatile uint32_t *addr)
00537 {
00538     uint32_t result;
00539 
00540    __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
00541    return(result);
00542 }
00543 
00544 
00545 /** \brief  STR Exclusive (8 bit)
00546 
00547     This function performs an exclusive STR command for 8 bit values.
00548 
00549     \param [in]  value  Value to store
00550     \param [in]    ptr  Pointer to location
00551     \return          0  Function succeeded
00552     \return          1  Function failed
00553  */
00554 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
00555 {
00556    uint32_t result;
00557 
00558    __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
00559    return(result);
00560 }
00561 
00562 
00563 /** \brief  STR Exclusive (16 bit)
00564 
00565     This function performs an exclusive STR command for 16 bit values.
00566 
00567     \param [in]  value  Value to store
00568     \param [in]    ptr  Pointer to location
00569     \return          0  Function succeeded
00570     \return          1  Function failed
00571  */
00572 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
00573 {
00574    uint32_t result;
00575 
00576    __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
00577    return(result);
00578 }
00579 
00580 
00581 /** \brief  STR Exclusive (32 bit)
00582 
00583     This function performs an exclusive STR command for 32 bit values.
00584 
00585     \param [in]  value  Value to store
00586     \param [in]    ptr  Pointer to location
00587     \return          0  Function succeeded
00588     \return          1  Function failed
00589  */
00590 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
00591 {
00592    uint32_t result;
00593 
00594    __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
00595    return(result);
00596 }
00597 
00598 
00599 /** \brief  Remove the exclusive lock
00600 
00601     This function removes the exclusive lock which is created by LDREX.
00602 
00603  */
00604 __attribute__( ( always_inline ) ) __STATIC_INLINE void __CLREX(void)
00605 {
00606   __ASM volatile ("clrex" ::: "memory");
00607 }
00608 
00609 
00610 /** \brief  Signed Saturate
00611 
00612     This function saturates a signed value.
00613 
00614     \param [in]  value  Value to be saturated
00615     \param [in]    sat  Bit position to saturate to (1..32); must be a compile-time constant (asm "I" constraint)
00616     \return             Saturated value
00617  */
00618 #define __SSAT(ARG1,ARG2) \
00619 ({                          \
00620   uint32_t __RES, __ARG1 = (ARG1); \
00621   __ASM ("ssat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
00622   __RES; \
00623  })
00624 
00625 
00626 /** \brief  Unsigned Saturate
00627 
00628     This function saturates an unsigned value.
00629 
00630     \param [in]  value  Value to be saturated
00631     \param [in]    sat  Bit position to saturate to (0..31); must be a compile-time constant (asm "I" constraint)
00632     \return             Saturated value
00633  */
00634 #define __USAT(ARG1,ARG2) \
00635 ({                          \
00636   uint32_t __RES, __ARG1 = (ARG1); \
00637   __ASM ("usat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
00638   __RES; \
00639  })
00640 
00641 
00642 /** \brief  Count leading zeros
00643 
00644     This function counts the number of leading zeros of a data value.
00645 
00646     \param [in]  value  Value to count the leading zeros
00647     \return             number of leading zeros in value (32 when value is 0)
00648  */
00649 __attribute__( ( always_inline ) ) __STATIC_INLINE uint8_t __CLZ(uint32_t value)
00650 {
00651   uint32_t result;
00652 
00653   __ASM volatile ("clz %0, %1" : "=r" (result) : "r" (value) );
00654    return ((uint8_t) result);    /* Add explicit type cast here */
00655 }
00656 
00657 #endif /* (__CORTEX_M >= 0x03) */
00658 
00659 
00660 #elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
00661 /* IAR iccarm specific functions */
00662 #include <cmsis_iar.h>
00663 
00664 
00665 #elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
00666 /* TI CCS specific functions */
00667 #include <cmsis_ccs.h>
00668 
00669 
00670 #elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
00671 /* TASKING carm specific functions */
00672 /*
00673  * The CMSIS functions have been implemented as intrinsics in the compiler.
00674  * Please use "carm -?i" to get an up to date list of all intrinsics,
00675  * Including the CMSIS ones.
00676  */
00677 
00678 
00679 #elif defined ( __CSMC__ ) /*------------------ COSMIC Compiler -------------------*/
00680 /* Cosmic specific functions */
00681 #include <cmsis_csm.h>
00682 
00683 #endif
00684 
00685 /*@}*/ /* end of group CMSIS_Core_InstructionInterface */
00686 
00687 #endif /* __CORE_CMINSTR_H */