mbed library sources

Fork of mbed-src by mbed official


core_cmSimd.h

/**************************************************************************//**
 * @file     core_cmSimd.h
 * @brief    CMSIS Cortex-M SIMD Header File
 * @version  V4.10
 * @date     18. March 2015
 *
 * @note
 *
 ******************************************************************************/
/* Copyright (c) 2009 - 2014 ARM LIMITED

   All rights reserved.
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
   - Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.
   - Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.
   - Neither the name of ARM nor the names of its contributors may be used
     to endorse or promote products derived from this software without
     specific prior written permission.
   *
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
   ---------------------------------------------------------------------------*/


#if defined ( __ICCARM__ )
 #pragma system_include  /* treat file as system include file for MISRA check */
#endif

#ifndef __CORE_CMSIMD_H
#define __CORE_CMSIMD_H

#ifdef __cplusplus
 extern "C" {
#endif


/*******************************************************************************
 *                Hardware Abstraction Layer
 ******************************************************************************/


/* ###################  Compiler specific Intrinsics  ########################### */
/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
  Access to dedicated SIMD instructions
  @{
*/
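/* Note: these intrinsics map to the DSP (SIMD) extension instructions of
   ARMv7E-M, so they are available on cores that implement it, such as the
   Cortex-M4 and Cortex-M7, but not on Cortex-M0/M0+/M3. */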

#if   defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
/* ARM armcc specific functions */
#define __SADD8                           __sadd8
#define __QADD8                           __qadd8
#define __SHADD8                          __shadd8
#define __UADD8                           __uadd8
#define __UQADD8                          __uqadd8
#define __UHADD8                          __uhadd8
#define __SSUB8                           __ssub8
#define __QSUB8                           __qsub8
#define __SHSUB8                          __shsub8
#define __USUB8                           __usub8
#define __UQSUB8                          __uqsub8
#define __UHSUB8                          __uhsub8
#define __SADD16                          __sadd16
#define __QADD16                          __qadd16
#define __SHADD16                         __shadd16
#define __UADD16                          __uadd16
#define __UQADD16                         __uqadd16
#define __UHADD16                         __uhadd16
#define __SSUB16                          __ssub16
#define __QSUB16                          __qsub16
#define __SHSUB16                         __shsub16
#define __USUB16                          __usub16
#define __UQSUB16                         __uqsub16
#define __UHSUB16                         __uhsub16
#define __SASX                            __sasx
#define __QASX                            __qasx
#define __SHASX                           __shasx
#define __UASX                            __uasx
#define __UQASX                           __uqasx
#define __UHASX                           __uhasx
#define __SSAX                            __ssax
#define __QSAX                            __qsax
#define __SHSAX                           __shsax
#define __USAX                            __usax
#define __UQSAX                           __uqsax
#define __UHSAX                           __uhsax
#define __USAD8                           __usad8
#define __USADA8                          __usada8
#define __SSAT16                          __ssat16
#define __USAT16                          __usat16
#define __UXTB16                          __uxtb16
#define __UXTAB16                         __uxtab16
#define __SXTB16                          __sxtb16
#define __SXTAB16                         __sxtab16
#define __SMUAD                           __smuad
#define __SMUADX                          __smuadx
#define __SMLAD                           __smlad
#define __SMLADX                          __smladx
#define __SMLALD                          __smlald
#define __SMLALDX                         __smlaldx
#define __SMUSD                           __smusd
#define __SMUSDX                          __smusdx
#define __SMLSD                           __smlsd
#define __SMLSDX                          __smlsdx
#define __SMLSLD                          __smlsld
#define __SMLSLDX                         __smlsldx
#define __SEL                             __sel
#define __QADD                            __qadd
#define __QSUB                            __qsub

#define __PKHBT(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \
                                           ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )

#define __PKHTB(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) |  \
                                           ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )

#define __SMMLA(ARG1,ARG2,ARG3)          ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \
                                                      ((int64_t)(ARG3) << 32)      ) >> 32))

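/* Illustration (not part of the original header): __PKHBT keeps the bottom
   halfword of ARG1 and packs it with the top halfword of ARG2 shifted left
   by ARG3, while __PKHTB keeps the top halfword of ARG1 and packs it with
   ARG2 shifted right by ARG3. For example:
     __PKHBT(0x0000AAAA, 0x0000BBBB, 16) -> 0xBBBBAAAA
     __PKHTB(0xAAAA0000, 0xBBBB4444, 16) -> 0xAAAABBBB */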

#elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
/* GNU gcc specific functions */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}


__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}


__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

#define __SSAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

#define __USAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })
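/* Note: __SSAT16/__USAT16 (and __PKHBT/__PKHTB further below) are macros
   rather than inline functions because the "I" inline-assembly constraint
   requires the saturation position or shift amount to be a compile-time
   constant. */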

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   // Little endian
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               // Big endian
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}
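/* The llreg_u union above passes the 64-bit accumulator to the instruction
   as two 32-bit register halves; which half carries the low word depends on
   byte order, hence the separate little-endian and big-endian (__ARMEB__)
   asm variants. The same pattern recurs in the other 64-bit accumulating
   intrinsics below. */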

__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   // Little endian
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               // Big endian
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   // Little endian
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               // Big endian
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   // Little endian
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               // Big endian
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
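/* __SEL picks each byte lane of the result from op1 or op2 according to the
   APSR.GE flags, which are set per lane by preceding SIMD arithmetic such as
   __SADD8 or __USUB8. */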

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

#define __PKHBT(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
  __RES; \
 })

#define __PKHTB(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  if (ARG3 == 0) \
    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2)  ); \
  else \
    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
  __RES; \
 })

__attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
{
 int32_t result;

 __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r"  (op1), "r" (op2), "r" (op3) );
 return(result);
}


#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
/* IAR iccarm specific functions */
#include <cmsis_iar.h>


#elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
/* TI CCS specific functions */
#include <cmsis_ccs.h>


#elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
/* TASKING carm specific functions */
/* not yet supported */


#elif defined ( __CSMC__ ) /*------------------ COSMIC Compiler -------------------*/
/* Cosmic specific functions */
#include <cmsis_csm.h>

#endif

/*@} end of group CMSIS_SIMD_intrinsics */


#ifdef __cplusplus
}
#endif

#endif /* __CORE_CMSIMD_H */
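
A minimal usage sketch (not part of the header above): the packed intrinsics operate on four 8-bit or two 16-bit lanes held in a single 32-bit word. The helper below averages byte buffers four lanes at a time with __UHADD8; the function name average_u8 and the "cmsis.h" umbrella include are assumptions of this sketch for an mbed project on a DSP-capable core, not part of CMSIS itself.

#include <stddef.h>
#include <stdint.h>
#include "cmsis.h"  /* assumed mbed umbrella header that pulls in the target's CMSIS core headers */

/* Average two byte buffers four lanes at a time. __UHADD8 performs an
   unsigned halving add, (a[i] + b[i]) >> 1 on each byte lane, so no lane
   can overflow into its neighbour. 'words' is the length in 32-bit words,
   i.e. four 8-bit samples each. */
static void average_u8(const uint32_t *a, const uint32_t *b,
                       uint32_t *out, size_t words)
{
    for (size_t i = 0u; i < words; i++) {
        out[i] = __UHADD8(a[i], b[i]);
    }
}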