The official Mbed 2 C/C++ SDK provides the software platform and libraries to build your applications.

Dependents: hello SerialTestv11 SerialTestv12 Sierpinski ...

mbed 2

This is the mbed 2 library. If you'd like to learn about Mbed OS, please see the mbed-os docs.

Committer: emilmont
Date: Tue Feb 18 15:02:39 2014 +0000
Revision: 78:ed8466a608b4
Add KL05Z Target
Fix LPC11XX InterruptIn
Fix NUCLEO boards us_ticker
Fix NUCLEO_L152RE AnalogOut

core_cm4_simd.h
/**************************************************************************//**
 * @file     core_cm4_simd.h
 * @brief    CMSIS Cortex-M4 SIMD Header File
 * @version  V3.20
 * @date     25. February 2013
 *
 * @note
 *
 ******************************************************************************/
/* Copyright (c) 2009 - 2013 ARM LIMITED

   All rights reserved.
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
   - Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.
   - Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.
   - Neither the name of ARM nor the names of its contributors may be used
     to endorse or promote products derived from this software without
     specific prior written permission.
   *
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
   ---------------------------------------------------------------------------*/


#ifdef __cplusplus
 extern "C" {
#endif

#ifndef __CORE_CM4_SIMD_H
#define __CORE_CM4_SIMD_H


/*******************************************************************************
 *                Hardware Abstraction Layer
 ******************************************************************************/


/* ###################  Compiler specific Intrinsics  ########################### */
/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
  Access to dedicated SIMD instructions
  @{
*/

#if   defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
/* ARM armcc specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
#define __SADD8                           __sadd8
#define __QADD8                           __qadd8
#define __SHADD8                          __shadd8
#define __UADD8                           __uadd8
#define __UQADD8                          __uqadd8
#define __UHADD8                          __uhadd8
#define __SSUB8                           __ssub8
#define __QSUB8                           __qsub8
#define __SHSUB8                          __shsub8
#define __USUB8                           __usub8
#define __UQSUB8                          __uqsub8
#define __UHSUB8                          __uhsub8
#define __SADD16                          __sadd16
#define __QADD16                          __qadd16
#define __SHADD16                         __shadd16
#define __UADD16                          __uadd16
#define __UQADD16                         __uqadd16
#define __UHADD16                         __uhadd16
#define __SSUB16                          __ssub16
#define __QSUB16                          __qsub16
#define __SHSUB16                         __shsub16
#define __USUB16                          __usub16
#define __UQSUB16                         __uqsub16
#define __UHSUB16                         __uhsub16
#define __SASX                            __sasx
#define __QASX                            __qasx
#define __SHASX                           __shasx
#define __UASX                            __uasx
#define __UQASX                           __uqasx
#define __UHASX                           __uhasx
#define __SSAX                            __ssax
#define __QSAX                            __qsax
#define __SHSAX                           __shsax
#define __USAX                            __usax
#define __UQSAX                           __uqsax
#define __UHSAX                           __uhsax
#define __USAD8                           __usad8
#define __USADA8                          __usada8
#define __SSAT16                          __ssat16
#define __USAT16                          __usat16
#define __UXTB16                          __uxtb16
#define __UXTAB16                         __uxtab16
#define __SXTB16                          __sxtb16
#define __SXTAB16                         __sxtab16
#define __SMUAD                           __smuad
#define __SMUADX                          __smuadx
#define __SMLAD                           __smlad
#define __SMLADX                          __smladx
#define __SMLALD                          __smlald
#define __SMLALDX                         __smlaldx
#define __SMUSD                           __smusd
#define __SMUSDX                          __smusdx
#define __SMLSD                           __smlsd
#define __SMLSDX                          __smlsdx
#define __SMLSLD                          __smlsld
#define __SMLSLDX                         __smlsldx
#define __SEL                             __sel
#define __QADD                            __qadd
#define __QSUB                            __qsub

#define __PKHBT(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \
                                           ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )

#define __PKHTB(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) |  \
                                           ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )

#define __SMMLA(ARG1,ARG2,ARG3)          ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \
                                                      ((int64_t)(ARG3) << 32)      ) >> 32))

/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
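
/* Illustrative sketch added for this page, not part of the original CMSIS
   header: with armcc the mappings above resolve straight to compiler
   intrinsics. Assumes __STATIC_INLINE is already defined by core_cm4.h,
   which includes this file. The helper name is hypothetical.              */
__STATIC_INLINE uint32_t __example_uqadd8(uint32_t packed_a, uint32_t packed_b)
{
  /* Each of the four byte lanes is added independently and clamps at 0xFF,
     e.g. __UQADD8(0x80FF0001, 0x90020003) yields 0xFFFF0004.               */
  return __UQADD8(packed_a, packed_b);
}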



#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
/* IAR iccarm specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
#include <cmsis_iar.h>

/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/



#elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
/* TI CCS specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
#include <cmsis_ccs.h>

/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/


#elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
/* GNU gcc specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}


__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

#define __SSAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

#define __USAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

#define __SMLALD(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })

#define __SMLALDX(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

#define __SMLSLD(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((ARG3) >> 32), __ARG3_L = (uint32_t)((ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })

#define __SMLSLDX(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((ARG3) >> 32), __ARG3_L = (uint32_t)((ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

#define __PKHBT(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
  __RES; \
 })

#define __PKHTB(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  if (ARG3 == 0) \
    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \
  else \
    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
  __RES; \
 })

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
{
  int32_t result;

  __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
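
/* Illustrative sketch (a hypothetical helper, not part of the original
   header): the wrappers above map one-to-one onto Cortex-M4 DSP
   instructions, so a single __SMLAD performs two signed 16x16 multiplies
   plus an accumulate on packed q15 halfwords.                             */
__attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __example_mac_two_q15(uint32_t packed_a, uint32_t packed_b, int32_t acc)
{
  /* Computes acc + a.lo*b.lo + a.hi*b.hi, halfwords treated as signed. */
  return (int32_t)__SMLAD(packed_a, packed_b, (uint32_t)acc);
}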



#elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
/* TASKING carm specific functions */


/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
/* not yet supported */
/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/


#endif

/*@} end of group CMSIS_SIMD_intrinsics */


#endif /* __CORE_CM4_SIMD_H */

#ifdef __cplusplus
}
#endif
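
As a usage illustration, here is a minimal sketch (not part of the repository) of how application code can call these intrinsics on a Cortex-M4 target. It assumes a device header, shown here as a hypothetical device.h, that pulls in core_cm4.h and therefore this file; the function names are placeholders.

#include <stdint.h>
#include <string.h>
#include "device.h"  /* hypothetical device header; pulls in core_cm4.h */

/* Dot product of two q15 buffers: one __SMLAD handles two samples. */
int32_t dot_q15(const int16_t *a, const int16_t *b, uint32_t n)
{
    int32_t  acc = 0;
    uint32_t i   = 0;

    for ( ; i + 2u <= n; i += 2u) {
        uint32_t pa, pb;
        memcpy(&pa, &a[i], sizeof pa);   /* pack two consecutive samples */
        memcpy(&pb, &b[i], sizeof pb);
        acc = (int32_t)__SMLAD(pa, pb, (uint32_t)acc);
    }
    for ( ; i < n; i++) {                /* odd trailing sample, if any */
        acc += (int32_t)a[i] * (int32_t)b[i];
    }
    return acc;
}

/* Brighten a pixel buffer (length a multiple of 4) with per-byte
   unsigned saturation, four pixels per __UQADD8.                  */
void brighten_sat(uint8_t *px, uint32_t len, uint8_t gain)
{
    uint32_t g4 = 0x01010101UL * gain;   /* replicate gain into all four lanes */
    uint32_t i;

    for (i = 0; i < len; i += 4u) {
        uint32_t v;
        memcpy(&v, &px[i], sizeof v);
        v = __UQADD8(v, g4);             /* each byte lane clamps at 0xFF */
        memcpy(&px[i], &v, sizeof v);
    }
}

The memcpy calls keep the halfword/byte packing free of alignment and strict-aliasing pitfalls; on Cortex-M4 the compiler typically lowers them to single word loads and stores.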