For teaching PSS at Ječná

Committer:
vladvana
Date:
Sun Sep 24 12:31:52 2017 +0000
Revision:
0:23d1f73bf130
Materials for the PSS exercises

/**************************************************************************//**
 * @file     core_cm4_simd.h
 * @brief    CMSIS Cortex-M4 SIMD Header File
 * @version  V3.20
 * @date     25. February 2013
 *
 * @note
 *
 ******************************************************************************/
/* Copyright (c) 2009 - 2013 ARM LIMITED

   All rights reserved.
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
   - Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.
   - Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.
   - Neither the name of ARM nor the names of its contributors may be used
     to endorse or promote products derived from this software without
     specific prior written permission.
   *
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
   ---------------------------------------------------------------------------*/


#ifdef __cplusplus
 extern "C" {
#endif

#ifndef __CORE_CM4_SIMD_H
#define __CORE_CM4_SIMD_H


/*******************************************************************************
 *                 Hardware Abstraction Layer
 ******************************************************************************/


/* ###################  Compiler specific Intrinsics  ########################### */
/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
  Access to dedicated SIMD instructions
  @{
*/

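/* Usage note (editorial addition, not part of the original CMSIS header):
   these intrinsics treat one 32-bit register as four 8-bit lanes or two
   16-bit lanes and operate on all lanes at once. A minimal sketch, assuming
   a Cortex-M4 target where this header is pulled in via core_cm4.h:

     uint32_t a = 0x01020304UL;
     uint32_t b = 0x10203040UL;
     uint32_t s = __UADD8(a, b);   // four byte-wise adds: s == 0x11223344
*/
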
#if   defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
/* ARM armcc specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
#define __SADD8                           __sadd8
#define __QADD8                           __qadd8
#define __SHADD8                          __shadd8
#define __UADD8                           __uadd8
#define __UQADD8                          __uqadd8
#define __UHADD8                          __uhadd8
#define __SSUB8                           __ssub8
#define __QSUB8                           __qsub8
#define __SHSUB8                          __shsub8
#define __USUB8                           __usub8
#define __UQSUB8                          __uqsub8
#define __UHSUB8                          __uhsub8
#define __SADD16                          __sadd16
#define __QADD16                          __qadd16
#define __SHADD16                         __shadd16
#define __UADD16                          __uadd16
#define __UQADD16                         __uqadd16
#define __UHADD16                         __uhadd16
#define __SSUB16                          __ssub16
#define __QSUB16                          __qsub16
#define __SHSUB16                         __shsub16
#define __USUB16                          __usub16
#define __UQSUB16                         __uqsub16
#define __UHSUB16                         __uhsub16
#define __SASX                            __sasx
#define __QASX                            __qasx
#define __SHASX                           __shasx
#define __UASX                            __uasx
#define __UQASX                           __uqasx
#define __UHASX                           __uhasx
#define __SSAX                            __ssax
#define __QSAX                            __qsax
#define __SHSAX                           __shsax
#define __USAX                            __usax
#define __UQSAX                           __uqsax
#define __UHSAX                           __uhsax
#define __USAD8                           __usad8
#define __USADA8                          __usada8
#define __SSAT16                          __ssat16
#define __USAT16                          __usat16
#define __UXTB16                          __uxtb16
#define __UXTAB16                         __uxtab16
#define __SXTB16                          __sxtb16
#define __SXTAB16                         __sxtab16
#define __SMUAD                           __smuad
#define __SMUADX                          __smuadx
#define __SMLAD                           __smlad
#define __SMLADX                          __smladx
#define __SMLALD                          __smlald
#define __SMLALDX                         __smlaldx
#define __SMUSD                           __smusd
#define __SMUSDX                          __smusdx
#define __SMLSD                           __smlsd
#define __SMLSDX                          __smlsdx
#define __SMLSLD                          __smlsld
#define __SMLSLDX                         __smlsldx
#define __SEL                             __sel
#define __QADD                            __qadd
#define __QSUB                            __qsub

#define __PKHBT(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \
                                           ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )

#define __PKHTB(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) |  \
                                           ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )

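/* Example (editorial addition): __PKHBT packs the bottom halfword of ARG1
   with the shifted bottom halfword of ARG2 into one word, as the macro above
   shows:

     uint32_t lo = 0x00001234UL;
     uint32_t hi = 0x00005678UL;
     uint32_t p  = __PKHBT(lo, hi, 16);   // p == 0x56781234UL
*/
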
#define __SMMLA(ARG1,ARG2,ARG3)          ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \
                                                      ((int64_t)(ARG3) << 32)      ) >> 32))

/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/



#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
/* IAR iccarm specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
#include <cmsis_iar.h>

/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/



#elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
/* TI CCS specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
#include <cmsis_ccs.h>

/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/



#elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
/* GNU gcc specific functions */

/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

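/* Example (editorial addition): the 'q' variants saturate each signed lane
   instead of wrapping. Adding 1 to +127 in the lowest byte lane:

     __SADD8(0x0000007FUL, 0x00000001UL);   // low byte 0x80: wrapped to -128
     __QADD8(0x0000007FUL, 0x00000001UL);   // low byte 0x7F: saturated at +127
*/
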
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}


__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}


__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

#define __SSAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

#define __USAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

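/* Example (editorial addition): __SSAT16(x, n) saturates both signed
   halfwords of x to an n-bit signed range; note the "I" constraint above,
   which means the bit count must be a compile-time constant:

     __SSAT16(0x7FFF0080UL, 8);   // 0x007F007F: both halfwords clipped to +127
*/
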
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

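/* Example (editorial addition): the *XTB16 intrinsics unpack bytes 0 and 2
   into two halfwords, which is handy for widening packed 8-bit samples:

     __UXTB16(0xAABBCCDDUL);   // 0x00BB00DD (zero-extended)
     __SXTB16(0xAABBCCDDUL);   // 0xFFBBFFDD (sign-extended)
*/
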
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

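/* Example (editorial addition): __SMLAD computes
   acc + op1.lo*op2.lo + op1.hi*op2.hi on signed halfwords, i.e. two MACs per
   instruction. A minimal q15 dot-product sketch (hypothetical helper, with
   the samples pre-packed two per word):

     static inline int32_t dot_q15(const uint32_t *a, const uint32_t *b,
                                   uint32_t pairs, int32_t acc)
     {
       uint32_t i;
       for (i = 0U; i < pairs; i++)           // one __SMLAD per sample pair
         acc = (int32_t)__SMLAD(a[i], b[i], (uint32_t)acc);
       return acc;
     }
*/
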
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

#define __SMLALD(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })

#define __SMLALDX(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

#define __SMLSLD(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((ARG3) >> 32), __ARG3_L = (uint32_t)((ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })

#define __SMLSLDX(ARG1,ARG2,ARG3) \
({ \
  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((ARG3) >> 32), __ARG3_L = (uint32_t)((ARG3) & 0xFFFFFFFFUL); \
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
  (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
 })

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

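/* Example (editorial addition, a well-known idiom): __SEL picks each byte
   from its first or second operand according to the APSR.GE flags, which
   the SIMD byte/halfword additions and subtractions set per lane. After
   __USUB8(a, b), GE[n] is set where byte n of a >= byte n of b, so:

     __USUB8(a, b);                  // result discarded; only the GE flags matter
     uint32_t max4 = __SEL(a, b);    // byte-wise maximum of a and b
*/
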
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

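/* Example (editorial addition): __QADD/__QSUB are full 32-bit signed
   saturating arithmetic, so overflow clips instead of wrapping:

     __QADD(0x7FFFFFFFUL, 1UL);   // stays 0x7FFFFFFF (INT32_MAX)
     __QSUB(0x80000000UL, 1UL);   // stays 0x80000000 (INT32_MIN)
*/
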
#define __PKHBT(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
  __RES; \
 })

#define __PKHTB(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  if (ARG3 == 0) \
    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \
  else \
    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
  __RES; \
 })

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
{
  int32_t result;

  __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/



#elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
/* TASKING carm specific functions */


/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
/* not yet supported */
/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/


#endif

/*@} end of group CMSIS_SIMD_intrinsics */


#endif /* __CORE_CM4_SIMD_H */

#ifdef __cplusplus
}
#endif