For teaching PSS at Ječná

Committer: vladvana
Date: Sun Sep 24 12:31:52 2017 +0000
Revision: 0:23d1f73bf130
Materials for the PSS lab exercises

/**************************************************************************//**
 * @file     core_cmSimd.h
 * @brief    CMSIS Cortex-M SIMD Header File
 * @version  V4.10
 * @date     18. March 2015
 *
 * @note
 *
 ******************************************************************************/
/* Copyright (c) 2009 - 2014 ARM LIMITED

   All rights reserved.
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
   - Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.
   - Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.
   - Neither the name of ARM nor the names of its contributors may be used
     to endorse or promote products derived from this software without
     specific prior written permission.
   *
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
   ---------------------------------------------------------------------------*/


#if defined ( __ICCARM__ )
 #pragma system_include  /* treat file as system include file for MISRA check */
#endif

#ifndef __CORE_CMSIMD_H
#define __CORE_CMSIMD_H

#ifdef __cplusplus
 extern "C" {
#endif


/*******************************************************************************
 *                 Hardware Abstraction Layer
 ******************************************************************************/


/* ###################  Compiler specific Intrinsics  ########################### */
/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
  Access to dedicated SIMD instructions
  @{
*/
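/* Illustrative usage (added note, not part of the original ARM header): each
   SIMD intrinsic treats a 32-bit word as packed 8- or 16-bit lanes and
   operates on all lanes in a single instruction. A minimal sketch, assuming a
   core with the DSP extension (e.g. Cortex-M4):

   \code
   uint32_t a = 0x01020304U;
   uint32_t b = 0x10203040U;
   uint32_t s = __UADD8(a, b);   // s == 0x11223344U: one add per byte lane
   \endcode
*/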

#if   defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
/* ARM armcc specific functions */
#define __SADD8          __sadd8
#define __QADD8          __qadd8
#define __SHADD8         __shadd8
#define __UADD8          __uadd8
#define __UQADD8         __uqadd8
#define __UHADD8         __uhadd8
#define __SSUB8          __ssub8
#define __QSUB8          __qsub8
#define __SHSUB8         __shsub8
#define __USUB8          __usub8
#define __UQSUB8         __uqsub8
#define __UHSUB8         __uhsub8
#define __SADD16         __sadd16
#define __QADD16         __qadd16
#define __SHADD16        __shadd16
#define __UADD16         __uadd16
#define __UQADD16        __uqadd16
#define __UHADD16        __uhadd16
#define __SSUB16         __ssub16
#define __QSUB16         __qsub16
#define __SHSUB16        __shsub16
#define __USUB16         __usub16
#define __UQSUB16        __uqsub16
#define __UHSUB16        __uhsub16
#define __SASX           __sasx
#define __QASX           __qasx
#define __SHASX          __shasx
#define __UASX           __uasx
#define __UQASX          __uqasx
#define __UHASX          __uhasx
#define __SSAX           __ssax
#define __QSAX           __qsax
#define __SHSAX          __shsax
#define __USAX           __usax
#define __UQSAX          __uqsax
#define __UHSAX          __uhsax
#define __USAD8          __usad8
#define __USADA8         __usada8
#define __SSAT16         __ssat16
#define __USAT16         __usat16
#define __UXTB16         __uxtb16
#define __UXTAB16        __uxtab16
#define __SXTB16         __sxtb16
#define __SXTAB16        __sxtab16
#define __SMUAD          __smuad
#define __SMUADX         __smuadx
#define __SMLAD          __smlad
#define __SMLADX         __smladx
#define __SMLALD         __smlald
#define __SMLALDX        __smlaldx
#define __SMUSD          __smusd
#define __SMUSDX         __smusdx
#define __SMLSD          __smlsd
#define __SMLSDX         __smlsdx
#define __SMLSLD         __smlsld
#define __SMLSLDX        __smlsldx
#define __SEL            __sel
#define __QADD           __qadd
#define __QSUB           __qsub

#define __PKHBT(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \
                                           ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )

#define __PKHTB(ARG1,ARG2,ARG3)          ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) |  \
                                           ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )

#define __SMMLA(ARG1,ARG2,ARG3)          ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \
                                                      ((int64_t)(ARG3) << 32)      ) >> 32))

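/* Worked example (added note, not part of the original ARM header): __PKHBT
   keeps the bottom halfword of ARG1 and packs in ARG2 shifted left by ARG3;
   __PKHTB keeps the top halfword of ARG1 and packs in ARG2 shifted right.
   For instance, __PKHBT(0xAAAA1111, 0x0000BBBB, 16) yields 0xBBBB1111, and
   __PKHTB(0xAAAA0000, 0x1111BBBB, 16) yields 0xAAAA1111. */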

#elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
/* GNU gcc specific functions */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
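
/* Note (added commentary, not part of the original ARM header): every GCC
   wrapper below follows the same inline-assembly pattern. "=r" (result) binds
   the output to a general-purpose register, "r" (op1) / "r" (op2) bind the
   inputs, and `volatile` prevents the compiler from reordering or removing
   the instruction, which matters because several of these instructions also
   update the APSR.GE flags as a side effect. */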

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}


__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}


__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

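/* Illustrative use (added note, not part of the original ARM header): __USAD8
   computes the sum of absolute differences of four byte lanes, and __USADA8
   adds that sum to an accumulator; this is the core step of block matching in
   image and video code. A minimal sketch, where `rowA` and `rowB` are
   hypothetical `uint32_t` pointers to 4-byte-aligned pixel data:

   \code
   uint32_t sad = __USAD8(rowA[0], rowB[0]);   // |a0-b0|+|a1-b1|+|a2-b2|+|a3-b3|
   sad = __USADA8(rowA[1], rowB[1], sad);      // accumulate the next four pixels
   \endcode
*/
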
#define __SSAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

#define __USAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })
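
/* Note (added commentary, not part of the original ARM header): __SSAT16 and
   __USAT16 are macros rather than inline functions because the saturation bit
   position is encoded as an immediate field of the instruction; the "I"
   operand constraint therefore requires a compile-time constant, which a
   function parameter could not guarantee. */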

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__ // Little endian
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else             // Big endian
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}
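
/* Note (added commentary, not part of the original ARM header): the 64-bit
   accumulator variants such as __SMLALD use a union to split the accumulator
   across the two 32-bit registers that the instruction reads and writes; the
   __ARMEB__ test swaps the register order so the low and high words also
   line up correctly on big-endian builds. */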

__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__ // Little endian
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else             // Big endian
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__ // Little endian
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else             // Big endian
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__ // Little endian
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else             // Big endian
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
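
/* Illustrative use (added note, not part of the original ARM header): __SEL
   picks each byte from op1 where the matching APSR.GE flag was set by the
   preceding SIMD instruction, and from op2 otherwise. A minimal sketch of a
   per-byte unsigned maximum, assuming the DSP extension is present:

   \code
   static inline uint32_t max_u8x4(uint32_t a, uint32_t b)
   {
     (void)__USUB8(a, b);    // sets GE per byte lane where a >= b
     return __SEL(a, b);     // per byte: a where a >= b, else b
   }
   \endcode
*/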

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

#define __PKHBT(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
  __RES; \
 })

#define __PKHTB(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  if (ARG3 == 0) \
    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2) ); \
  else \
    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
  __RES; \
 })
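
/* Note (added commentary, not part of the original ARM header): the
   ARG3 == 0 special case above exists because the pkhtb encoding has no
   "asr #0"; a zero shift field is interpreted as asr #32, so a shift of zero
   must be emitted as the unshifted form instead. */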

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
{
  int32_t result;

  __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}


#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
/* IAR iccarm specific functions */
#include <cmsis_iar.h>


#elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
/* TI CCS specific functions */
#include <cmsis_ccs.h>


#elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
/* TASKING carm specific functions */
/* not yet supported */


#elif defined ( __CSMC__ ) /*------------------ COSMIC Compiler -------------------*/
/* Cosmic specific functions */
#include <cmsis_csm.h>

#endif

/*@} end of group CMSIS_SIMD_intrinsics */


#ifdef __cplusplus
}
#endif

#endif /* __CORE_CMSIMD_H */