mbed.h library with any bug fixes AV finds.
Dependents: micromouse4_encoder_testing PID_Test Lab1_Test WorkingPID …
targets/cmsis/core_cmSimd.h@1:ebce2ad32f95, 2015-11-02 (annotated)
- Committer: aravindsv
- Date: Mon Nov 02 03:07:12 2015 +0000
- Revision: 1:ebce2ad32f95
- Parent: 0:ba7650f404af
Changed the RCC timeout value to 500 ms, so total code startup time before program starts running is ~1s. Hopefully no side-effects from lower startup timeouts
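The revision note describes lowering a clock-startup timeout so that a failed oscillator start gives up sooner. As a rough, hypothetical sketch of that pattern (the macro and register names below are illustrative assumptions, not taken from this commit — the actual change lives elsewhere in the mbed target code, not in core_cmSimd.h), an STM32-style HAL typically polls an RCC ready flag until a millisecond timeout expires:

```c
/* Hypothetical sketch only: names are illustrative, not from this commit. */
#include <stdint.h>

#define HSE_STARTUP_TIMEOUT   ((uint32_t)500U)   /* ms; a lower value makes a failed HSE start abort sooner */

extern uint32_t HAL_GetTick(void);               /* millisecond tick counter provided by the HAL */
extern volatile uint32_t RCC_CR_REG;             /* stand-in for the RCC clock-control register */
#define RCC_CR_HSERDY  (1UL << 17)               /* "external oscillator ready" flag */

/* Wait for the external oscillator, giving up after HSE_STARTUP_TIMEOUT ms.
 * The timeout bounds how long startup can stall before the code moves on. */
static int wait_for_hse_ready(void)
{
    uint32_t start = HAL_GetTick();
    while ((RCC_CR_REG & RCC_CR_HSERDY) == 0U) {
        if ((HAL_GetTick() - start) > HSE_STARTUP_TIMEOUT) {
            return -1;                            /* oscillator never came up */
        }
    }
    return 0;
}
```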
Who changed what in which revision?
User | Revision | Line number | New contents of line |
---|---|---|---|
aravindsv | 0:ba7650f404af | 1 | /**************************************************************************//** |
aravindsv | 0:ba7650f404af | 2 | * @file core_cmSimd.h |
aravindsv | 0:ba7650f404af | 3 | * @brief CMSIS Cortex-M SIMD Header File |
aravindsv | 0:ba7650f404af | 4 | * @version V4.10 |
aravindsv | 0:ba7650f404af | 5 | * @date 18. March 2015 |
aravindsv | 0:ba7650f404af | 6 | * |
aravindsv | 0:ba7650f404af | 7 | * @note |
aravindsv | 0:ba7650f404af | 8 | * |
aravindsv | 0:ba7650f404af | 9 | ******************************************************************************/ |
aravindsv | 0:ba7650f404af | 10 | /* Copyright (c) 2009 - 2014 ARM LIMITED |
aravindsv | 0:ba7650f404af | 11 | |
aravindsv | 0:ba7650f404af | 12 | All rights reserved. |
aravindsv | 0:ba7650f404af | 13 | Redistribution and use in source and binary forms, with or without |
aravindsv | 0:ba7650f404af | 14 | modification, are permitted provided that the following conditions are met: |
aravindsv | 0:ba7650f404af | 15 | - Redistributions of source code must retain the above copyright |
aravindsv | 0:ba7650f404af | 16 | notice, this list of conditions and the following disclaimer. |
aravindsv | 0:ba7650f404af | 17 | - Redistributions in binary form must reproduce the above copyright |
aravindsv | 0:ba7650f404af | 18 | notice, this list of conditions and the following disclaimer in the |
aravindsv | 0:ba7650f404af | 19 | documentation and/or other materials provided with the distribution. |
aravindsv | 0:ba7650f404af | 20 | - Neither the name of ARM nor the names of its contributors may be used |
aravindsv | 0:ba7650f404af | 21 | to endorse or promote products derived from this software without |
aravindsv | 0:ba7650f404af | 22 | specific prior written permission. |
aravindsv | 0:ba7650f404af | 23 | * |
aravindsv | 0:ba7650f404af | 24 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
aravindsv | 0:ba7650f404af | 25 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
aravindsv | 0:ba7650f404af | 26 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
aravindsv | 0:ba7650f404af | 27 | ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE |
aravindsv | 0:ba7650f404af | 28 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
aravindsv | 0:ba7650f404af | 29 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
aravindsv | 0:ba7650f404af | 30 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
aravindsv | 0:ba7650f404af | 31 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
aravindsv | 0:ba7650f404af | 32 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
aravindsv | 0:ba7650f404af | 33 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
aravindsv | 0:ba7650f404af | 34 | POSSIBILITY OF SUCH DAMAGE. |
aravindsv | 0:ba7650f404af | 35 | ---------------------------------------------------------------------------*/ |
aravindsv | 0:ba7650f404af | 36 | |
aravindsv | 0:ba7650f404af | 37 | |
aravindsv | 0:ba7650f404af | 38 | #if defined ( __ICCARM__ ) |
aravindsv | 0:ba7650f404af | 39 | #pragma system_include /* treat file as system include file for MISRA check */ |
aravindsv | 0:ba7650f404af | 40 | #endif |
aravindsv | 0:ba7650f404af | 41 | |
aravindsv | 0:ba7650f404af | 42 | #ifndef __CORE_CMSIMD_H |
aravindsv | 0:ba7650f404af | 43 | #define __CORE_CMSIMD_H |
aravindsv | 0:ba7650f404af | 44 | |
aravindsv | 0:ba7650f404af | 45 | #ifdef __cplusplus |
aravindsv | 0:ba7650f404af | 46 | extern "C" { |
aravindsv | 0:ba7650f404af | 47 | #endif |
aravindsv | 0:ba7650f404af | 48 | |
aravindsv | 0:ba7650f404af | 49 | |
aravindsv | 0:ba7650f404af | 50 | /******************************************************************************* |
aravindsv | 0:ba7650f404af | 51 | * Hardware Abstraction Layer |
aravindsv | 0:ba7650f404af | 52 | ******************************************************************************/ |
aravindsv | 0:ba7650f404af | 53 | |
aravindsv | 0:ba7650f404af | 54 | |
aravindsv | 0:ba7650f404af | 55 | /* ################### Compiler specific Intrinsics ########################### */ |
aravindsv | 0:ba7650f404af | 56 | /** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics |
aravindsv | 0:ba7650f404af | 57 | Access to dedicated SIMD instructions |
aravindsv | 0:ba7650f404af | 58 | @{ |
aravindsv | 0:ba7650f404af | 59 | */ |
aravindsv | 0:ba7650f404af | 60 | |
aravindsv | 0:ba7650f404af | 61 | #if defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/ |
aravindsv | 0:ba7650f404af | 62 | /* ARM armcc specific functions */ |
aravindsv | 0:ba7650f404af | 63 | #define __SADD8 __sadd8 |
aravindsv | 0:ba7650f404af | 64 | #define __QADD8 __qadd8 |
aravindsv | 0:ba7650f404af | 65 | #define __SHADD8 __shadd8 |
aravindsv | 0:ba7650f404af | 66 | #define __UADD8 __uadd8 |
aravindsv | 0:ba7650f404af | 67 | #define __UQADD8 __uqadd8 |
aravindsv | 0:ba7650f404af | 68 | #define __UHADD8 __uhadd8 |
aravindsv | 0:ba7650f404af | 69 | #define __SSUB8 __ssub8 |
aravindsv | 0:ba7650f404af | 70 | #define __QSUB8 __qsub8 |
aravindsv | 0:ba7650f404af | 71 | #define __SHSUB8 __shsub8 |
aravindsv | 0:ba7650f404af | 72 | #define __USUB8 __usub8 |
aravindsv | 0:ba7650f404af | 73 | #define __UQSUB8 __uqsub8 |
aravindsv | 0:ba7650f404af | 74 | #define __UHSUB8 __uhsub8 |
aravindsv | 0:ba7650f404af | 75 | #define __SADD16 __sadd16 |
aravindsv | 0:ba7650f404af | 76 | #define __QADD16 __qadd16 |
aravindsv | 0:ba7650f404af | 77 | #define __SHADD16 __shadd16 |
aravindsv | 0:ba7650f404af | 78 | #define __UADD16 __uadd16 |
aravindsv | 0:ba7650f404af | 79 | #define __UQADD16 __uqadd16 |
aravindsv | 0:ba7650f404af | 80 | #define __UHADD16 __uhadd16 |
aravindsv | 0:ba7650f404af | 81 | #define __SSUB16 __ssub16 |
aravindsv | 0:ba7650f404af | 82 | #define __QSUB16 __qsub16 |
aravindsv | 0:ba7650f404af | 83 | #define __SHSUB16 __shsub16 |
aravindsv | 0:ba7650f404af | 84 | #define __USUB16 __usub16 |
aravindsv | 0:ba7650f404af | 85 | #define __UQSUB16 __uqsub16 |
aravindsv | 0:ba7650f404af | 86 | #define __UHSUB16 __uhsub16 |
aravindsv | 0:ba7650f404af | 87 | #define __SASX __sasx |
aravindsv | 0:ba7650f404af | 88 | #define __QASX __qasx |
aravindsv | 0:ba7650f404af | 89 | #define __SHASX __shasx |
aravindsv | 0:ba7650f404af | 90 | #define __UASX __uasx |
aravindsv | 0:ba7650f404af | 91 | #define __UQASX __uqasx |
aravindsv | 0:ba7650f404af | 92 | #define __UHASX __uhasx |
aravindsv | 0:ba7650f404af | 93 | #define __SSAX __ssax |
aravindsv | 0:ba7650f404af | 94 | #define __QSAX __qsax |
aravindsv | 0:ba7650f404af | 95 | #define __SHSAX __shsax |
aravindsv | 0:ba7650f404af | 96 | #define __USAX __usax |
aravindsv | 0:ba7650f404af | 97 | #define __UQSAX __uqsax |
aravindsv | 0:ba7650f404af | 98 | #define __UHSAX __uhsax |
aravindsv | 0:ba7650f404af | 99 | #define __USAD8 __usad8 |
aravindsv | 0:ba7650f404af | 100 | #define __USADA8 __usada8 |
aravindsv | 0:ba7650f404af | 101 | #define __SSAT16 __ssat16 |
aravindsv | 0:ba7650f404af | 102 | #define __USAT16 __usat16 |
aravindsv | 0:ba7650f404af | 103 | #define __UXTB16 __uxtb16 |
aravindsv | 0:ba7650f404af | 104 | #define __UXTAB16 __uxtab16 |
aravindsv | 0:ba7650f404af | 105 | #define __SXTB16 __sxtb16 |
aravindsv | 0:ba7650f404af | 106 | #define __SXTAB16 __sxtab16 |
aravindsv | 0:ba7650f404af | 107 | #define __SMUAD __smuad |
aravindsv | 0:ba7650f404af | 108 | #define __SMUADX __smuadx |
aravindsv | 0:ba7650f404af | 109 | #define __SMLAD __smlad |
aravindsv | 0:ba7650f404af | 110 | #define __SMLADX __smladx |
aravindsv | 0:ba7650f404af | 111 | #define __SMLALD __smlald |
aravindsv | 0:ba7650f404af | 112 | #define __SMLALDX __smlaldx |
aravindsv | 0:ba7650f404af | 113 | #define __SMUSD __smusd |
aravindsv | 0:ba7650f404af | 114 | #define __SMUSDX __smusdx |
aravindsv | 0:ba7650f404af | 115 | #define __SMLSD __smlsd |
aravindsv | 0:ba7650f404af | 116 | #define __SMLSDX __smlsdx |
aravindsv | 0:ba7650f404af | 117 | #define __SMLSLD __smlsld |
aravindsv | 0:ba7650f404af | 118 | #define __SMLSLDX __smlsldx |
aravindsv | 0:ba7650f404af | 119 | #define __SEL __sel |
aravindsv | 0:ba7650f404af | 120 | #define __QADD __qadd |
aravindsv | 0:ba7650f404af | 121 | #define __QSUB __qsub |
aravindsv | 0:ba7650f404af | 122 | |
aravindsv | 0:ba7650f404af | 123 | #define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ |
aravindsv | 0:ba7650f404af | 124 | ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) |
aravindsv | 0:ba7650f404af | 125 | |
aravindsv | 0:ba7650f404af | 126 | #define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ |
aravindsv | 0:ba7650f404af | 127 | ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) |
aravindsv | 0:ba7650f404af | 128 | |
aravindsv | 0:ba7650f404af | 129 | #define __SMMLA(ARG1,ARG2,ARG3) ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \ |
aravindsv | 0:ba7650f404af | 130 | ((int64_t)(ARG3) << 32) ) >> 32)) |
aravindsv | 0:ba7650f404af | 131 | |
aravindsv | 0:ba7650f404af | 132 | |
aravindsv | 0:ba7650f404af | 133 | #elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/ |
aravindsv | 0:ba7650f404af | 134 | /* GNU gcc specific functions */ |
aravindsv | 0:ba7650f404af | 135 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 136 | { |
aravindsv | 0:ba7650f404af | 137 | uint32_t result; |
aravindsv | 0:ba7650f404af | 138 | |
aravindsv | 0:ba7650f404af | 139 | __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 140 | return(result); |
aravindsv | 0:ba7650f404af | 141 | } |
aravindsv | 0:ba7650f404af | 142 | |
aravindsv | 0:ba7650f404af | 143 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 144 | { |
aravindsv | 0:ba7650f404af | 145 | uint32_t result; |
aravindsv | 0:ba7650f404af | 146 | |
aravindsv | 0:ba7650f404af | 147 | __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 148 | return(result); |
aravindsv | 0:ba7650f404af | 149 | } |
aravindsv | 0:ba7650f404af | 150 | |
aravindsv | 0:ba7650f404af | 151 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 152 | { |
aravindsv | 0:ba7650f404af | 153 | uint32_t result; |
aravindsv | 0:ba7650f404af | 154 | |
aravindsv | 0:ba7650f404af | 155 | __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 156 | return(result); |
aravindsv | 0:ba7650f404af | 157 | } |
aravindsv | 0:ba7650f404af | 158 | |
aravindsv | 0:ba7650f404af | 159 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 160 | { |
aravindsv | 0:ba7650f404af | 161 | uint32_t result; |
aravindsv | 0:ba7650f404af | 162 | |
aravindsv | 0:ba7650f404af | 163 | __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 164 | return(result); |
aravindsv | 0:ba7650f404af | 165 | } |
aravindsv | 0:ba7650f404af | 166 | |
aravindsv | 0:ba7650f404af | 167 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 168 | { |
aravindsv | 0:ba7650f404af | 169 | uint32_t result; |
aravindsv | 0:ba7650f404af | 170 | |
aravindsv | 0:ba7650f404af | 171 | __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 172 | return(result); |
aravindsv | 0:ba7650f404af | 173 | } |
aravindsv | 0:ba7650f404af | 174 | |
aravindsv | 0:ba7650f404af | 175 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 176 | { |
aravindsv | 0:ba7650f404af | 177 | uint32_t result; |
aravindsv | 0:ba7650f404af | 178 | |
aravindsv | 0:ba7650f404af | 179 | __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 180 | return(result); |
aravindsv | 0:ba7650f404af | 181 | } |
aravindsv | 0:ba7650f404af | 182 | |
aravindsv | 0:ba7650f404af | 183 | |
aravindsv | 0:ba7650f404af | 184 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 185 | { |
aravindsv | 0:ba7650f404af | 186 | uint32_t result; |
aravindsv | 0:ba7650f404af | 187 | |
aravindsv | 0:ba7650f404af | 188 | __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 189 | return(result); |
aravindsv | 0:ba7650f404af | 190 | } |
aravindsv | 0:ba7650f404af | 191 | |
aravindsv | 0:ba7650f404af | 192 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 193 | { |
aravindsv | 0:ba7650f404af | 194 | uint32_t result; |
aravindsv | 0:ba7650f404af | 195 | |
aravindsv | 0:ba7650f404af | 196 | __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 197 | return(result); |
aravindsv | 0:ba7650f404af | 198 | } |
aravindsv | 0:ba7650f404af | 199 | |
aravindsv | 0:ba7650f404af | 200 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 201 | { |
aravindsv | 0:ba7650f404af | 202 | uint32_t result; |
aravindsv | 0:ba7650f404af | 203 | |
aravindsv | 0:ba7650f404af | 204 | __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 205 | return(result); |
aravindsv | 0:ba7650f404af | 206 | } |
aravindsv | 0:ba7650f404af | 207 | |
aravindsv | 0:ba7650f404af | 208 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 209 | { |
aravindsv | 0:ba7650f404af | 210 | uint32_t result; |
aravindsv | 0:ba7650f404af | 211 | |
aravindsv | 0:ba7650f404af | 212 | __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 213 | return(result); |
aravindsv | 0:ba7650f404af | 214 | } |
aravindsv | 0:ba7650f404af | 215 | |
aravindsv | 0:ba7650f404af | 216 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 217 | { |
aravindsv | 0:ba7650f404af | 218 | uint32_t result; |
aravindsv | 0:ba7650f404af | 219 | |
aravindsv | 0:ba7650f404af | 220 | __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 221 | return(result); |
aravindsv | 0:ba7650f404af | 222 | } |
aravindsv | 0:ba7650f404af | 223 | |
aravindsv | 0:ba7650f404af | 224 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 225 | { |
aravindsv | 0:ba7650f404af | 226 | uint32_t result; |
aravindsv | 0:ba7650f404af | 227 | |
aravindsv | 0:ba7650f404af | 228 | __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 229 | return(result); |
aravindsv | 0:ba7650f404af | 230 | } |
aravindsv | 0:ba7650f404af | 231 | |
aravindsv | 0:ba7650f404af | 232 | |
aravindsv | 0:ba7650f404af | 233 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 234 | { |
aravindsv | 0:ba7650f404af | 235 | uint32_t result; |
aravindsv | 0:ba7650f404af | 236 | |
aravindsv | 0:ba7650f404af | 237 | __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 238 | return(result); |
aravindsv | 0:ba7650f404af | 239 | } |
aravindsv | 0:ba7650f404af | 240 | |
aravindsv | 0:ba7650f404af | 241 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 242 | { |
aravindsv | 0:ba7650f404af | 243 | uint32_t result; |
aravindsv | 0:ba7650f404af | 244 | |
aravindsv | 0:ba7650f404af | 245 | __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 246 | return(result); |
aravindsv | 0:ba7650f404af | 247 | } |
aravindsv | 0:ba7650f404af | 248 | |
aravindsv | 0:ba7650f404af | 249 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 250 | { |
aravindsv | 0:ba7650f404af | 251 | uint32_t result; |
aravindsv | 0:ba7650f404af | 252 | |
aravindsv | 0:ba7650f404af | 253 | __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 254 | return(result); |
aravindsv | 0:ba7650f404af | 255 | } |
aravindsv | 0:ba7650f404af | 256 | |
aravindsv | 0:ba7650f404af | 257 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 258 | { |
aravindsv | 0:ba7650f404af | 259 | uint32_t result; |
aravindsv | 0:ba7650f404af | 260 | |
aravindsv | 0:ba7650f404af | 261 | __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 262 | return(result); |
aravindsv | 0:ba7650f404af | 263 | } |
aravindsv | 0:ba7650f404af | 264 | |
aravindsv | 0:ba7650f404af | 265 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 266 | { |
aravindsv | 0:ba7650f404af | 267 | uint32_t result; |
aravindsv | 0:ba7650f404af | 268 | |
aravindsv | 0:ba7650f404af | 269 | __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 270 | return(result); |
aravindsv | 0:ba7650f404af | 271 | } |
aravindsv | 0:ba7650f404af | 272 | |
aravindsv | 0:ba7650f404af | 273 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 274 | { |
aravindsv | 0:ba7650f404af | 275 | uint32_t result; |
aravindsv | 0:ba7650f404af | 276 | |
aravindsv | 0:ba7650f404af | 277 | __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 278 | return(result); |
aravindsv | 0:ba7650f404af | 279 | } |
aravindsv | 0:ba7650f404af | 280 | |
aravindsv | 0:ba7650f404af | 281 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 282 | { |
aravindsv | 0:ba7650f404af | 283 | uint32_t result; |
aravindsv | 0:ba7650f404af | 284 | |
aravindsv | 0:ba7650f404af | 285 | __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 286 | return(result); |
aravindsv | 0:ba7650f404af | 287 | } |
aravindsv | 0:ba7650f404af | 288 | |
aravindsv | 0:ba7650f404af | 289 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 290 | { |
aravindsv | 0:ba7650f404af | 291 | uint32_t result; |
aravindsv | 0:ba7650f404af | 292 | |
aravindsv | 0:ba7650f404af | 293 | __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 294 | return(result); |
aravindsv | 0:ba7650f404af | 295 | } |
aravindsv | 0:ba7650f404af | 296 | |
aravindsv | 0:ba7650f404af | 297 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 298 | { |
aravindsv | 0:ba7650f404af | 299 | uint32_t result; |
aravindsv | 0:ba7650f404af | 300 | |
aravindsv | 0:ba7650f404af | 301 | __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 302 | return(result); |
aravindsv | 0:ba7650f404af | 303 | } |
aravindsv | 0:ba7650f404af | 304 | |
aravindsv | 0:ba7650f404af | 305 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 306 | { |
aravindsv | 0:ba7650f404af | 307 | uint32_t result; |
aravindsv | 0:ba7650f404af | 308 | |
aravindsv | 0:ba7650f404af | 309 | __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 310 | return(result); |
aravindsv | 0:ba7650f404af | 311 | } |
aravindsv | 0:ba7650f404af | 312 | |
aravindsv | 0:ba7650f404af | 313 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 314 | { |
aravindsv | 0:ba7650f404af | 315 | uint32_t result; |
aravindsv | 0:ba7650f404af | 316 | |
aravindsv | 0:ba7650f404af | 317 | __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 318 | return(result); |
aravindsv | 0:ba7650f404af | 319 | } |
aravindsv | 0:ba7650f404af | 320 | |
aravindsv | 0:ba7650f404af | 321 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 322 | { |
aravindsv | 0:ba7650f404af | 323 | uint32_t result; |
aravindsv | 0:ba7650f404af | 324 | |
aravindsv | 0:ba7650f404af | 325 | __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 326 | return(result); |
aravindsv | 0:ba7650f404af | 327 | } |
aravindsv | 0:ba7650f404af | 328 | |
aravindsv | 0:ba7650f404af | 329 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 330 | { |
aravindsv | 0:ba7650f404af | 331 | uint32_t result; |
aravindsv | 0:ba7650f404af | 332 | |
aravindsv | 0:ba7650f404af | 333 | __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 334 | return(result); |
aravindsv | 0:ba7650f404af | 335 | } |
aravindsv | 0:ba7650f404af | 336 | |
aravindsv | 0:ba7650f404af | 337 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 338 | { |
aravindsv | 0:ba7650f404af | 339 | uint32_t result; |
aravindsv | 0:ba7650f404af | 340 | |
aravindsv | 0:ba7650f404af | 341 | __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 342 | return(result); |
aravindsv | 0:ba7650f404af | 343 | } |
aravindsv | 0:ba7650f404af | 344 | |
aravindsv | 0:ba7650f404af | 345 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 346 | { |
aravindsv | 0:ba7650f404af | 347 | uint32_t result; |
aravindsv | 0:ba7650f404af | 348 | |
aravindsv | 0:ba7650f404af | 349 | __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 350 | return(result); |
aravindsv | 0:ba7650f404af | 351 | } |
aravindsv | 0:ba7650f404af | 352 | |
aravindsv | 0:ba7650f404af | 353 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 354 | { |
aravindsv | 0:ba7650f404af | 355 | uint32_t result; |
aravindsv | 0:ba7650f404af | 356 | |
aravindsv | 0:ba7650f404af | 357 | __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 358 | return(result); |
aravindsv | 0:ba7650f404af | 359 | } |
aravindsv | 0:ba7650f404af | 360 | |
aravindsv | 0:ba7650f404af | 361 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 362 | { |
aravindsv | 0:ba7650f404af | 363 | uint32_t result; |
aravindsv | 0:ba7650f404af | 364 | |
aravindsv | 0:ba7650f404af | 365 | __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 366 | return(result); |
aravindsv | 0:ba7650f404af | 367 | } |
aravindsv | 0:ba7650f404af | 368 | |
aravindsv | 0:ba7650f404af | 369 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 370 | { |
aravindsv | 0:ba7650f404af | 371 | uint32_t result; |
aravindsv | 0:ba7650f404af | 372 | |
aravindsv | 0:ba7650f404af | 373 | __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 374 | return(result); |
aravindsv | 0:ba7650f404af | 375 | } |
aravindsv | 0:ba7650f404af | 376 | |
aravindsv | 0:ba7650f404af | 377 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 378 | { |
aravindsv | 0:ba7650f404af | 379 | uint32_t result; |
aravindsv | 0:ba7650f404af | 380 | |
aravindsv | 0:ba7650f404af | 381 | __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 382 | return(result); |
aravindsv | 0:ba7650f404af | 383 | } |
aravindsv | 0:ba7650f404af | 384 | |
aravindsv | 0:ba7650f404af | 385 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 386 | { |
aravindsv | 0:ba7650f404af | 387 | uint32_t result; |
aravindsv | 0:ba7650f404af | 388 | |
aravindsv | 0:ba7650f404af | 389 | __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 390 | return(result); |
aravindsv | 0:ba7650f404af | 391 | } |
aravindsv | 0:ba7650f404af | 392 | |
aravindsv | 0:ba7650f404af | 393 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 394 | { |
aravindsv | 0:ba7650f404af | 395 | uint32_t result; |
aravindsv | 0:ba7650f404af | 396 | |
aravindsv | 0:ba7650f404af | 397 | __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 398 | return(result); |
aravindsv | 0:ba7650f404af | 399 | } |
aravindsv | 0:ba7650f404af | 400 | |
aravindsv | 0:ba7650f404af | 401 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 402 | { |
aravindsv | 0:ba7650f404af | 403 | uint32_t result; |
aravindsv | 0:ba7650f404af | 404 | |
aravindsv | 0:ba7650f404af | 405 | __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 406 | return(result); |
aravindsv | 0:ba7650f404af | 407 | } |
aravindsv | 0:ba7650f404af | 408 | |
aravindsv | 0:ba7650f404af | 409 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 410 | { |
aravindsv | 0:ba7650f404af | 411 | uint32_t result; |
aravindsv | 0:ba7650f404af | 412 | |
aravindsv | 0:ba7650f404af | 413 | __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 414 | return(result); |
aravindsv | 0:ba7650f404af | 415 | } |
aravindsv | 0:ba7650f404af | 416 | |
aravindsv | 0:ba7650f404af | 417 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 418 | { |
aravindsv | 0:ba7650f404af | 419 | uint32_t result; |
aravindsv | 0:ba7650f404af | 420 | |
aravindsv | 0:ba7650f404af | 421 | __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 422 | return(result); |
aravindsv | 0:ba7650f404af | 423 | } |
aravindsv | 0:ba7650f404af | 424 | |
aravindsv | 0:ba7650f404af | 425 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 426 | { |
aravindsv | 0:ba7650f404af | 427 | uint32_t result; |
aravindsv | 0:ba7650f404af | 428 | |
aravindsv | 0:ba7650f404af | 429 | __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 430 | return(result); |
aravindsv | 0:ba7650f404af | 431 | } |
aravindsv | 0:ba7650f404af | 432 | |
aravindsv | 0:ba7650f404af | 433 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3) |
aravindsv | 0:ba7650f404af | 434 | { |
aravindsv | 0:ba7650f404af | 435 | uint32_t result; |
aravindsv | 0:ba7650f404af | 436 | |
aravindsv | 0:ba7650f404af | 437 | __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); |
aravindsv | 0:ba7650f404af | 438 | return(result); |
aravindsv | 0:ba7650f404af | 439 | } |
aravindsv | 0:ba7650f404af | 440 | |
aravindsv | 0:ba7650f404af | 441 | #define __SSAT16(ARG1,ARG2) \ |
aravindsv | 0:ba7650f404af | 442 | ({ \ |
aravindsv | 0:ba7650f404af | 443 | uint32_t __RES, __ARG1 = (ARG1); \ |
aravindsv | 0:ba7650f404af | 444 | __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ |
aravindsv | 0:ba7650f404af | 445 | __RES; \ |
aravindsv | 0:ba7650f404af | 446 | }) |
aravindsv | 0:ba7650f404af | 447 | |
aravindsv | 0:ba7650f404af | 448 | #define __USAT16(ARG1,ARG2) \ |
aravindsv | 0:ba7650f404af | 449 | ({ \ |
aravindsv | 0:ba7650f404af | 450 | uint32_t __RES, __ARG1 = (ARG1); \ |
aravindsv | 0:ba7650f404af | 451 | __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \ |
aravindsv | 0:ba7650f404af | 452 | __RES; \ |
aravindsv | 0:ba7650f404af | 453 | }) |
aravindsv | 0:ba7650f404af | 454 | |
aravindsv | 0:ba7650f404af | 455 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1) |
aravindsv | 0:ba7650f404af | 456 | { |
aravindsv | 0:ba7650f404af | 457 | uint32_t result; |
aravindsv | 0:ba7650f404af | 458 | |
aravindsv | 0:ba7650f404af | 459 | __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1)); |
aravindsv | 0:ba7650f404af | 460 | return(result); |
aravindsv | 0:ba7650f404af | 461 | } |
aravindsv | 0:ba7650f404af | 462 | |
aravindsv | 0:ba7650f404af | 463 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 464 | { |
aravindsv | 0:ba7650f404af | 465 | uint32_t result; |
aravindsv | 0:ba7650f404af | 466 | |
aravindsv | 0:ba7650f404af | 467 | __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 468 | return(result); |
aravindsv | 0:ba7650f404af | 469 | } |
aravindsv | 0:ba7650f404af | 470 | |
aravindsv | 0:ba7650f404af | 471 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1) |
aravindsv | 0:ba7650f404af | 472 | { |
aravindsv | 0:ba7650f404af | 473 | uint32_t result; |
aravindsv | 0:ba7650f404af | 474 | |
aravindsv | 0:ba7650f404af | 475 | __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1)); |
aravindsv | 0:ba7650f404af | 476 | return(result); |
aravindsv | 0:ba7650f404af | 477 | } |
aravindsv | 0:ba7650f404af | 478 | |
aravindsv | 0:ba7650f404af | 479 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 480 | { |
aravindsv | 0:ba7650f404af | 481 | uint32_t result; |
aravindsv | 0:ba7650f404af | 482 | |
aravindsv | 0:ba7650f404af | 483 | __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 484 | return(result); |
aravindsv | 0:ba7650f404af | 485 | } |
aravindsv | 0:ba7650f404af | 486 | |
aravindsv | 0:ba7650f404af | 487 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 488 | { |
aravindsv | 0:ba7650f404af | 489 | uint32_t result; |
aravindsv | 0:ba7650f404af | 490 | |
aravindsv | 0:ba7650f404af | 491 | __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 492 | return(result); |
aravindsv | 0:ba7650f404af | 493 | } |
aravindsv | 0:ba7650f404af | 494 | |
aravindsv | 0:ba7650f404af | 495 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 496 | { |
aravindsv | 0:ba7650f404af | 497 | uint32_t result; |
aravindsv | 0:ba7650f404af | 498 | |
aravindsv | 0:ba7650f404af | 499 | __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 500 | return(result); |
aravindsv | 0:ba7650f404af | 501 | } |
aravindsv | 0:ba7650f404af | 502 | |
aravindsv | 0:ba7650f404af | 503 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3) |
aravindsv | 0:ba7650f404af | 504 | { |
aravindsv | 0:ba7650f404af | 505 | uint32_t result; |
aravindsv | 0:ba7650f404af | 506 | |
aravindsv | 0:ba7650f404af | 507 | __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); |
aravindsv | 0:ba7650f404af | 508 | return(result); |
aravindsv | 0:ba7650f404af | 509 | } |
aravindsv | 0:ba7650f404af | 510 | |
aravindsv | 0:ba7650f404af | 511 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3) |
aravindsv | 0:ba7650f404af | 512 | { |
aravindsv | 0:ba7650f404af | 513 | uint32_t result; |
aravindsv | 0:ba7650f404af | 514 | |
aravindsv | 0:ba7650f404af | 515 | __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); |
aravindsv | 0:ba7650f404af | 516 | return(result); |
aravindsv | 0:ba7650f404af | 517 | } |
aravindsv | 0:ba7650f404af | 518 | |
aravindsv | 0:ba7650f404af | 519 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc) |
aravindsv | 0:ba7650f404af | 520 | { |
aravindsv | 0:ba7650f404af | 521 | union llreg_u{ |
aravindsv | 0:ba7650f404af | 522 | uint32_t w32[2]; |
aravindsv | 0:ba7650f404af | 523 | uint64_t w64; |
aravindsv | 0:ba7650f404af | 524 | } llr; |
aravindsv | 0:ba7650f404af | 525 | llr.w64 = acc; |
aravindsv | 0:ba7650f404af | 526 | |
aravindsv | 0:ba7650f404af | 527 | #ifndef __ARMEB__ // Little endian |
aravindsv | 0:ba7650f404af | 528 | __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); |
aravindsv | 0:ba7650f404af | 529 | #else // Big endian |
aravindsv | 0:ba7650f404af | 530 | __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); |
aravindsv | 0:ba7650f404af | 531 | #endif |
aravindsv | 0:ba7650f404af | 532 | |
aravindsv | 0:ba7650f404af | 533 | return(llr.w64); |
aravindsv | 0:ba7650f404af | 534 | } |
aravindsv | 0:ba7650f404af | 535 | |
aravindsv | 0:ba7650f404af | 536 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc) |
aravindsv | 0:ba7650f404af | 537 | { |
aravindsv | 0:ba7650f404af | 538 | union llreg_u{ |
aravindsv | 0:ba7650f404af | 539 | uint32_t w32[2]; |
aravindsv | 0:ba7650f404af | 540 | uint64_t w64; |
aravindsv | 0:ba7650f404af | 541 | } llr; |
aravindsv | 0:ba7650f404af | 542 | llr.w64 = acc; |
aravindsv | 0:ba7650f404af | 543 | |
aravindsv | 0:ba7650f404af | 544 | #ifndef __ARMEB__ // Little endian |
aravindsv | 0:ba7650f404af | 545 | __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); |
aravindsv | 0:ba7650f404af | 546 | #else // Big endian |
aravindsv | 0:ba7650f404af | 547 | __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); |
aravindsv | 0:ba7650f404af | 548 | #endif |
aravindsv | 0:ba7650f404af | 549 | |
aravindsv | 0:ba7650f404af | 550 | return(llr.w64); |
aravindsv | 0:ba7650f404af | 551 | } |
aravindsv | 0:ba7650f404af | 552 | |
aravindsv | 0:ba7650f404af | 553 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 554 | { |
aravindsv | 0:ba7650f404af | 555 | uint32_t result; |
aravindsv | 0:ba7650f404af | 556 | |
aravindsv | 0:ba7650f404af | 557 | __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 558 | return(result); |
aravindsv | 0:ba7650f404af | 559 | } |
aravindsv | 0:ba7650f404af | 560 | |
aravindsv | 0:ba7650f404af | 561 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 562 | { |
aravindsv | 0:ba7650f404af | 563 | uint32_t result; |
aravindsv | 0:ba7650f404af | 564 | |
aravindsv | 0:ba7650f404af | 565 | __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 566 | return(result); |
aravindsv | 0:ba7650f404af | 567 | } |
aravindsv | 0:ba7650f404af | 568 | |
aravindsv | 0:ba7650f404af | 569 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3) |
aravindsv | 0:ba7650f404af | 570 | { |
aravindsv | 0:ba7650f404af | 571 | uint32_t result; |
aravindsv | 0:ba7650f404af | 572 | |
aravindsv | 0:ba7650f404af | 573 | __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); |
aravindsv | 0:ba7650f404af | 574 | return(result); |
aravindsv | 0:ba7650f404af | 575 | } |
aravindsv | 0:ba7650f404af | 576 | |
aravindsv | 0:ba7650f404af | 577 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3) |
aravindsv | 0:ba7650f404af | 578 | { |
aravindsv | 0:ba7650f404af | 579 | uint32_t result; |
aravindsv | 0:ba7650f404af | 580 | |
aravindsv | 0:ba7650f404af | 581 | __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) ); |
aravindsv | 0:ba7650f404af | 582 | return(result); |
aravindsv | 0:ba7650f404af | 583 | } |
aravindsv | 0:ba7650f404af | 584 | |
aravindsv | 0:ba7650f404af | 585 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc) |
aravindsv | 0:ba7650f404af | 586 | { |
aravindsv | 0:ba7650f404af | 587 | union llreg_u{ |
aravindsv | 0:ba7650f404af | 588 | uint32_t w32[2]; |
aravindsv | 0:ba7650f404af | 589 | uint64_t w64; |
aravindsv | 0:ba7650f404af | 590 | } llr; |
aravindsv | 0:ba7650f404af | 591 | llr.w64 = acc; |
aravindsv | 0:ba7650f404af | 592 | |
aravindsv | 0:ba7650f404af | 593 | #ifndef __ARMEB__ // Little endian |
aravindsv | 0:ba7650f404af | 594 | __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); |
aravindsv | 0:ba7650f404af | 595 | #else // Big endian |
aravindsv | 0:ba7650f404af | 596 | __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); |
aravindsv | 0:ba7650f404af | 597 | #endif |
aravindsv | 0:ba7650f404af | 598 | |
aravindsv | 0:ba7650f404af | 599 | return(llr.w64); |
aravindsv | 0:ba7650f404af | 600 | } |
aravindsv | 0:ba7650f404af | 601 | |
aravindsv | 0:ba7650f404af | 602 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc) |
aravindsv | 0:ba7650f404af | 603 | { |
aravindsv | 0:ba7650f404af | 604 | union llreg_u{ |
aravindsv | 0:ba7650f404af | 605 | uint32_t w32[2]; |
aravindsv | 0:ba7650f404af | 606 | uint64_t w64; |
aravindsv | 0:ba7650f404af | 607 | } llr; |
aravindsv | 0:ba7650f404af | 608 | llr.w64 = acc; |
aravindsv | 0:ba7650f404af | 609 | |
aravindsv | 0:ba7650f404af | 610 | #ifndef __ARMEB__ // Little endian |
aravindsv | 0:ba7650f404af | 611 | __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) ); |
aravindsv | 0:ba7650f404af | 612 | #else // Big endian |
aravindsv | 0:ba7650f404af | 613 | __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) ); |
aravindsv | 0:ba7650f404af | 614 | #endif |
aravindsv | 0:ba7650f404af | 615 | |
aravindsv | 0:ba7650f404af | 616 | return(llr.w64); |
aravindsv | 0:ba7650f404af | 617 | } |
aravindsv | 0:ba7650f404af | 618 | |
aravindsv | 0:ba7650f404af | 619 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL (uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 620 | { |
aravindsv | 0:ba7650f404af | 621 | uint32_t result; |
aravindsv | 0:ba7650f404af | 622 | |
aravindsv | 0:ba7650f404af | 623 | __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 624 | return(result); |
aravindsv | 0:ba7650f404af | 625 | } |
aravindsv | 0:ba7650f404af | 626 | |
aravindsv | 0:ba7650f404af | 627 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 628 | { |
aravindsv | 0:ba7650f404af | 629 | uint32_t result; |
aravindsv | 0:ba7650f404af | 630 | |
aravindsv | 0:ba7650f404af | 631 | __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 632 | return(result); |
aravindsv | 0:ba7650f404af | 633 | } |
aravindsv | 0:ba7650f404af | 634 | |
aravindsv | 0:ba7650f404af | 635 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2) |
aravindsv | 0:ba7650f404af | 636 | { |
aravindsv | 0:ba7650f404af | 637 | uint32_t result; |
aravindsv | 0:ba7650f404af | 638 | |
aravindsv | 0:ba7650f404af | 639 | __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); |
aravindsv | 0:ba7650f404af | 640 | return(result); |
aravindsv | 0:ba7650f404af | 641 | } |
aravindsv | 0:ba7650f404af | 642 | |
aravindsv | 0:ba7650f404af | 643 | #define __PKHBT(ARG1,ARG2,ARG3) \ |
aravindsv | 0:ba7650f404af | 644 | ({ \ |
aravindsv | 0:ba7650f404af | 645 | uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ |
aravindsv | 0:ba7650f404af | 646 | __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ |
aravindsv | 0:ba7650f404af | 647 | __RES; \ |
aravindsv | 0:ba7650f404af | 648 | }) |
aravindsv | 0:ba7650f404af | 649 | |
aravindsv | 0:ba7650f404af | 650 | #define __PKHTB(ARG1,ARG2,ARG3) \ |
aravindsv | 0:ba7650f404af | 651 | ({ \ |
aravindsv | 0:ba7650f404af | 652 | uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ |
aravindsv | 0:ba7650f404af | 653 | if (ARG3 == 0) \ |
aravindsv | 0:ba7650f404af | 654 | __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ |
aravindsv | 0:ba7650f404af | 655 | else \ |
aravindsv | 0:ba7650f404af | 656 | __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ |
aravindsv | 0:ba7650f404af | 657 | __RES; \ |
aravindsv | 0:ba7650f404af | 658 | }) |
aravindsv | 0:ba7650f404af | 659 | |
aravindsv | 0:ba7650f404af | 660 | __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) |
aravindsv | 0:ba7650f404af | 661 | { |
aravindsv | 0:ba7650f404af | 662 | int32_t result; |
aravindsv | 0:ba7650f404af | 663 | |
aravindsv | 0:ba7650f404af | 664 | __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); |
aravindsv | 0:ba7650f404af | 665 | return(result); |
aravindsv | 0:ba7650f404af | 666 | } |
aravindsv | 0:ba7650f404af | 667 | |
aravindsv | 0:ba7650f404af | 668 | |
aravindsv | 0:ba7650f404af | 669 | #elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/ |
aravindsv | 0:ba7650f404af | 670 | /* IAR iccarm specific functions */ |
aravindsv | 0:ba7650f404af | 671 | #include <cmsis_iar.h> |
aravindsv | 0:ba7650f404af | 672 | |
aravindsv | 0:ba7650f404af | 673 | |
aravindsv | 0:ba7650f404af | 674 | #elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/ |
aravindsv | 0:ba7650f404af | 675 | /* TI CCS specific functions */ |
aravindsv | 0:ba7650f404af | 676 | #include <cmsis_ccs.h> |
aravindsv | 0:ba7650f404af | 677 | |
aravindsv | 0:ba7650f404af | 678 | |
aravindsv | 0:ba7650f404af | 679 | #elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/ |
aravindsv | 0:ba7650f404af | 680 | /* TASKING carm specific functions */ |
aravindsv | 0:ba7650f404af | 681 | /* not yet supported */ |
aravindsv | 0:ba7650f404af | 682 | |
aravindsv | 0:ba7650f404af | 683 | |
aravindsv | 0:ba7650f404af | 684 | #elif defined ( __CSMC__ ) /*------------------ COSMIC Compiler -------------------*/ |
aravindsv | 0:ba7650f404af | 685 | /* Cosmic specific functions */ |
aravindsv | 0:ba7650f404af | 686 | #include <cmsis_csm.h> |
aravindsv | 0:ba7650f404af | 687 | |
aravindsv | 0:ba7650f404af | 688 | #endif |
aravindsv | 0:ba7650f404af | 689 | |
aravindsv | 0:ba7650f404af | 690 | /*@} end of group CMSIS_SIMD_intrinsics */ |
aravindsv | 0:ba7650f404af | 691 | |
aravindsv | 0:ba7650f404af | 692 | |
aravindsv | 0:ba7650f404af | 693 | #ifdef __cplusplus |
aravindsv | 0:ba7650f404af | 694 | } |
aravindsv | 0:ba7650f404af | 695 | #endif |
aravindsv | 0:ba7650f404af | 696 | |
aravindsv | 0:ba7650f404af | 697 | #endif /* __CORE_CMSIMD_H */ |
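For context on how the intrinsics declared above are used, here is a minimal usage sketch, assuming a Cortex-M4/M7 target with the DSP extension and the CMSIS core headers on the include path (the device header name is a placeholder): each 32-bit word carries a pair of signed 16-bit lanes, __SADD16 adds the lanes in parallel, and __SMLAD forms a dual 16x16 multiply-accumulate.

```c
#include <stdint.h>
#include "stm32f4xx.h"   /* placeholder device header; it pulls in core_cm4.h, which includes core_cmSimd.h */

/* Pack two signed 16-bit lanes into one 32-bit word (high lane, low lane). */
static inline uint32_t pack16(int16_t hi, int16_t lo)
{
    return ((uint32_t)(uint16_t)hi << 16) | (uint16_t)lo;
}

int32_t simd_demo(void)
{
    uint32_t a = pack16(300,  -5);       /* lanes {300, -5}  */
    uint32_t b = pack16(-100, 20);       /* lanes {-100, 20} */

    uint32_t sum = __SADD16(a, b);       /* lane-wise add: lanes {200, 15} */

    /* Dual multiply-accumulate: (300 * -100) + (-5 * 20) + 0 = -30100 */
    int32_t dot = (int32_t)__SMLAD(a, b, 0U);

    (void)sum;
    return dot;
}
```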