This is the final version of the Mini Gateway for Automation and Security (MGAS), designed for the Renesas GR-PEACH Design Contest.

Dependencies: GR-PEACH_video GraphicsFramework HTTPServer R_BSP mbed-rpc mbed-rtos Socket lwip-eth lwip-sys lwip FATFileSystem

Fork of mbed-os-example-mbed5-blinky by mbed-os-examples

Committer: vipinranka
Date: Wed Jan 11 11:41:30 2017 +0000
Revision: 12:9a20164dcc47

Commit message: This is the final version of the MGAS Project for the Renesas GR-PEACH Design Contest.

/**************************************************************************//**
 * @file     core_cmSimd.h
 * @brief    CMSIS Cortex-M SIMD Header File
 * @version  V4.10
 * @date     18. March 2015
 *
 * @note
 *
 ******************************************************************************/
/* Copyright (c) 2009 - 2014 ARM LIMITED

   All rights reserved.
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
   - Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.
   - Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.
   - Neither the name of ARM nor the names of its contributors may be used
     to endorse or promote products derived from this software without
     specific prior written permission.
   *
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
   ---------------------------------------------------------------------------*/


#if defined ( __ICCARM__ )
 #pragma system_include  /* treat file as system include file for MISRA check */
#endif

#ifndef __CORE_CMSIMD_H
#define __CORE_CMSIMD_H

#ifdef __cplusplus
 extern "C" {
#endif


/*******************************************************************************
 *                 Hardware Abstraction Layer
 ******************************************************************************/


/* ###################  Compiler specific Intrinsics  ########################### */
/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
  Access to dedicated SIMD instructions
  @{
*/

#if   defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
/* ARM armcc specific functions */
#define __SADD8               __sadd8
#define __QADD8               __qadd8
#define __SHADD8              __shadd8
#define __UADD8               __uadd8
#define __UQADD8              __uqadd8
#define __UHADD8              __uhadd8
#define __SSUB8               __ssub8
#define __QSUB8               __qsub8
#define __SHSUB8              __shsub8
#define __USUB8               __usub8
#define __UQSUB8              __uqsub8
#define __UHSUB8              __uhsub8
#define __SADD16              __sadd16
#define __QADD16              __qadd16
#define __SHADD16             __shadd16
#define __UADD16              __uadd16
#define __UQADD16             __uqadd16
#define __UHADD16             __uhadd16
#define __SSUB16              __ssub16
#define __QSUB16              __qsub16
#define __SHSUB16             __shsub16
#define __USUB16              __usub16
#define __UQSUB16             __uqsub16
#define __UHSUB16             __uhsub16
#define __SASX                __sasx
#define __QASX                __qasx
#define __SHASX               __shasx
#define __UASX                __uasx
#define __UQASX               __uqasx
#define __UHASX               __uhasx
#define __SSAX                __ssax
#define __QSAX                __qsax
#define __SHSAX               __shsax
#define __USAX                __usax
#define __UQSAX               __uqsax
#define __UHSAX               __uhsax
#define __USAD8               __usad8
#define __USADA8              __usada8
#define __SSAT16              __ssat16
#define __USAT16              __usat16
#define __UXTB16              __uxtb16
#define __UXTAB16             __uxtab16
#define __SXTB16              __sxtb16
#define __SXTAB16             __sxtab16
#define __SMUAD               __smuad
#define __SMUADX              __smuadx
#define __SMLAD               __smlad
#define __SMLADX              __smladx
#define __SMLALD              __smlald
#define __SMLALDX             __smlaldx
#define __SMUSD               __smusd
#define __SMUSDX              __smusdx
#define __SMLSD               __smlsd
#define __SMLSDX              __smlsdx
#define __SMLSLD              __smlsld
#define __SMLSLDX             __smlsldx
#define __SEL                 __sel
#define __QADD                __qadd
#define __QSUB                __qsub

#define __PKHBT(ARG1,ARG2,ARG3)   ( ((((uint32_t)(ARG1))          ) & 0x0000FFFFUL) |  \
                                    ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL)  )

#define __PKHTB(ARG1,ARG2,ARG3)   ( ((((uint32_t)(ARG1))          ) & 0xFFFF0000UL) |  \
                                    ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL)  )

#define __SMMLA(ARG1,ARG2,ARG3)   ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \
                                    ((int64_t)(ARG3) << 32)      ) >> 32))

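/* Illustrative note (not part of the original CMSIS header): __PKHBT packs
   the bottom halfword of ARG1 with the (left-shifted) ARG2 into the top half
   of the result, while __PKHTB keeps the top halfword of ARG1 and inserts the
   (arithmetically right-shifted) ARG2 into the bottom half. A minimal sketch,
   assuming two 16-bit samples x and y:

     uint32_t packed = __PKHBT(x, y, 16);   // y in bits [31:16], x in [15:0]

   Packing halfwords this way is what lets the dual 16-bit intrinsics below
   (e.g. __SADD16, __SMUAD) process two samples per instruction. */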

#elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
/* GNU gcc specific functions */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}


__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

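/* Illustrative note (not part of the original CMSIS header): the 8-bit
   intrinsics above operate on four byte lanes at once. The S/U prefix picks
   signed or unsigned lanes, Q saturates each lane instead of wrapping, and
   H ("halving") shifts each lane result right by one, which cannot overflow.
   A minimal sketch:

     uint32_t a = 0x80FF4001, b = 0x80014001;
     uint32_t wrap = __UADD8(a, b);    // 0x00008002: lanes wrap modulo 256
     uint32_t sat  = __UQADD8(a, b);   // 0xFFFF8002: lanes clip at 0xFF
     uint32_t avg  = __UHADD8(a, b);   // 0x80804001: per-lane (a+b)/2        */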
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

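/* Illustrative note (not part of the original CMSIS header): the 16-bit
   variants mirror the 8-bit ones but work on two halfword lanes. The plain
   (non-Q, non-H) forms such as __SADD16/__USUB16 also update the APSR.GE
   flags per lane, which the __SEL intrinsic further down uses to merge two
   operands lane by lane. */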
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

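/* Illustrative note (not part of the original CMSIS header): the ASX/SAX
   ("add-subtract exchange") forms cross the lanes: __SASX computes
   {hi1 + lo2, lo1 - hi2} and __SSAX computes {hi1 - lo2, lo1 + hi2}. A
   minimal sketch for packed complex samples laid out as {imag, real}:

     uint32_t t = __SASX(z, w);  // top lane: im(z)+re(w), bottom: re(z)-im(w)

   which is why these show up in packed complex arithmetic and FFT kernels. */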
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

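/* Illustrative note (not part of the original CMSIS header): __USAD8 sums
   the absolute differences of the four byte lanes, and __USADA8 adds that
   sum to an accumulator, the basic step of block-matching and
   motion-estimation loops. A minimal sketch:

     uint32_t sad = __USAD8(0x01050203, 0x02030202);
     // |1-2| + |5-3| + |2-2| + |3-2| = 4                                     */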
#define __SSAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

#define __USAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

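/* Illustrative note (not part of the original CMSIS header):
   __SSAT16/__USAT16 saturate both halfword lanes to a signed/unsigned width
   given as an immediate; the "I" constraint above means ARG2 must be a
   compile-time constant. A minimal sketch:

     uint32_t r = __SSAT16(x, 8);  // clips each halfword of x to [-128, 127] */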
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

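/* Illustrative note (not part of the original CMSIS header):
   __UXTB16/__SXTB16 extract bytes 0 and 2 and zero-/sign-extend them into
   the two halfword lanes (__UXTAB16/__SXTAB16 additionally add op1), a cheap
   way to widen interleaved 8-bit data before 16-bit SIMD arithmetic, e.g.

     __UXTB16(0x44332211) == 0x00330011                                       */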
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

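/* Illustrative note (not part of the original CMSIS header): __SMUAD is a
   dual 16x16 multiply with the two lane products added together, and __SMLAD
   folds in an accumulator, i.e. one packed dot-product step; the X forms
   swap the halfwords of op2 first. A minimal sketch:

     uint32_t acc = __SMLAD(0x00030002, 0x00050004, 10);
     // 3*5 + 2*4 + 10 = 33                                                   */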
__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__ // Little endian
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else             // Big endian
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__ // Little endian
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else             // Big endian
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

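/* Illustrative note (not part of the original CMSIS header):
   __SMLALD/__SMLALDX are the same dual multiply-accumulate but with a 64-bit
   accumulator, so a long FIR or correlation loop can run without the 32-bit
   overflow guards a plain __SMLAD version would need. The union above is
   simply how the two 32-bit halves of that accumulator are passed to the
   instruction. */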
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

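/* Illustrative note (not part of the original CMSIS header): __SMUSD takes
   the difference of the two lane products (bottom*bottom - top*top), and the
   X forms swap op2's halfwords first. Paired with __SMUAD/__SMUADX these
   yield the real and imaginary parts of a packed 16-bit complex multiply in
   two instructions. */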
__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__ // Little endian
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else             // Big endian
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__ // Little endian
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else             // Big endian
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

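/* Illustrative note (not part of the original CMSIS header): __SEL picks
   each byte lane from op1 where the corresponding APSR.GE flag is set and
   from op2 otherwise. A minimal sketch of the classic per-byte maximum:

     __USUB8(a, b);              // sets GE[n] where byte n of a >= byte n of b
     uint32_t m = __SEL(a, b);   // per-byte max(a, b)                        */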
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

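/* Illustrative note (not part of the original CMSIS header): __QADD/__QSUB
   are the scalar 32-bit saturating forms, e.g.

     __QADD(0x7FFFFFFF, 1) == 0x7FFFFFFF  // clips at INT32_MAX, no wraparound */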
#define __PKHBT(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
  __RES; \
 })

#define __PKHTB(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  if (ARG3 == 0) \
    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2)  ); \
  else \
    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) :  "r" (__ARG1), "r" (__ARG2), "I" (ARG3)  ); \
  __RES; \
 })

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
{
  int32_t result;

  __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

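/* Illustrative note (not part of the original CMSIS header): __SMMLA returns
   op3 plus the most significant 32 bits of the 64-bit product op1*op2,
   matching the armcc macro earlier in this file:

     (int32_t)((((int64_t)op1 * op2) + ((int64_t)op3 << 32)) >> 32)

   which is the standard multiply-accumulate step for Q31 fixed-point data. */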

#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
/* IAR iccarm specific functions */
#include <cmsis_iar.h>


#elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
/* TI CCS specific functions */
#include <cmsis_ccs.h>


#elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
/* TASKING carm specific functions */
/* not yet supported */


#elif defined ( __CSMC__ ) /*------------------ COSMIC Compiler -------------------*/
/* Cosmic specific functions */
#include <cmsis_csm.h>

#endif

/*@} end of group CMSIS_SIMD_intrinsics */


#ifdef __cplusplus
}
#endif

#endif /* __CORE_CMSIMD_H */
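
Usage note: taken together, these intrinsics are what gateway firmware like MGAS can reach for when filtering packed sensor or video data. The sketch below is illustrative only and is not part of this header or of the MGAS sources; it assumes the target's "cmsis.h" (as in mbed builds) pulls in core_cmSimd.h, that the core implements the DSP/SIMD instructions, and the function name blend_pixels is hypothetical.

#include <stdint.h>
#include "cmsis.h"   /* assumption: the mbed target header chain includes core_cmSimd.h */

/* Blend two RGBA8888 pixel streams 50/50 with one __UHADD8 per pixel: the
   halving add averages all four 8-bit channels at once, with no carries
   spilling between channels. */
static void blend_pixels(const uint32_t *src_a, const uint32_t *src_b,
                         uint32_t *dst, uint32_t n_pixels)
{
    for (uint32_t i = 0; i < n_pixels; i++) {
        dst[i] = __UHADD8(src_a[i], src_b[i]);
    }
}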