This is the final version of the Mini Gateway for Automation and Security (MGAS), designed for the Renesas GR-PEACH Design Contest.

Dependencies:   GR-PEACH_video GraphicsFramework HTTPServer R_BSP mbed-rpc mbed-rtos Socket lwip-eth lwip-sys lwip FATFileSystem

Fork of mbed-os-example-mbed5-blinky by mbed-os-examples

Committer:
vipinranka
Date:
Wed Jan 11 11:41:30 2017 +0000
Revision:
12:9a20164dcc47
This is the final version of the MGAS Project for the Renesas GR-PEACH Design Contest.

Who changed what in which revision?

User | Revision | Line number | New contents of line
/**************************************************************************//**
 * @file     core_cm4_simd.h
 * @brief    CMSIS Cortex-M4 SIMD Header File
 * @version  V3.20
 * @date     25. February 2013
 *
 * @note
 *
 ******************************************************************************/
/* Copyright (c) 2009 - 2013 ARM LIMITED

   All rights reserved.
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
   - Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.
   - Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.
   - Neither the name of ARM nor the names of its contributors may be used
     to endorse or promote products derived from this software without
     specific prior written permission.
   *
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
   ---------------------------------------------------------------------------*/
vipinranka 12:9a20164dcc47 36
vipinranka 12:9a20164dcc47 37
vipinranka 12:9a20164dcc47 38 #ifdef __cplusplus
vipinranka 12:9a20164dcc47 39 extern "C" {
vipinranka 12:9a20164dcc47 40 #endif
vipinranka 12:9a20164dcc47 41
vipinranka 12:9a20164dcc47 42 #ifndef __CORE_CM4_SIMD_H
vipinranka 12:9a20164dcc47 43 #define __CORE_CM4_SIMD_H
vipinranka 12:9a20164dcc47 44
vipinranka 12:9a20164dcc47 45
/*******************************************************************************
 *                 Hardware Abstraction Layer
 ******************************************************************************/


/* ###################  Compiler specific Intrinsics  ########################### */
/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
  Access to dedicated SIMD instructions
  @{
*/
vipinranka 12:9a20164dcc47 56
vipinranka 12:9a20164dcc47 57 #if defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
vipinranka 12:9a20164dcc47 58 /* ARM armcc specific functions */
vipinranka 12:9a20164dcc47 59
vipinranka 12:9a20164dcc47 60 /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
vipinranka 12:9a20164dcc47 61 #define __SADD8 __sadd8
vipinranka 12:9a20164dcc47 62 #define __QADD8 __qadd8
vipinranka 12:9a20164dcc47 63 #define __SHADD8 __shadd8
vipinranka 12:9a20164dcc47 64 #define __UADD8 __uadd8
vipinranka 12:9a20164dcc47 65 #define __UQADD8 __uqadd8
vipinranka 12:9a20164dcc47 66 #define __UHADD8 __uhadd8
vipinranka 12:9a20164dcc47 67 #define __SSUB8 __ssub8
vipinranka 12:9a20164dcc47 68 #define __QSUB8 __qsub8
vipinranka 12:9a20164dcc47 69 #define __SHSUB8 __shsub8
vipinranka 12:9a20164dcc47 70 #define __USUB8 __usub8
vipinranka 12:9a20164dcc47 71 #define __UQSUB8 __uqsub8
vipinranka 12:9a20164dcc47 72 #define __UHSUB8 __uhsub8
vipinranka 12:9a20164dcc47 73 #define __SADD16 __sadd16
vipinranka 12:9a20164dcc47 74 #define __QADD16 __qadd16
vipinranka 12:9a20164dcc47 75 #define __SHADD16 __shadd16
vipinranka 12:9a20164dcc47 76 #define __UADD16 __uadd16
vipinranka 12:9a20164dcc47 77 #define __UQADD16 __uqadd16
vipinranka 12:9a20164dcc47 78 #define __UHADD16 __uhadd16
vipinranka 12:9a20164dcc47 79 #define __SSUB16 __ssub16
vipinranka 12:9a20164dcc47 80 #define __QSUB16 __qsub16
vipinranka 12:9a20164dcc47 81 #define __SHSUB16 __shsub16
vipinranka 12:9a20164dcc47 82 #define __USUB16 __usub16
vipinranka 12:9a20164dcc47 83 #define __UQSUB16 __uqsub16
vipinranka 12:9a20164dcc47 84 #define __UHSUB16 __uhsub16
vipinranka 12:9a20164dcc47 85 #define __SASX __sasx
vipinranka 12:9a20164dcc47 86 #define __QASX __qasx
vipinranka 12:9a20164dcc47 87 #define __SHASX __shasx
vipinranka 12:9a20164dcc47 88 #define __UASX __uasx
vipinranka 12:9a20164dcc47 89 #define __UQASX __uqasx
vipinranka 12:9a20164dcc47 90 #define __UHASX __uhasx
vipinranka 12:9a20164dcc47 91 #define __SSAX __ssax
vipinranka 12:9a20164dcc47 92 #define __QSAX __qsax
vipinranka 12:9a20164dcc47 93 #define __SHSAX __shsax
vipinranka 12:9a20164dcc47 94 #define __USAX __usax
vipinranka 12:9a20164dcc47 95 #define __UQSAX __uqsax
vipinranka 12:9a20164dcc47 96 #define __UHSAX __uhsax
vipinranka 12:9a20164dcc47 97 #define __USAD8 __usad8
vipinranka 12:9a20164dcc47 98 #define __USADA8 __usada8
vipinranka 12:9a20164dcc47 99 #define __SSAT16 __ssat16
vipinranka 12:9a20164dcc47 100 #define __USAT16 __usat16
vipinranka 12:9a20164dcc47 101 #define __UXTB16 __uxtb16
vipinranka 12:9a20164dcc47 102 #define __UXTAB16 __uxtab16
vipinranka 12:9a20164dcc47 103 #define __SXTB16 __sxtb16
vipinranka 12:9a20164dcc47 104 #define __SXTAB16 __sxtab16
vipinranka 12:9a20164dcc47 105 #define __SMUAD __smuad
vipinranka 12:9a20164dcc47 106 #define __SMUADX __smuadx
vipinranka 12:9a20164dcc47 107 #define __SMLAD __smlad
vipinranka 12:9a20164dcc47 108 #define __SMLADX __smladx
vipinranka 12:9a20164dcc47 109 #define __SMLALD __smlald
vipinranka 12:9a20164dcc47 110 #define __SMLALDX __smlaldx
vipinranka 12:9a20164dcc47 111 #define __SMUSD __smusd
vipinranka 12:9a20164dcc47 112 #define __SMUSDX __smusdx
vipinranka 12:9a20164dcc47 113 #define __SMLSD __smlsd
vipinranka 12:9a20164dcc47 114 #define __SMLSDX __smlsdx
vipinranka 12:9a20164dcc47 115 #define __SMLSLD __smlsld
vipinranka 12:9a20164dcc47 116 #define __SMLSLDX __smlsldx
vipinranka 12:9a20164dcc47 117 #define __SEL __sel
vipinranka 12:9a20164dcc47 118 #define __QADD __qadd
vipinranka 12:9a20164dcc47 119 #define __QSUB __qsub
vipinranka 12:9a20164dcc47 120
vipinranka 12:9a20164dcc47 121 #define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \
vipinranka 12:9a20164dcc47 122 ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) )
vipinranka 12:9a20164dcc47 123
vipinranka 12:9a20164dcc47 124 #define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \
vipinranka 12:9a20164dcc47 125 ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) )
vipinranka 12:9a20164dcc47 126
vipinranka 12:9a20164dcc47 127 #define __SMMLA(ARG1,ARG2,ARG3) ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \
vipinranka 12:9a20164dcc47 128 ((int64_t)(ARG3) << 32) ) >> 32))
vipinranka 12:9a20164dcc47 129
vipinranka 12:9a20164dcc47 130 /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
vipinranka 12:9a20164dcc47 131
vipinranka 12:9a20164dcc47 132
vipinranka 12:9a20164dcc47 133
vipinranka 12:9a20164dcc47 134 #elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
vipinranka 12:9a20164dcc47 135 /* IAR iccarm specific functions */
vipinranka 12:9a20164dcc47 136
vipinranka 12:9a20164dcc47 137 /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
vipinranka 12:9a20164dcc47 138 #include <cmsis_iar.h>
vipinranka 12:9a20164dcc47 139
vipinranka 12:9a20164dcc47 140 /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
vipinranka 12:9a20164dcc47 141
vipinranka 12:9a20164dcc47 142
vipinranka 12:9a20164dcc47 143
vipinranka 12:9a20164dcc47 144 #elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
vipinranka 12:9a20164dcc47 145 /* TI CCS specific functions */
vipinranka 12:9a20164dcc47 146
vipinranka 12:9a20164dcc47 147 /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
vipinranka 12:9a20164dcc47 148 #include <cmsis_ccs.h>
vipinranka 12:9a20164dcc47 149
vipinranka 12:9a20164dcc47 150 /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
vipinranka 12:9a20164dcc47 151
vipinranka 12:9a20164dcc47 152
vipinranka 12:9a20164dcc47 153
vipinranka 12:9a20164dcc47 154 #elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
vipinranka 12:9a20164dcc47 155 /* GNU gcc specific functions */
vipinranka 12:9a20164dcc47 156
vipinranka 12:9a20164dcc47 157 /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
vipinranka 12:9a20164dcc47 158 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 159 {
vipinranka 12:9a20164dcc47 160 uint32_t result;
vipinranka 12:9a20164dcc47 161
vipinranka 12:9a20164dcc47 162 __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 163 return(result);
vipinranka 12:9a20164dcc47 164 }
vipinranka 12:9a20164dcc47 165
vipinranka 12:9a20164dcc47 166 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 167 {
vipinranka 12:9a20164dcc47 168 uint32_t result;
vipinranka 12:9a20164dcc47 169
vipinranka 12:9a20164dcc47 170 __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 171 return(result);
vipinranka 12:9a20164dcc47 172 }
vipinranka 12:9a20164dcc47 173
vipinranka 12:9a20164dcc47 174 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 175 {
vipinranka 12:9a20164dcc47 176 uint32_t result;
vipinranka 12:9a20164dcc47 177
vipinranka 12:9a20164dcc47 178 __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 179 return(result);
vipinranka 12:9a20164dcc47 180 }
vipinranka 12:9a20164dcc47 181
vipinranka 12:9a20164dcc47 182 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 183 {
vipinranka 12:9a20164dcc47 184 uint32_t result;
vipinranka 12:9a20164dcc47 185
vipinranka 12:9a20164dcc47 186 __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 187 return(result);
vipinranka 12:9a20164dcc47 188 }
vipinranka 12:9a20164dcc47 189
vipinranka 12:9a20164dcc47 190 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 191 {
vipinranka 12:9a20164dcc47 192 uint32_t result;
vipinranka 12:9a20164dcc47 193
vipinranka 12:9a20164dcc47 194 __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 195 return(result);
vipinranka 12:9a20164dcc47 196 }
vipinranka 12:9a20164dcc47 197
vipinranka 12:9a20164dcc47 198 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 199 {
vipinranka 12:9a20164dcc47 200 uint32_t result;
vipinranka 12:9a20164dcc47 201
vipinranka 12:9a20164dcc47 202 __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 203 return(result);
vipinranka 12:9a20164dcc47 204 }
vipinranka 12:9a20164dcc47 205
vipinranka 12:9a20164dcc47 206
vipinranka 12:9a20164dcc47 207 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 208 {
vipinranka 12:9a20164dcc47 209 uint32_t result;
vipinranka 12:9a20164dcc47 210
vipinranka 12:9a20164dcc47 211 __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 212 return(result);
vipinranka 12:9a20164dcc47 213 }
vipinranka 12:9a20164dcc47 214
vipinranka 12:9a20164dcc47 215 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 216 {
vipinranka 12:9a20164dcc47 217 uint32_t result;
vipinranka 12:9a20164dcc47 218
vipinranka 12:9a20164dcc47 219 __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 220 return(result);
vipinranka 12:9a20164dcc47 221 }
vipinranka 12:9a20164dcc47 222
vipinranka 12:9a20164dcc47 223 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 224 {
vipinranka 12:9a20164dcc47 225 uint32_t result;
vipinranka 12:9a20164dcc47 226
vipinranka 12:9a20164dcc47 227 __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 228 return(result);
vipinranka 12:9a20164dcc47 229 }
vipinranka 12:9a20164dcc47 230
vipinranka 12:9a20164dcc47 231 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 232 {
vipinranka 12:9a20164dcc47 233 uint32_t result;
vipinranka 12:9a20164dcc47 234
vipinranka 12:9a20164dcc47 235 __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 236 return(result);
vipinranka 12:9a20164dcc47 237 }
vipinranka 12:9a20164dcc47 238
vipinranka 12:9a20164dcc47 239 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 240 {
vipinranka 12:9a20164dcc47 241 uint32_t result;
vipinranka 12:9a20164dcc47 242
vipinranka 12:9a20164dcc47 243 __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 244 return(result);
vipinranka 12:9a20164dcc47 245 }
vipinranka 12:9a20164dcc47 246
vipinranka 12:9a20164dcc47 247 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 248 {
vipinranka 12:9a20164dcc47 249 uint32_t result;
vipinranka 12:9a20164dcc47 250
vipinranka 12:9a20164dcc47 251 __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 252 return(result);
vipinranka 12:9a20164dcc47 253 }
vipinranka 12:9a20164dcc47 254
vipinranka 12:9a20164dcc47 255
vipinranka 12:9a20164dcc47 256 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 257 {
vipinranka 12:9a20164dcc47 258 uint32_t result;
vipinranka 12:9a20164dcc47 259
vipinranka 12:9a20164dcc47 260 __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 261 return(result);
vipinranka 12:9a20164dcc47 262 }
vipinranka 12:9a20164dcc47 263
vipinranka 12:9a20164dcc47 264 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 265 {
vipinranka 12:9a20164dcc47 266 uint32_t result;
vipinranka 12:9a20164dcc47 267
vipinranka 12:9a20164dcc47 268 __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 269 return(result);
vipinranka 12:9a20164dcc47 270 }
vipinranka 12:9a20164dcc47 271
vipinranka 12:9a20164dcc47 272 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 273 {
vipinranka 12:9a20164dcc47 274 uint32_t result;
vipinranka 12:9a20164dcc47 275
vipinranka 12:9a20164dcc47 276 __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 277 return(result);
vipinranka 12:9a20164dcc47 278 }
vipinranka 12:9a20164dcc47 279
vipinranka 12:9a20164dcc47 280 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 281 {
vipinranka 12:9a20164dcc47 282 uint32_t result;
vipinranka 12:9a20164dcc47 283
vipinranka 12:9a20164dcc47 284 __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 285 return(result);
vipinranka 12:9a20164dcc47 286 }
vipinranka 12:9a20164dcc47 287
vipinranka 12:9a20164dcc47 288 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 289 {
vipinranka 12:9a20164dcc47 290 uint32_t result;
vipinranka 12:9a20164dcc47 291
vipinranka 12:9a20164dcc47 292 __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 293 return(result);
vipinranka 12:9a20164dcc47 294 }
vipinranka 12:9a20164dcc47 295
vipinranka 12:9a20164dcc47 296 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 297 {
vipinranka 12:9a20164dcc47 298 uint32_t result;
vipinranka 12:9a20164dcc47 299
vipinranka 12:9a20164dcc47 300 __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 301 return(result);
vipinranka 12:9a20164dcc47 302 }
vipinranka 12:9a20164dcc47 303
vipinranka 12:9a20164dcc47 304 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 305 {
vipinranka 12:9a20164dcc47 306 uint32_t result;
vipinranka 12:9a20164dcc47 307
vipinranka 12:9a20164dcc47 308 __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 309 return(result);
vipinranka 12:9a20164dcc47 310 }
vipinranka 12:9a20164dcc47 311
vipinranka 12:9a20164dcc47 312 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 313 {
vipinranka 12:9a20164dcc47 314 uint32_t result;
vipinranka 12:9a20164dcc47 315
vipinranka 12:9a20164dcc47 316 __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 317 return(result);
vipinranka 12:9a20164dcc47 318 }
vipinranka 12:9a20164dcc47 319
vipinranka 12:9a20164dcc47 320 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 321 {
vipinranka 12:9a20164dcc47 322 uint32_t result;
vipinranka 12:9a20164dcc47 323
vipinranka 12:9a20164dcc47 324 __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 325 return(result);
vipinranka 12:9a20164dcc47 326 }
vipinranka 12:9a20164dcc47 327
vipinranka 12:9a20164dcc47 328 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 329 {
vipinranka 12:9a20164dcc47 330 uint32_t result;
vipinranka 12:9a20164dcc47 331
vipinranka 12:9a20164dcc47 332 __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 333 return(result);
vipinranka 12:9a20164dcc47 334 }
vipinranka 12:9a20164dcc47 335
vipinranka 12:9a20164dcc47 336 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 337 {
vipinranka 12:9a20164dcc47 338 uint32_t result;
vipinranka 12:9a20164dcc47 339
vipinranka 12:9a20164dcc47 340 __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 341 return(result);
vipinranka 12:9a20164dcc47 342 }
vipinranka 12:9a20164dcc47 343
vipinranka 12:9a20164dcc47 344 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 345 {
vipinranka 12:9a20164dcc47 346 uint32_t result;
vipinranka 12:9a20164dcc47 347
vipinranka 12:9a20164dcc47 348 __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 349 return(result);
vipinranka 12:9a20164dcc47 350 }
vipinranka 12:9a20164dcc47 351
vipinranka 12:9a20164dcc47 352 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 353 {
vipinranka 12:9a20164dcc47 354 uint32_t result;
vipinranka 12:9a20164dcc47 355
vipinranka 12:9a20164dcc47 356 __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 357 return(result);
vipinranka 12:9a20164dcc47 358 }
vipinranka 12:9a20164dcc47 359
vipinranka 12:9a20164dcc47 360 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 361 {
vipinranka 12:9a20164dcc47 362 uint32_t result;
vipinranka 12:9a20164dcc47 363
vipinranka 12:9a20164dcc47 364 __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 365 return(result);
vipinranka 12:9a20164dcc47 366 }
vipinranka 12:9a20164dcc47 367
vipinranka 12:9a20164dcc47 368 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 369 {
vipinranka 12:9a20164dcc47 370 uint32_t result;
vipinranka 12:9a20164dcc47 371
vipinranka 12:9a20164dcc47 372 __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 373 return(result);
vipinranka 12:9a20164dcc47 374 }
vipinranka 12:9a20164dcc47 375
vipinranka 12:9a20164dcc47 376 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 377 {
vipinranka 12:9a20164dcc47 378 uint32_t result;
vipinranka 12:9a20164dcc47 379
vipinranka 12:9a20164dcc47 380 __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 381 return(result);
vipinranka 12:9a20164dcc47 382 }
vipinranka 12:9a20164dcc47 383
vipinranka 12:9a20164dcc47 384 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 385 {
vipinranka 12:9a20164dcc47 386 uint32_t result;
vipinranka 12:9a20164dcc47 387
vipinranka 12:9a20164dcc47 388 __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 389 return(result);
vipinranka 12:9a20164dcc47 390 }
vipinranka 12:9a20164dcc47 391
vipinranka 12:9a20164dcc47 392 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 393 {
vipinranka 12:9a20164dcc47 394 uint32_t result;
vipinranka 12:9a20164dcc47 395
vipinranka 12:9a20164dcc47 396 __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 397 return(result);
vipinranka 12:9a20164dcc47 398 }
vipinranka 12:9a20164dcc47 399
vipinranka 12:9a20164dcc47 400 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 401 {
vipinranka 12:9a20164dcc47 402 uint32_t result;
vipinranka 12:9a20164dcc47 403
vipinranka 12:9a20164dcc47 404 __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 405 return(result);
vipinranka 12:9a20164dcc47 406 }
vipinranka 12:9a20164dcc47 407
vipinranka 12:9a20164dcc47 408 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 409 {
vipinranka 12:9a20164dcc47 410 uint32_t result;
vipinranka 12:9a20164dcc47 411
vipinranka 12:9a20164dcc47 412 __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 413 return(result);
vipinranka 12:9a20164dcc47 414 }
vipinranka 12:9a20164dcc47 415
vipinranka 12:9a20164dcc47 416 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 417 {
vipinranka 12:9a20164dcc47 418 uint32_t result;
vipinranka 12:9a20164dcc47 419
vipinranka 12:9a20164dcc47 420 __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 421 return(result);
vipinranka 12:9a20164dcc47 422 }
vipinranka 12:9a20164dcc47 423
vipinranka 12:9a20164dcc47 424 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 425 {
vipinranka 12:9a20164dcc47 426 uint32_t result;
vipinranka 12:9a20164dcc47 427
vipinranka 12:9a20164dcc47 428 __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 429 return(result);
vipinranka 12:9a20164dcc47 430 }
vipinranka 12:9a20164dcc47 431
vipinranka 12:9a20164dcc47 432 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 433 {
vipinranka 12:9a20164dcc47 434 uint32_t result;
vipinranka 12:9a20164dcc47 435
vipinranka 12:9a20164dcc47 436 __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 437 return(result);
vipinranka 12:9a20164dcc47 438 }
vipinranka 12:9a20164dcc47 439
vipinranka 12:9a20164dcc47 440 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 441 {
vipinranka 12:9a20164dcc47 442 uint32_t result;
vipinranka 12:9a20164dcc47 443
vipinranka 12:9a20164dcc47 444 __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 445 return(result);
vipinranka 12:9a20164dcc47 446 }
vipinranka 12:9a20164dcc47 447
vipinranka 12:9a20164dcc47 448 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 449 {
vipinranka 12:9a20164dcc47 450 uint32_t result;
vipinranka 12:9a20164dcc47 451
vipinranka 12:9a20164dcc47 452 __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 453 return(result);
vipinranka 12:9a20164dcc47 454 }
vipinranka 12:9a20164dcc47 455
vipinranka 12:9a20164dcc47 456 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
vipinranka 12:9a20164dcc47 457 {
vipinranka 12:9a20164dcc47 458 uint32_t result;
vipinranka 12:9a20164dcc47 459
vipinranka 12:9a20164dcc47 460 __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
vipinranka 12:9a20164dcc47 461 return(result);
vipinranka 12:9a20164dcc47 462 }
vipinranka 12:9a20164dcc47 463
/* SSAT16/USAT16 take the saturation bound as an immediate ("I" constraint), so
   they must be macros: ARG2 has to be a compile-time constant, not a register. */

/** \brief Emit the SSAT16 instruction; ARG2 must be an immediate constant. */
#define __SSAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

/** \brief Emit the USAT16 instruction; ARG2 must be an immediate constant. */
#define __USAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat16 %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

vipinranka 12:9a20164dcc47 478 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
vipinranka 12:9a20164dcc47 479 {
vipinranka 12:9a20164dcc47 480 uint32_t result;
vipinranka 12:9a20164dcc47 481
vipinranka 12:9a20164dcc47 482 __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
vipinranka 12:9a20164dcc47 483 return(result);
vipinranka 12:9a20164dcc47 484 }
vipinranka 12:9a20164dcc47 485
vipinranka 12:9a20164dcc47 486 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 487 {
vipinranka 12:9a20164dcc47 488 uint32_t result;
vipinranka 12:9a20164dcc47 489
vipinranka 12:9a20164dcc47 490 __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 491 return(result);
vipinranka 12:9a20164dcc47 492 }
vipinranka 12:9a20164dcc47 493
vipinranka 12:9a20164dcc47 494 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
vipinranka 12:9a20164dcc47 495 {
vipinranka 12:9a20164dcc47 496 uint32_t result;
vipinranka 12:9a20164dcc47 497
vipinranka 12:9a20164dcc47 498 __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
vipinranka 12:9a20164dcc47 499 return(result);
vipinranka 12:9a20164dcc47 500 }
vipinranka 12:9a20164dcc47 501
vipinranka 12:9a20164dcc47 502 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 503 {
vipinranka 12:9a20164dcc47 504 uint32_t result;
vipinranka 12:9a20164dcc47 505
vipinranka 12:9a20164dcc47 506 __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 507 return(result);
vipinranka 12:9a20164dcc47 508 }
vipinranka 12:9a20164dcc47 509
vipinranka 12:9a20164dcc47 510 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 511 {
vipinranka 12:9a20164dcc47 512 uint32_t result;
vipinranka 12:9a20164dcc47 513
vipinranka 12:9a20164dcc47 514 __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 515 return(result);
vipinranka 12:9a20164dcc47 516 }
vipinranka 12:9a20164dcc47 517
vipinranka 12:9a20164dcc47 518 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 519 {
vipinranka 12:9a20164dcc47 520 uint32_t result;
vipinranka 12:9a20164dcc47 521
vipinranka 12:9a20164dcc47 522 __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 523 return(result);
vipinranka 12:9a20164dcc47 524 }
vipinranka 12:9a20164dcc47 525
vipinranka 12:9a20164dcc47 526 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
vipinranka 12:9a20164dcc47 527 {
vipinranka 12:9a20164dcc47 528 uint32_t result;
vipinranka 12:9a20164dcc47 529
vipinranka 12:9a20164dcc47 530 __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
vipinranka 12:9a20164dcc47 531 return(result);
vipinranka 12:9a20164dcc47 532 }
vipinranka 12:9a20164dcc47 533
vipinranka 12:9a20164dcc47 534 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
vipinranka 12:9a20164dcc47 535 {
vipinranka 12:9a20164dcc47 536 uint32_t result;
vipinranka 12:9a20164dcc47 537
vipinranka 12:9a20164dcc47 538 __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
vipinranka 12:9a20164dcc47 539 return(result);
vipinranka 12:9a20164dcc47 540 }
vipinranka 12:9a20164dcc47 541
vipinranka 12:9a20164dcc47 542 #define __SMLALD(ARG1,ARG2,ARG3) \
vipinranka 12:9a20164dcc47 543 ({ \
vipinranka 12:9a20164dcc47 544 uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
vipinranka 12:9a20164dcc47 545 __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
vipinranka 12:9a20164dcc47 546 (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
vipinranka 12:9a20164dcc47 547 })
vipinranka 12:9a20164dcc47 548
vipinranka 12:9a20164dcc47 549 #define __SMLALDX(ARG1,ARG2,ARG3) \
vipinranka 12:9a20164dcc47 550 ({ \
vipinranka 12:9a20164dcc47 551 uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
vipinranka 12:9a20164dcc47 552 __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
vipinranka 12:9a20164dcc47 553 (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
vipinranka 12:9a20164dcc47 554 })
vipinranka 12:9a20164dcc47 555
vipinranka 12:9a20164dcc47 556 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 557 {
vipinranka 12:9a20164dcc47 558 uint32_t result;
vipinranka 12:9a20164dcc47 559
vipinranka 12:9a20164dcc47 560 __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 561 return(result);
vipinranka 12:9a20164dcc47 562 }
vipinranka 12:9a20164dcc47 563
vipinranka 12:9a20164dcc47 564 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 565 {
vipinranka 12:9a20164dcc47 566 uint32_t result;
vipinranka 12:9a20164dcc47 567
vipinranka 12:9a20164dcc47 568 __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 569 return(result);
vipinranka 12:9a20164dcc47 570 }
vipinranka 12:9a20164dcc47 571
vipinranka 12:9a20164dcc47 572 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
vipinranka 12:9a20164dcc47 573 {
vipinranka 12:9a20164dcc47 574 uint32_t result;
vipinranka 12:9a20164dcc47 575
vipinranka 12:9a20164dcc47 576 __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
vipinranka 12:9a20164dcc47 577 return(result);
vipinranka 12:9a20164dcc47 578 }
vipinranka 12:9a20164dcc47 579
vipinranka 12:9a20164dcc47 580 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
vipinranka 12:9a20164dcc47 581 {
vipinranka 12:9a20164dcc47 582 uint32_t result;
vipinranka 12:9a20164dcc47 583
vipinranka 12:9a20164dcc47 584 __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
vipinranka 12:9a20164dcc47 585 return(result);
vipinranka 12:9a20164dcc47 586 }
vipinranka 12:9a20164dcc47 587
vipinranka 12:9a20164dcc47 588 #define __SMLSLD(ARG1,ARG2,ARG3) \
vipinranka 12:9a20164dcc47 589 ({ \
vipinranka 12:9a20164dcc47 590 uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((ARG3) >> 32), __ARG3_L = (uint32_t)((ARG3) & 0xFFFFFFFFUL); \
vipinranka 12:9a20164dcc47 591 __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
vipinranka 12:9a20164dcc47 592 (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
vipinranka 12:9a20164dcc47 593 })
vipinranka 12:9a20164dcc47 594
vipinranka 12:9a20164dcc47 595 #define __SMLSLDX(ARG1,ARG2,ARG3) \
vipinranka 12:9a20164dcc47 596 ({ \
vipinranka 12:9a20164dcc47 597 uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((ARG3) >> 32), __ARG3_L = (uint32_t)((ARG3) & 0xFFFFFFFFUL); \
vipinranka 12:9a20164dcc47 598 __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
vipinranka 12:9a20164dcc47 599 (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
vipinranka 12:9a20164dcc47 600 })
vipinranka 12:9a20164dcc47 601
vipinranka 12:9a20164dcc47 602 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL (uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 603 {
vipinranka 12:9a20164dcc47 604 uint32_t result;
vipinranka 12:9a20164dcc47 605
vipinranka 12:9a20164dcc47 606 __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 607 return(result);
vipinranka 12:9a20164dcc47 608 }
vipinranka 12:9a20164dcc47 609
vipinranka 12:9a20164dcc47 610 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 611 {
vipinranka 12:9a20164dcc47 612 uint32_t result;
vipinranka 12:9a20164dcc47 613
vipinranka 12:9a20164dcc47 614 __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 615 return(result);
vipinranka 12:9a20164dcc47 616 }
vipinranka 12:9a20164dcc47 617
vipinranka 12:9a20164dcc47 618 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)
vipinranka 12:9a20164dcc47 619 {
vipinranka 12:9a20164dcc47 620 uint32_t result;
vipinranka 12:9a20164dcc47 621
vipinranka 12:9a20164dcc47 622 __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
vipinranka 12:9a20164dcc47 623 return(result);
vipinranka 12:9a20164dcc47 624 }
vipinranka 12:9a20164dcc47 625
vipinranka 12:9a20164dcc47 626 #define __PKHBT(ARG1,ARG2,ARG3) \
vipinranka 12:9a20164dcc47 627 ({ \
vipinranka 12:9a20164dcc47 628 uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
vipinranka 12:9a20164dcc47 629 __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
vipinranka 12:9a20164dcc47 630 __RES; \
vipinranka 12:9a20164dcc47 631 })
vipinranka 12:9a20164dcc47 632
vipinranka 12:9a20164dcc47 633 #define __PKHTB(ARG1,ARG2,ARG3) \
vipinranka 12:9a20164dcc47 634 ({ \
vipinranka 12:9a20164dcc47 635 uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
vipinranka 12:9a20164dcc47 636 if (ARG3 == 0) \
vipinranka 12:9a20164dcc47 637 __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \
vipinranka 12:9a20164dcc47 638 else \
vipinranka 12:9a20164dcc47 639 __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
vipinranka 12:9a20164dcc47 640 __RES; \
vipinranka 12:9a20164dcc47 641 })
vipinranka 12:9a20164dcc47 642
vipinranka 12:9a20164dcc47 643 __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
vipinranka 12:9a20164dcc47 644 {
vipinranka 12:9a20164dcc47 645 int32_t result;
vipinranka 12:9a20164dcc47 646
vipinranka 12:9a20164dcc47 647 __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
vipinranka 12:9a20164dcc47 648 return(result);
vipinranka 12:9a20164dcc47 649 }
vipinranka 12:9a20164dcc47 650
vipinranka 12:9a20164dcc47 651 /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
vipinranka 12:9a20164dcc47 652
vipinranka 12:9a20164dcc47 653
vipinranka 12:9a20164dcc47 654
vipinranka 12:9a20164dcc47 655 #elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
vipinranka 12:9a20164dcc47 656 /* TASKING carm specific functions */
vipinranka 12:9a20164dcc47 657
vipinranka 12:9a20164dcc47 658
vipinranka 12:9a20164dcc47 659 /*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
vipinranka 12:9a20164dcc47 660 /* not yet supported */
vipinranka 12:9a20164dcc47 661 /*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
vipinranka 12:9a20164dcc47 662
vipinranka 12:9a20164dcc47 663
vipinranka 12:9a20164dcc47 664 #endif
vipinranka 12:9a20164dcc47 665
vipinranka 12:9a20164dcc47 666 /*@} end of group CMSIS_SIMD_intrinsics */
vipinranka 12:9a20164dcc47 667
vipinranka 12:9a20164dcc47 668
vipinranka 12:9a20164dcc47 669 #endif /* __CORE_CM4_SIMD_H */
vipinranka 12:9a20164dcc47 670
vipinranka 12:9a20164dcc47 671 #ifdef __cplusplus
vipinranka 12:9a20164dcc47 672 }
vipinranka 12:9a20164dcc47 673 #endif