arm_cfft_radix2_q15.c
/* ----------------------------------------------------------------------
 * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
 *
 * $Date:        19. March 2015
 * $Revision:    V.1.4.5
 *
 * Project:      CMSIS DSP Library
 * Title:        arm_cfft_radix2_q15.c
 *
 * Description:  Radix-2 Decimation in Frequency CFFT & CIFFT Fixed point processing function
 *
 * Target Processor: Cortex-M4/Cortex-M3/Cortex-M0
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   - Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   - Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   - Neither the name of ARM LIMITED nor the names of its contributors
 *     may be used to endorse or promote products derived from this
 *     software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 * -------------------------------------------------------------------- */

#include "arm_math.h"

void arm_radix2_butterfly_q15(
  q15_t * pSrc,
  uint32_t fftLen,
  q15_t * pCoef,
  uint16_t twidCoefModifier);

void arm_radix2_butterfly_inverse_q15(
  q15_t * pSrc,
  uint32_t fftLen,
  q15_t * pCoef,
  uint16_t twidCoefModifier);

void arm_bitreversal_q15(
  q15_t * pSrc,
  uint32_t fftLen,
  uint16_t bitRevFactor,
  uint16_t * pBitRevTab);

/**
 * @ingroup groupTransforms
 */

/**
 * @addtogroup ComplexFFT
 * @{
 */

/**
 * @details
 * @brief Processing function for the fixed-point CFFT/CIFFT.
 * @deprecated Do not use this function. It has been superseded by \ref arm_cfft_q15 and will be removed
 * @param[in]      *S    points to an instance of the fixed-point CFFT/CIFFT structure.
 * @param[in, out] *pSrc points to the complex data buffer of size <code>2*fftLen</code>. Processing occurs in-place.
 * @return none.
 */

void arm_cfft_radix2_q15(
  const arm_cfft_radix2_instance_q15 * S,
  q15_t * pSrc)
{

  if(S->ifftFlag == 1u)
  {
    arm_radix2_butterfly_inverse_q15(pSrc, S->fftLen,
                                     S->pTwiddle, S->twidCoefModifier);
  }
  else
  {
    arm_radix2_butterfly_q15(pSrc, S->fftLen,
                             S->pTwiddle, S->twidCoefModifier);
  }

  arm_bitreversal_q15(pSrc, S->fftLen, S->bitRevFactor, S->pBitRevTable);
}

/**
 * @} end of ComplexFFT group
 */

void arm_radix2_butterfly_q15(
  q15_t * pSrc,
  uint32_t fftLen,
  q15_t * pCoef,
  uint16_t twidCoefModifier)
{
#ifndef ARM_MATH_CM0_FAMILY

  unsigned i, j, k, l;
  unsigned n1, n2, ia;
  q15_t in;
  q31_t T, S, R;
  q31_t coeff, out1, out2;

  //N = fftLen;
  n2 = fftLen;

  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  // loop for groups
  for (i = 0; i < n2; i++)
  {
    coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

    ia = ia + twidCoefModifier;

    l = i + n2;

    T = _SIMD32_OFFSET(pSrc + (2 * i));
    in = ((int16_t) (T & 0xFFFF)) >> 1;
    T = ((T >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    S = _SIMD32_OFFSET(pSrc + (2 * l));
    in = ((int16_t) (S & 0xFFFF)) >> 1;
    S = ((S >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    R = __QSUB16(T, S);

    _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

    out1 = __SMUAD(coeff, R) >> 16;
    out2 = __SMUSDX(coeff, R);

#else

    out1 = __SMUSDX(R, coeff) >> 16u;
    out2 = __SMUAD(coeff, R);

#endif // #ifndef ARM_MATH_BIG_ENDIAN

    _SIMD32_OFFSET(pSrc + (2u * l)) =
      (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

    coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

    ia = ia + twidCoefModifier;

    // loop for butterfly
    i++;
    l++;

    T = _SIMD32_OFFSET(pSrc + (2 * i));
    in = ((int16_t) (T & 0xFFFF)) >> 1;
    T = ((T >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    S = _SIMD32_OFFSET(pSrc + (2 * l));
    in = ((int16_t) (S & 0xFFFF)) >> 1;
    S = ((S >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    R = __QSUB16(T, S);

    _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

    out1 = __SMUAD(coeff, R) >> 16;
    out2 = __SMUSDX(coeff, R);

#else

    out1 = __SMUSDX(R, coeff) >> 16u;
    out2 = __SMUAD(coeff, R);

#endif // #ifndef ARM_MATH_BIG_ENDIAN

    _SIMD32_OFFSET(pSrc + (2u * l)) =
      (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

  } // groups loop end

  twidCoefModifier = twidCoefModifier << 1u;

  // loop for stage
  for (k = fftLen / 2; k > 2; k = k >> 1)
  {
    n1 = n2;
    n2 = n2 >> 1;
    ia = 0;

    // loop for groups
    for (j = 0; j < n2; j++)
    {
      coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

      ia = ia + twidCoefModifier;

      // loop for butterfly
      for (i = j; i < fftLen; i += n1)
      {
        l = i + n2;

        T = _SIMD32_OFFSET(pSrc + (2 * i));
        S = _SIMD32_OFFSET(pSrc + (2 * l));

        R = __QSUB16(T, S);

        _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

        out1 = __SMUAD(coeff, R) >> 16;
        out2 = __SMUSDX(coeff, R);

#else

        out1 = __SMUSDX(R, coeff) >> 16u;
        out2 = __SMUAD(coeff, R);

#endif // #ifndef ARM_MATH_BIG_ENDIAN

        _SIMD32_OFFSET(pSrc + (2u * l)) =
          (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

        i += n1;

        l = i + n2;

        T = _SIMD32_OFFSET(pSrc + (2 * i));
        S = _SIMD32_OFFSET(pSrc + (2 * l));

        R = __QSUB16(T, S);

        _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

        out1 = __SMUAD(coeff, R) >> 16;
        out2 = __SMUSDX(coeff, R);

#else

        out1 = __SMUSDX(R, coeff) >> 16u;
        out2 = __SMUAD(coeff, R);

#endif // #ifndef ARM_MATH_BIG_ENDIAN

        _SIMD32_OFFSET(pSrc + (2u * l)) =
          (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

      } // butterfly loop end

    } // groups loop end

    twidCoefModifier = twidCoefModifier << 1u;
  } // stages loop end

  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

  ia = ia + twidCoefModifier;

  // loop for butterfly
  for (i = 0; i < fftLen; i += n1)
  {
    l = i + n2;

    T = _SIMD32_OFFSET(pSrc + (2 * i));
    S = _SIMD32_OFFSET(pSrc + (2 * l));

    R = __QSUB16(T, S);

    _SIMD32_OFFSET(pSrc + (2 * i)) = __QADD16(T, S);

    _SIMD32_OFFSET(pSrc + (2u * l)) = R;

    i += n1;
    l = i + n2;

    T = _SIMD32_OFFSET(pSrc + (2 * i));
    S = _SIMD32_OFFSET(pSrc + (2 * l));

    R = __QSUB16(T, S);

    _SIMD32_OFFSET(pSrc + (2 * i)) = __QADD16(T, S);

    _SIMD32_OFFSET(pSrc + (2u * l)) = R;

  } // groups loop end

#else

  unsigned i, j, k, l;
  unsigned n1, n2, ia;
  q15_t xt, yt, cosVal, sinVal;

  //N = fftLen;
  n2 = fftLen;

  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  // loop for groups
  for (j = 0; j < n2; j++)
  {
    cosVal = pCoef[ia * 2];
    sinVal = pCoef[(ia * 2) + 1];
    ia = ia + twidCoefModifier;

    // loop for butterfly
    for (i = j; i < fftLen; i += n1)
    {
      l = i + n2;
      xt = (pSrc[2 * i] >> 1u) - (pSrc[2 * l] >> 1u);
      pSrc[2 * i] = ((pSrc[2 * i] >> 1u) + (pSrc[2 * l] >> 1u)) >> 1u;

      yt = (pSrc[2 * i + 1] >> 1u) - (pSrc[2 * l + 1] >> 1u);
      pSrc[2 * i + 1] =
        ((pSrc[2 * l + 1] >> 1u) + (pSrc[2 * i + 1] >> 1u)) >> 1u;

      pSrc[2u * l] = (((int16_t) (((q31_t) xt * cosVal) >> 16)) +
                      ((int16_t) (((q31_t) yt * sinVal) >> 16)));

      pSrc[2u * l + 1u] = (((int16_t) (((q31_t) yt * cosVal) >> 16)) -
                           ((int16_t) (((q31_t) xt * sinVal) >> 16)));

    } // butterfly loop end

  } // groups loop end

  twidCoefModifier = twidCoefModifier << 1u;

  // loop for stage
  for (k = fftLen / 2; k > 2; k = k >> 1)
  {
    n1 = n2;
    n2 = n2 >> 1;
    ia = 0;

    // loop for groups
    for (j = 0; j < n2; j++)
    {
      cosVal = pCoef[ia * 2];
      sinVal = pCoef[(ia * 2) + 1];
      ia = ia + twidCoefModifier;

      // loop for butterfly
      for (i = j; i < fftLen; i += n1)
      {
        l = i + n2;
        xt = pSrc[2 * i] - pSrc[2 * l];
        pSrc[2 * i] = (pSrc[2 * i] + pSrc[2 * l]) >> 1u;

        yt = pSrc[2 * i + 1] - pSrc[2 * l + 1];
        pSrc[2 * i + 1] = (pSrc[2 * l + 1] + pSrc[2 * i + 1]) >> 1u;

        pSrc[2u * l] = (((int16_t) (((q31_t) xt * cosVal) >> 16)) +
                        ((int16_t) (((q31_t) yt * sinVal) >> 16)));

        pSrc[2u * l + 1u] = (((int16_t) (((q31_t) yt * cosVal) >> 16)) -
                             ((int16_t) (((q31_t) xt * sinVal) >> 16)));

      } // butterfly loop end

    } // groups loop end

    twidCoefModifier = twidCoefModifier << 1u;
  } // stages loop end

  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  // loop for groups
  for (j = 0; j < n2; j++)
  {
    cosVal = pCoef[ia * 2];
    sinVal = pCoef[(ia * 2) + 1];

    ia = ia + twidCoefModifier;

    // loop for butterfly
    for (i = j; i < fftLen; i += n1)
    {
      l = i + n2;
      xt = pSrc[2 * i] - pSrc[2 * l];
      pSrc[2 * i] = (pSrc[2 * i] + pSrc[2 * l]);

      yt = pSrc[2 * i + 1] - pSrc[2 * l + 1];
      pSrc[2 * i + 1] = (pSrc[2 * l + 1] + pSrc[2 * i + 1]);

      pSrc[2u * l] = xt;

      pSrc[2u * l + 1u] = yt;

    } // butterfly loop end

  } // groups loop end

  twidCoefModifier = twidCoefModifier << 1u;

#endif // #ifndef ARM_MATH_CM0_FAMILY

}


void arm_radix2_butterfly_inverse_q15(
  q15_t * pSrc,
  uint32_t fftLen,
  q15_t * pCoef,
  uint16_t twidCoefModifier)
{
#ifndef ARM_MATH_CM0_FAMILY

  unsigned i, j, k, l;
  unsigned n1, n2, ia;
  q15_t in;
  q31_t T, S, R;
  q31_t coeff, out1, out2;

  //N = fftLen;
  n2 = fftLen;

  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  // loop for groups
  for (i = 0; i < n2; i++)
  {
    coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

    ia = ia + twidCoefModifier;

    l = i + n2;

    T = _SIMD32_OFFSET(pSrc + (2 * i));
    in = ((int16_t) (T & 0xFFFF)) >> 1;
    T = ((T >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    S = _SIMD32_OFFSET(pSrc + (2 * l));
    in = ((int16_t) (S & 0xFFFF)) >> 1;
    S = ((S >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    R = __QSUB16(T, S);

    _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

    out1 = __SMUSD(coeff, R) >> 16;
    out2 = __SMUADX(coeff, R);
#else

    out1 = __SMUADX(R, coeff) >> 16u;
    out2 = __SMUSD(__QSUB(0, coeff), R);

#endif // #ifndef ARM_MATH_BIG_ENDIAN

    _SIMD32_OFFSET(pSrc + (2u * l)) =
      (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

    coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

    ia = ia + twidCoefModifier;

    // loop for butterfly
    i++;
    l++;

    T = _SIMD32_OFFSET(pSrc + (2 * i));
    in = ((int16_t) (T & 0xFFFF)) >> 1;
    T = ((T >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    S = _SIMD32_OFFSET(pSrc + (2 * l));
    in = ((int16_t) (S & 0xFFFF)) >> 1;
    S = ((S >> 1) & 0xFFFF0000) | (in & 0xFFFF);

    R = __QSUB16(T, S);

    _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

    out1 = __SMUSD(coeff, R) >> 16;
    out2 = __SMUADX(coeff, R);
#else

    out1 = __SMUADX(R, coeff) >> 16u;
    out2 = __SMUSD(__QSUB(0, coeff), R);

#endif // #ifndef ARM_MATH_BIG_ENDIAN

    _SIMD32_OFFSET(pSrc + (2u * l)) =
      (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

  } // groups loop end

  twidCoefModifier = twidCoefModifier << 1u;

  // loop for stage
  for (k = fftLen / 2; k > 2; k = k >> 1)
  {
    n1 = n2;
    n2 = n2 >> 1;
    ia = 0;

    // loop for groups
    for (j = 0; j < n2; j++)
    {
      coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

      ia = ia + twidCoefModifier;

      // loop for butterfly
      for (i = j; i < fftLen; i += n1)
      {
        l = i + n2;

        T = _SIMD32_OFFSET(pSrc + (2 * i));
        S = _SIMD32_OFFSET(pSrc + (2 * l));

        R = __QSUB16(T, S);

        _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

        out1 = __SMUSD(coeff, R) >> 16;
        out2 = __SMUADX(coeff, R);

#else

        out1 = __SMUADX(R, coeff) >> 16u;
        out2 = __SMUSD(__QSUB(0, coeff), R);

#endif // #ifndef ARM_MATH_BIG_ENDIAN

        _SIMD32_OFFSET(pSrc + (2u * l)) =
          (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

        i += n1;

        l = i + n2;

        T = _SIMD32_OFFSET(pSrc + (2 * i));
        S = _SIMD32_OFFSET(pSrc + (2 * l));

        R = __QSUB16(T, S);

        _SIMD32_OFFSET(pSrc + (2 * i)) = __SHADD16(T, S);

#ifndef ARM_MATH_BIG_ENDIAN

        out1 = __SMUSD(coeff, R) >> 16;
        out2 = __SMUADX(coeff, R);
#else

        out1 = __SMUADX(R, coeff) >> 16u;
        out2 = __SMUSD(__QSUB(0, coeff), R);

#endif // #ifndef ARM_MATH_BIG_ENDIAN

        _SIMD32_OFFSET(pSrc + (2u * l)) =
          (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF);

      } // butterfly loop end

    } // groups loop end

    twidCoefModifier = twidCoefModifier << 1u;
  } // stages loop end

  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  // loop for groups
  for (j = 0; j < n2; j++)
  {
    coeff = _SIMD32_OFFSET(pCoef + (ia * 2u));

    ia = ia + twidCoefModifier;

    // loop for butterfly
    for (i = j; i < fftLen; i += n1)
    {
      l = i + n2;

      T = _SIMD32_OFFSET(pSrc + (2 * i));
      S = _SIMD32_OFFSET(pSrc + (2 * l));

      R = __QSUB16(T, S);

      _SIMD32_OFFSET(pSrc + (2 * i)) = __QADD16(T, S);

      _SIMD32_OFFSET(pSrc + (2u * l)) = R;

    } // butterfly loop end

  } // groups loop end

  twidCoefModifier = twidCoefModifier << 1u;

#else

  unsigned i, j, k, l;
  unsigned n1, n2, ia;
  q15_t xt, yt, cosVal, sinVal;

  //N = fftLen;
  n2 = fftLen;

  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  // loop for groups
  for (j = 0; j < n2; j++)
  {
    cosVal = pCoef[ia * 2];
    sinVal = pCoef[(ia * 2) + 1];
    ia = ia + twidCoefModifier;

    // loop for butterfly
    for (i = j; i < fftLen; i += n1)
    {
      l = i + n2;
      xt = (pSrc[2 * i] >> 1u) - (pSrc[2 * l] >> 1u);
      pSrc[2 * i] = ((pSrc[2 * i] >> 1u) + (pSrc[2 * l] >> 1u)) >> 1u;

      yt = (pSrc[2 * i + 1] >> 1u) - (pSrc[2 * l + 1] >> 1u);
      pSrc[2 * i + 1] =
        ((pSrc[2 * l + 1] >> 1u) + (pSrc[2 * i + 1] >> 1u)) >> 1u;

      pSrc[2u * l] = (((int16_t) (((q31_t) xt * cosVal) >> 16)) -
                      ((int16_t) (((q31_t) yt * sinVal) >> 16)));

      pSrc[2u * l + 1u] = (((int16_t) (((q31_t) yt * cosVal) >> 16)) +
                           ((int16_t) (((q31_t) xt * sinVal) >> 16)));

    } // butterfly loop end

  } // groups loop end

  twidCoefModifier = twidCoefModifier << 1u;

  // loop for stage
  for (k = fftLen / 2; k > 2; k = k >> 1)
  {
    n1 = n2;
    n2 = n2 >> 1;
    ia = 0;

    // loop for groups
    for (j = 0; j < n2; j++)
    {
      cosVal = pCoef[ia * 2];
      sinVal = pCoef[(ia * 2) + 1];
      ia = ia + twidCoefModifier;

      // loop for butterfly
      for (i = j; i < fftLen; i += n1)
      {
        l = i + n2;
        xt = pSrc[2 * i] - pSrc[2 * l];
        pSrc[2 * i] = (pSrc[2 * i] + pSrc[2 * l]) >> 1u;

        yt = pSrc[2 * i + 1] - pSrc[2 * l + 1];
        pSrc[2 * i + 1] = (pSrc[2 * l + 1] + pSrc[2 * i + 1]) >> 1u;

        pSrc[2u * l] = (((int16_t) (((q31_t) xt * cosVal) >> 16)) -
                        ((int16_t) (((q31_t) yt * sinVal) >> 16)));

        pSrc[2u * l + 1u] = (((int16_t) (((q31_t) yt * cosVal) >> 16)) +
                             ((int16_t) (((q31_t) xt * sinVal) >> 16)));

      } // butterfly loop end

    } // groups loop end

    twidCoefModifier = twidCoefModifier << 1u;
  } // stages loop end

  n1 = n2;
  n2 = n2 >> 1;
  ia = 0;

  cosVal = pCoef[ia * 2];
  sinVal = pCoef[(ia * 2) + 1];

  ia = ia + twidCoefModifier;

  // loop for butterfly
  for (i = 0; i < fftLen; i += n1)
  {
    l = i + n2;
    xt = pSrc[2 * i] - pSrc[2 * l];
    pSrc[2 * i] = (pSrc[2 * i] + pSrc[2 * l]);

    yt = pSrc[2 * i + 1] - pSrc[2 * l + 1];
    pSrc[2 * i + 1] = (pSrc[2 * l + 1] + pSrc[2 * i + 1]);

    pSrc[2u * l] = xt;

    pSrc[2u * l + 1u] = yt;

  } // groups loop end

#endif // #ifndef ARM_MATH_CM0_FAMILY

}
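Usage note: the sketch below is not part of the listing above; it is a minimal example of how this deprecated entry point is typically driven, assuming the standard CMSIS-DSP v1.4.x initialization API (arm_cfft_radix2_init_q15). The buffer name testInput and the length of 256 are illustrative only, and new code should prefer arm_cfft_q15, as the @deprecated tag in the listing notes.

#include "arm_math.h"

#define FFT_LEN 256u                      /* any supported radix-2 length: 16 ... 4096 */

/* Interleaved complex data: { real[0], imag[0], real[1], imag[1], ... },
 * so the buffer holds 2*FFT_LEN q15_t values; it is transformed in place. */
static q15_t testInput[2u * FFT_LEN];     /* illustrative buffer, filled elsewhere */

void run_cfft_radix2_q15_example(void)
{
  arm_cfft_radix2_instance_q15 S;

  /* ifftFlag = 0 selects the forward CFFT, bitReverseFlag = 1 requests
   * bit-reversed (i.e. normal-order) output from the final pass. */
  if (arm_cfft_radix2_init_q15(&S, FFT_LEN, 0u, 1u) == ARM_MATH_SUCCESS)
  {
    arm_cfft_radix2_q15(&S, testInput);   /* in-place transform of the 2*FFT_LEN buffer */
  }
}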
