Fork of gr-peach-opencv-project-sd-card
arithm_core.hpp
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_ARITHM_CORE_HPP__
#define __OPENCV_ARITHM_CORE_HPP__

#include "arithm_simd.hpp"

namespace cv {

template<typename T1, typename T2=T1, typename T3=T1> struct OpAdd
{
    typedef T1 type1;
    typedef T2 type2;
    typedef T3 rtype;
    T3 operator ()(const T1 a, const T2 b) const { return saturate_cast<T3>(a + b); }
};

template<typename T1, typename T2=T1, typename T3=T1> struct OpSub
{
    typedef T1 type1;
    typedef T2 type2;
    typedef T3 rtype;
    T3 operator ()(const T1 a, const T2 b) const { return saturate_cast<T3>(a - b); }
};

template<typename T1, typename T2=T1, typename T3=T1> struct OpRSub
{
    typedef T1 type1;
    typedef T2 type2;
    typedef T3 rtype;
    T3 operator ()(const T1 a, const T2 b) const { return saturate_cast<T3>(b - a); }
};

template<typename T> struct OpMin
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    T operator ()(const T a, const T b) const { return std::min(a, b); }
};

template<typename T> struct OpMax
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    T operator ()(const T a, const T b) const { return std::max(a, b); }
};

template<typename T> struct OpAbsDiff
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    T operator()(T a, T b) const { return a > b ? a - b : b - a; }
};

template<typename T> struct OpAnd
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    T operator()( T a, T b ) const { return a & b; }
};

template<typename T> struct OpOr
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    T operator()( T a, T b ) const { return a | b; }
};

template<typename T> struct OpXor
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    T operator()( T a, T b ) const { return a ^ b; }
};

template<typename T> struct OpNot
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    T operator()( T a, T ) const { return ~a; }
};

//=============================================================================

template<typename T, class Op, class VOp>
void vBinOp(const T* src1, size_t step1, const T* src2, size_t step2, T* dst, size_t step, int width, int height)
{
#if CV_SSE2 || CV_NEON
    VOp vop;
#endif
    Op op;

    for( ; height--; src1 = (const T *)((const uchar *)src1 + step1),
                     src2 = (const T *)((const uchar *)src2 + step2),
                     dst = (T *)((uchar *)dst + step) )
    {
        int x = 0;

#if CV_NEON || CV_SSE2
#if CV_AVX2
        if( USE_AVX2 )
        {
            for( ; x <= width - 32/(int)sizeof(T); x += 32/sizeof(T) )
            {
                typename VLoadStore256<T>::reg_type r0 = VLoadStore256<T>::load(src1 + x);
                r0 = vop(r0, VLoadStore256<T>::load(src2 + x));
                VLoadStore256<T>::store(dst + x, r0);
            }
        }
#else
#if CV_SSE2
        if( USE_SSE2 )
        {
#endif // CV_SSE2
            for( ; x <= width - 32/(int)sizeof(T); x += 32/sizeof(T) )
            {
                typename VLoadStore128<T>::reg_type r0 = VLoadStore128<T>::load(src1 + x);
                typename VLoadStore128<T>::reg_type r1 = VLoadStore128<T>::load(src1 + x + 16/sizeof(T));
                r0 = vop(r0, VLoadStore128<T>::load(src2 + x));
                r1 = vop(r1, VLoadStore128<T>::load(src2 + x + 16/sizeof(T)));
                VLoadStore128<T>::store(dst + x, r0);
                VLoadStore128<T>::store(dst + x + 16/sizeof(T), r1);
            }
#if CV_SSE2
        }
#endif // CV_SSE2
#endif // CV_AVX2
#endif // CV_NEON || CV_SSE2

#if CV_AVX2
        // nothing
#elif CV_SSE2
        if( USE_SSE2 )
        {
            for( ; x <= width - 8/(int)sizeof(T); x += 8/sizeof(T) )
            {
                typename VLoadStore64<T>::reg_type r = VLoadStore64<T>::load(src1 + x);
                r = vop(r, VLoadStore64<T>::load(src2 + x));
                VLoadStore64<T>::store(dst + x, r);
            }
        }
#endif

#if CV_ENABLE_UNROLLED
        for( ; x <= width - 4; x += 4 )
        {
            T v0 = op(src1[x], src2[x]);
            T v1 = op(src1[x+1], src2[x+1]);
            dst[x] = v0; dst[x+1] = v1;
            v0 = op(src1[x+2], src2[x+2]);
            v1 = op(src1[x+3], src2[x+3]);
            dst[x+2] = v0; dst[x+3] = v1;
        }
#endif

        for( ; x < width; x++ )
            dst[x] = op(src1[x], src2[x]);
    }
}
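
/* [Editor's sketch, not part of the original header] How an Op functor behaves
   in the scalar tail of vBinOp above, assuming cv::saturate_cast from OpenCV's
   core headers:

       uchar a = 200, b = 100;
       OpAdd<uchar> add;
       uchar s = add(a, b);        // saturate_cast<uchar>(300) -> 255, no wrap to 44
       OpAbsDiff<uchar> absdiff;
       uchar d = absdiff(a, b);    // 100; the branch avoids unsigned underflow

   The VOp template argument is expected to apply the same operation to a whole
   SIMD register; vBinOp runs VOp while at least one full vector of pixels
   remains in the row and finishes the remainder with Op.
*/
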
template<typename T, class Op, class Op32>
void vBinOp32(const T* src1, size_t step1, const T* src2, size_t step2,
              T* dst, size_t step, int width, int height)
{
#if CV_SSE2 || CV_NEON
    Op32 op32;
#endif
    Op op;

    for( ; height--; src1 = (const T *)((const uchar *)src1 + step1),
                     src2 = (const T *)((const uchar *)src2 + step2),
                     dst = (T *)((uchar *)dst + step) )
    {
        int x = 0;

#if CV_AVX2
        if( USE_AVX2 )
        {
            if( (((size_t)src1|(size_t)src2|(size_t)dst)&31) == 0 )
            {
                for( ; x <= width - 8; x += 8 )
                {
                    typename VLoadStore256Aligned<T>::reg_type r0 = VLoadStore256Aligned<T>::load(src1 + x);
                    r0 = op32(r0, VLoadStore256Aligned<T>::load(src2 + x));
                    VLoadStore256Aligned<T>::store(dst + x, r0);
                }
            }
        }
#elif CV_SSE2
        if( USE_SSE2 )
        {
            if( (((size_t)src1|(size_t)src2|(size_t)dst)&15) == 0 )
            {
                for( ; x <= width - 8; x += 8 )
                {
                    typename VLoadStore128Aligned<T>::reg_type r0 = VLoadStore128Aligned<T>::load(src1 + x);
                    typename VLoadStore128Aligned<T>::reg_type r1 = VLoadStore128Aligned<T>::load(src1 + x + 4);
                    r0 = op32(r0, VLoadStore128Aligned<T>::load(src2 + x));
                    r1 = op32(r1, VLoadStore128Aligned<T>::load(src2 + x + 4));
                    VLoadStore128Aligned<T>::store(dst + x, r0);
                    VLoadStore128Aligned<T>::store(dst + x + 4, r1);
                }
            }
        }
#endif // CV_AVX2

#if CV_NEON || CV_SSE2
#if CV_AVX2
        if( USE_AVX2 )
        {
            for( ; x <= width - 8; x += 8 )
            {
                typename VLoadStore256<T>::reg_type r0 = VLoadStore256<T>::load(src1 + x);
                r0 = op32(r0, VLoadStore256<T>::load(src2 + x));
                VLoadStore256<T>::store(dst + x, r0);
            }
        }
#else
#if CV_SSE2
        if( USE_SSE2 )
        {
#endif // CV_SSE2
            for( ; x <= width - 8; x += 8 )
            {
                typename VLoadStore128<T>::reg_type r0 = VLoadStore128<T>::load(src1 + x);
                typename VLoadStore128<T>::reg_type r1 = VLoadStore128<T>::load(src1 + x + 4);
                r0 = op32(r0, VLoadStore128<T>::load(src2 + x));
                r1 = op32(r1, VLoadStore128<T>::load(src2 + x + 4));
                VLoadStore128<T>::store(dst + x, r0);
                VLoadStore128<T>::store(dst + x + 4, r1);
            }
#if CV_SSE2
        }
#endif // CV_SSE2
#endif // CV_AVX2
#endif // CV_NEON || CV_SSE2

#if CV_ENABLE_UNROLLED
        for( ; x <= width - 4; x += 4 )
        {
            T v0 = op(src1[x], src2[x]);
            T v1 = op(src1[x+1], src2[x+1]);
            dst[x] = v0; dst[x+1] = v1;
            v0 = op(src1[x+2], src2[x+2]);
            v1 = op(src1[x+3], src2[x+3]);
            dst[x+2] = v0; dst[x+3] = v1;
        }
#endif

        for( ; x < width; x++ )
            dst[x] = op(src1[x], src2[x]);
    }
}


template<typename T, class Op, class Op64>
void vBinOp64(const T* src1, size_t step1, const T* src2, size_t step2,
              T* dst, size_t step, int width, int height)
{
#if CV_SSE2
    Op64 op64;
#endif
    Op op;

    for( ; height--; src1 = (const T *)((const uchar *)src1 + step1),
                     src2 = (const T *)((const uchar *)src2 + step2),
                     dst = (T *)((uchar *)dst + step) )
    {
        int x = 0;

#if CV_AVX2
        if( USE_AVX2 )
        {
            if( (((size_t)src1|(size_t)src2|(size_t)dst)&31) == 0 )
            {
                for( ; x <= width - 4; x += 4 )
                {
                    typename VLoadStore256Aligned<T>::reg_type r0 = VLoadStore256Aligned<T>::load(src1 + x);
                    r0 = op64(r0, VLoadStore256Aligned<T>::load(src2 + x));
                    VLoadStore256Aligned<T>::store(dst + x, r0);
                }
            }
        }
#elif CV_SSE2
        if( USE_SSE2 )
        {
            if( (((size_t)src1|(size_t)src2|(size_t)dst)&15) == 0 )
            {
                for( ; x <= width - 4; x += 4 )
                {
                    typename VLoadStore128Aligned<T>::reg_type r0 = VLoadStore128Aligned<T>::load(src1 + x);
                    typename VLoadStore128Aligned<T>::reg_type r1 = VLoadStore128Aligned<T>::load(src1 + x + 2);
                    r0 = op64(r0, VLoadStore128Aligned<T>::load(src2 + x));
                    r1 = op64(r1, VLoadStore128Aligned<T>::load(src2 + x + 2));
                    VLoadStore128Aligned<T>::store(dst + x, r0);
                    VLoadStore128Aligned<T>::store(dst + x + 2, r1);
                }
            }
        }
#endif

        for( ; x <= width - 4; x += 4 )
        {
            T v0 = op(src1[x], src2[x]);
            T v1 = op(src1[x+1], src2[x+1]);
            dst[x] = v0; dst[x+1] = v1;
            v0 = op(src1[x+2], src2[x+2]);
            v1 = op(src1[x+3], src2[x+3]);
            dst[x+2] = v0; dst[x+3] = v1;
        }

        for( ; x < width; x++ )
            dst[x] = op(src1[x], src2[x]);
    }
}
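
/* [Editor's note, illustrative only] step1/step2/step are row strides in
   BYTES, which is why the row loops above advance through a uchar* cast:
   image rows may be padded, so "next row" is not "pointer + width".
   A plain scalar equivalent of one vBinOp32 pass with Op = OpAdd<int>:

       for( int y = 0; y < height; y++ )
       {
           const int* row1 = (const int*)((const uchar*)src1 + y*step1);
           const int* row2 = (const int*)((const uchar*)src2 + y*step2);
           int*       drow = (int*)((uchar*)dst + y*step);
           for( int x = 0; x < width; x++ )
               drow[x] = row1[x] + row2[x];   // saturate_cast<int> is the identity here
       }
*/
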
template<typename T> static void
cmp_(const T* src1, size_t step1, const T* src2, size_t step2,
     uchar* dst, size_t step, int width, int height, int code)
{
    step1 /= sizeof(src1[0]);
    step2 /= sizeof(src2[0]);
    if( code == CMP_GE || code == CMP_LT )
    {
        std::swap(src1, src2);
        std::swap(step1, step2);
        code = code == CMP_GE ? CMP_LE : CMP_GT;
    }

    Cmp_SIMD<T> vop(code);

    if( code == CMP_GT || code == CMP_LE )
    {
        int m = code == CMP_GT ? 0 : 255;
        for( ; height--; src1 += step1, src2 += step2, dst += step )
        {
            int x = vop(src1, src2, dst, width);
#if CV_ENABLE_UNROLLED
            for( ; x <= width - 4; x += 4 )
            {
                int t0, t1;
                t0 = -(src1[x] > src2[x]) ^ m;
                t1 = -(src1[x+1] > src2[x+1]) ^ m;
                dst[x] = (uchar)t0; dst[x+1] = (uchar)t1;
                t0 = -(src1[x+2] > src2[x+2]) ^ m;
                t1 = -(src1[x+3] > src2[x+3]) ^ m;
                dst[x+2] = (uchar)t0; dst[x+3] = (uchar)t1;
            }
#endif
            for( ; x < width; x++ )
                dst[x] = (uchar)(-(src1[x] > src2[x]) ^ m);
        }
    }
    else if( code == CMP_EQ || code == CMP_NE )
    {
        int m = code == CMP_EQ ? 0 : 255;
        for( ; height--; src1 += step1, src2 += step2, dst += step )
        {
            int x = 0;
#if CV_ENABLE_UNROLLED
            for( ; x <= width - 4; x += 4 )
            {
                int t0, t1;
                t0 = -(src1[x] == src2[x]) ^ m;
                t1 = -(src1[x+1] == src2[x+1]) ^ m;
                dst[x] = (uchar)t0; dst[x+1] = (uchar)t1;
                t0 = -(src1[x+2] == src2[x+2]) ^ m;
                t1 = -(src1[x+3] == src2[x+3]) ^ m;
                dst[x+2] = (uchar)t0; dst[x+3] = (uchar)t1;
            }
#endif
            for( ; x < width; x++ )
                dst[x] = (uchar)(-(src1[x] == src2[x]) ^ m);
        }
    }
}
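
/* [Editor's sketch of the mask trick used in cmp_ above; not original code]
   (a > b) evaluates to 0 or 1; negation turns that into 0x00... or 0xFF...,
   and XOR with m (0 or 255) selects either the predicate or its complement:

       int a = 7, b = 3;
       uchar gt = (uchar)(-(a > b) ^ 0);     // 255: "a >  b" mask
       uchar le = (uchar)(-(a > b) ^ 255);   //   0: "a <= b" mask

   CMP_GE and CMP_LT never reach these loops: the prologue swaps src1/src2 and
   rewrites them as CMP_LE and CMP_GT, so two loop bodies cover all six codes.
*/
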
template<typename T, typename WT> static void
mul_( const T* src1, size_t step1, const T* src2, size_t step2,
      T* dst, size_t step, int width, int height, WT scale )
{
    step1 /= sizeof(src1[0]);
    step2 /= sizeof(src2[0]);
    step /= sizeof(dst[0]);

    Mul_SIMD<T, WT> vop;

    if( scale == (WT)1. )
    {
        for( ; height--; src1 += step1, src2 += step2, dst += step )
        {
            int i = vop(src1, src2, dst, width, scale);
#if CV_ENABLE_UNROLLED
            for(; i <= width - 4; i += 4 )
            {
                T t0;
                T t1;
                t0 = saturate_cast<T>(src1[i  ] * src2[i  ]);
                t1 = saturate_cast<T>(src1[i+1] * src2[i+1]);
                dst[i  ] = t0;
                dst[i+1] = t1;

                t0 = saturate_cast<T>(src1[i+2] * src2[i+2]);
                t1 = saturate_cast<T>(src1[i+3] * src2[i+3]);
                dst[i+2] = t0;
                dst[i+3] = t1;
            }
#endif
            for( ; i < width; i++ )
                dst[i] = saturate_cast<T>(src1[i] * src2[i]);
        }
    }
    else
    {
        for( ; height--; src1 += step1, src2 += step2, dst += step )
        {
            int i = vop(src1, src2, dst, width, scale);
#if CV_ENABLE_UNROLLED
            for(; i <= width - 4; i += 4 )
            {
                T t0 = saturate_cast<T>(scale*(WT)src1[i]*src2[i]);
                T t1 = saturate_cast<T>(scale*(WT)src1[i+1]*src2[i+1]);
                dst[i] = t0; dst[i+1] = t1;

                t0 = saturate_cast<T>(scale*(WT)src1[i+2]*src2[i+2]);
                t1 = saturate_cast<T>(scale*(WT)src1[i+3]*src2[i+3]);
                dst[i+2] = t0; dst[i+3] = t1;
            }
#endif
            for( ; i < width; i++ )
                dst[i] = saturate_cast<T>(scale*(WT)src1[i]*src2[i]);
        }
    }
}


template<typename T> static void
div_i( const T* src1, size_t step1, const T* src2, size_t step2,
       T* dst, size_t step, int width, int height, double scale )
{
    step1 /= sizeof(src1[0]);
    step2 /= sizeof(src2[0]);
    step /= sizeof(dst[0]);

    Div_SIMD<T> vop;
    float scale_f = (float)scale;

    for( ; height--; src1 += step1, src2 += step2, dst += step )
    {
        int i = vop(src1, src2, dst, width, scale);
        for( ; i < width; i++ )
        {
            T num = src1[i], denom = src2[i];
            dst[i] = denom != 0 ? saturate_cast<T>(num*scale_f/denom) : (T)0;
        }
    }
}

template<typename T> static void
div_f( const T* src1, size_t step1, const T* src2, size_t step2,
       T* dst, size_t step, int width, int height, double scale )
{
    T scale_f = (T)scale;
    step1 /= sizeof(src1[0]);
    step2 /= sizeof(src2[0]);
    step /= sizeof(dst[0]);

    Div_SIMD<T> vop;

    for( ; height--; src1 += step1, src2 += step2, dst += step )
    {
        int i = vop(src1, src2, dst, width, scale);
        for( ; i < width; i++ )
        {
            T num = src1[i], denom = src2[i];
            dst[i] = denom != 0 ? saturate_cast<T>(num*scale_f/denom) : (T)0;
        }
    }
}
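
/* [Editor's note, not part of the original header] The scalar tails of div_i
   and div_f define division by zero as 0 rather than trapping or producing
   inf/NaN, the same convention cv::divide documents for its output. E.g.:

       short num = 100, den = 0;
       float scale_f = 1.f;
       short q = den != 0 ? saturate_cast<short>(num*scale_f/den) : (short)0;  // q == 0
*/
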
template<typename T> static void
recip_i( const T*, size_t, const T* src2, size_t step2,
         T* dst, size_t step, int width, int height, double scale )
{
    step2 /= sizeof(src2[0]);
    step /= sizeof(dst[0]);

    Recip_SIMD<T> vop;
    float scale_f = (float)scale;

    for( ; height--; src2 += step2, dst += step )
    {
        int i = vop(src2, dst, width, scale);
        for( ; i < width; i++ )
        {
            T denom = src2[i];
            dst[i] = denom != 0 ? saturate_cast<T>(scale_f/denom) : (T)0;
        }
    }
}

template<typename T> static void
recip_f( const T*, size_t, const T* src2, size_t step2,
         T* dst, size_t step, int width, int height, double scale )
{
    T scale_f = (T)scale;
    step2 /= sizeof(src2[0]);
    step /= sizeof(dst[0]);

    Recip_SIMD<T> vop;

    for( ; height--; src2 += step2, dst += step )
    {
        int i = vop(src2, dst, width, scale);
        for( ; i < width; i++ )
        {
            T denom = src2[i];
            dst[i] = denom != 0 ? saturate_cast<T>(scale_f/denom) : (T)0;
        }
    }
}

template<typename T, typename WT> static void
addWeighted_( const T* src1, size_t step1, const T* src2, size_t step2,
              T* dst, size_t step, int width, int height, void* _scalars )
{
    const double* scalars = (const double*)_scalars;
    WT alpha = (WT)scalars[0], beta = (WT)scalars[1], gamma = (WT)scalars[2];
    step1 /= sizeof(src1[0]);
    step2 /= sizeof(src2[0]);
    step /= sizeof(dst[0]);

    AddWeighted_SIMD<T, WT> vop;

    for( ; height--; src1 += step1, src2 += step2, dst += step )
    {
        int x = vop(src1, src2, dst, width, alpha, beta, gamma);
#if CV_ENABLE_UNROLLED
        for( ; x <= width - 4; x += 4 )
        {
            T t0 = saturate_cast<T>(src1[x]*alpha + src2[x]*beta + gamma);
            T t1 = saturate_cast<T>(src1[x+1]*alpha + src2[x+1]*beta + gamma);
            dst[x] = t0; dst[x+1] = t1;

            t0 = saturate_cast<T>(src1[x+2]*alpha + src2[x+2]*beta + gamma);
            t1 = saturate_cast<T>(src1[x+3]*alpha + src2[x+3]*beta + gamma);
            dst[x+2] = t0; dst[x+3] = t1;
        }
#endif
        for( ; x < width; x++ )
            dst[x] = saturate_cast<T>(src1[x]*alpha + src2[x]*beta + gamma);
    }
}

} // cv::


#endif // __OPENCV_ARITHM_CORE_HPP__
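
A usage sketch for the blend kernel above (editor's addition; the file only defines the templates, and the driver below is hypothetical — in OpenCV these kernels sit behind the public cv::addWeighted entry point in the core module):

    // Blend two 1x4 uchar rows: dst = saturate(src1*alpha + src2*beta + gamma)
    uchar a[] = { 0, 64, 128, 255 };
    uchar b[] = { 255, 192, 128, 0 };
    uchar out[4];
    double scalars[3] = { 0.5, 0.5, 10.0 };          // alpha, beta, gamma
    cv::addWeighted_<uchar, float>(a, sizeof(a), b, sizeof(b),
                                   out, sizeof(out), /*width=*/4, /*height=*/1,
                                   (void*)scalars);
    // out[x] == saturate_cast<uchar>(a[x]*0.5f + b[x]*0.5f + 10.f)

Note that the three scalars are passed as an untyped void* to a double[3] and converted to the working type WT inside the kernel, which keeps one function signature usable for every element type.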
