sumpixels.cpp (from a fork of gr-peach-opencv-project-sd-card)
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2014, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"
#include "opencl_kernels_imgproc.hpp"

namespace cv
{

template <typename T, typename ST, typename QT>
struct Integral_SIMD
{
    bool operator()(const T *, size_t,
                    ST *, size_t,
                    QT *, size_t,
                    ST *, size_t,
                    Size, int) const
    {
        return false;
    }
};

#if CV_SSE2

template <>
struct Integral_SIMD<uchar, int, double>
{
    Integral_SIMD()
    {
        haveSSE2 = checkHardwareSupport(CV_CPU_SSE2);
    }

    bool operator()(const uchar * src, size_t _srcstep,
                    int * sum, size_t _sumstep,
                    double * sqsum, size_t,
                    int * tilted, size_t,
                    Size size, int cn) const
    {
        if (sqsum || tilted || cn != 1 || !haveSSE2)
            return false;

        // the first iteration
        memset(sum, 0, (size.width + 1) * sizeof(int));

        __m128i v_zero = _mm_setzero_si128(), prev = v_zero;
        int j = 0;

        // the others
        for (int i = 0; i < size.height; ++i)
        {
            const uchar * src_row = src + _srcstep * i;
            int * prev_sum_row = (int *)((uchar *)sum + _sumstep * i) + 1;
            int * sum_row = (int *)((uchar *)sum + _sumstep * (i + 1)) + 1;

            sum_row[-1] = 0;

            prev = v_zero;
            j = 0;

            for ( ; j + 7 < size.width; j += 8)
            {
                __m128i vsuml = _mm_loadu_si128((const __m128i *)(prev_sum_row + j));
                __m128i vsumh = _mm_loadu_si128((const __m128i *)(prev_sum_row + j + 4));

                __m128i el8shr0 = _mm_loadl_epi64((const __m128i *)(src_row + j));
                __m128i el8shr1 = _mm_slli_si128(el8shr0, 1);
                __m128i el8shr2 = _mm_slli_si128(el8shr0, 2);
                __m128i el8shr3 = _mm_slli_si128(el8shr0, 3);

                vsuml = _mm_add_epi32(vsuml, prev);
                vsumh = _mm_add_epi32(vsumh, prev);

                __m128i el8shr12 = _mm_add_epi16(_mm_unpacklo_epi8(el8shr1, v_zero),
                                                 _mm_unpacklo_epi8(el8shr2, v_zero));
                __m128i el8shr03 = _mm_add_epi16(_mm_unpacklo_epi8(el8shr0, v_zero),
                                                 _mm_unpacklo_epi8(el8shr3, v_zero));
                __m128i el8 = _mm_add_epi16(el8shr12, el8shr03);

                __m128i el4h = _mm_add_epi16(_mm_unpackhi_epi16(el8, v_zero),
                                             _mm_unpacklo_epi16(el8, v_zero));

                vsuml = _mm_add_epi32(vsuml, _mm_unpacklo_epi16(el8, v_zero));
                vsumh = _mm_add_epi32(vsumh, el4h);

                _mm_storeu_si128((__m128i *)(sum_row + j), vsuml);
                _mm_storeu_si128((__m128i *)(sum_row + j + 4), vsumh);

                prev = _mm_add_epi32(prev, _mm_shuffle_epi32(el4h, _MM_SHUFFLE(3, 3, 3, 3)));
            }

            for (int v = sum_row[j - 1] - prev_sum_row[j - 1]; j < size.width; ++j)
                sum_row[j] = (v += src_row[j]) + prev_sum_row[j];
        }

        return true;
    }

    bool haveSSE2;
};

#endif

template<typename T, typename ST, typename QT>
void integral_( const T* src, size_t _srcstep, ST* sum, size_t _sumstep,
                QT* sqsum, size_t _sqsumstep, ST* tilted, size_t _tiltedstep,
                Size size, int cn )
{
    int x, y, k;

    if (Integral_SIMD<T, ST, QT>()(src, _srcstep,
                                   sum, _sumstep,
                                   sqsum, _sqsumstep,
                                   tilted, _tiltedstep,
                                   size, cn))
        return;

    int srcstep = (int)(_srcstep/sizeof(T));
    int sumstep = (int)(_sumstep/sizeof(ST));
    int tiltedstep = (int)(_tiltedstep/sizeof(ST));
    int sqsumstep = (int)(_sqsumstep/sizeof(QT));

    size.width *= cn;

    memset( sum, 0, (size.width+cn)*sizeof(sum[0]));
    sum += sumstep + cn;

    if( sqsum )
    {
        memset( sqsum, 0, (size.width+cn)*sizeof(sqsum[0]));
        sqsum += sqsumstep + cn;
    }

    if( tilted )
    {
        memset( tilted, 0, (size.width+cn)*sizeof(tilted[0]));
        tilted += tiltedstep + cn;
    }

    if( sqsum == 0 && tilted == 0 )
    {
        for( y = 0; y < size.height; y++, src += srcstep - cn, sum += sumstep - cn )
        {
            for( k = 0; k < cn; k++, src++, sum++ )
            {
                ST s = sum[-cn] = 0;
                for( x = 0; x < size.width; x += cn )
                {
                    s += src[x];
                    sum[x] = sum[x - sumstep] + s;
                }
            }
        }
    }
    else if( tilted == 0 )
    {
        for( y = 0; y < size.height; y++, src += srcstep - cn,
                        sum += sumstep - cn, sqsum += sqsumstep - cn )
        {
            for( k = 0; k < cn; k++, src++, sum++, sqsum++ )
            {
                ST s = sum[-cn] = 0;
                QT sq = sqsum[-cn] = 0;
                for( x = 0; x < size.width; x += cn )
                {
                    T it = src[x];
                    s += it;
                    sq += (QT)it*it;
                    ST t = sum[x - sumstep] + s;
                    QT tq = sqsum[x - sqsumstep] + sq;
                    sum[x] = t;
                    sqsum[x] = tq;
                }
            }
        }
    }
    else
    {
        AutoBuffer<ST> _buf(size.width+cn);
        ST* buf = _buf;
        ST s;
        QT sq;
        for( k = 0; k < cn; k++, src++, sum++, tilted++, buf++ )
        {
            sum[-cn] = tilted[-cn] = 0;

            for( x = 0, s = 0, sq = 0; x < size.width; x += cn )
            {
                T it = src[x];
                buf[x] = tilted[x] = it;
                s += it;
                sq += (QT)it*it;
                sum[x] = s;
                if( sqsum )
                    sqsum[x] = sq;
            }

            if( size.width == cn )
                buf[cn] = 0;

            if( sqsum )
            {
                sqsum[-cn] = 0;
                sqsum++;
            }
        }

        for( y = 1; y < size.height; y++ )
        {
            src += srcstep - cn;
            sum += sumstep - cn;
            tilted += tiltedstep - cn;
            buf += -cn;

            if( sqsum )
                sqsum += sqsumstep - cn;

            for( k = 0; k < cn; k++, src++, sum++, tilted++, buf++ )
            {
                T it = src[0];
                ST t0 = s = it;
                QT tq0 = sq = (QT)it*it;

                sum[-cn] = 0;
                if( sqsum )
                    sqsum[-cn] = 0;
                tilted[-cn] = tilted[-tiltedstep];

                sum[0] = sum[-sumstep] + t0;
                if( sqsum )
                    sqsum[0] = sqsum[-sqsumstep] + tq0;
                tilted[0] = tilted[-tiltedstep] + t0 + buf[cn];

                for( x = cn; x < size.width - cn; x += cn )
                {
                    ST t1 = buf[x];
                    buf[x - cn] = t1 + t0;
                    t0 = it = src[x];
                    tq0 = (QT)it*it;
                    s += t0;
                    sq += tq0;
                    sum[x] = sum[x - sumstep] + s;
                    if( sqsum )
                        sqsum[x] = sqsum[x - sqsumstep] + sq;
                    t1 += buf[x + cn] + t0 + tilted[x - tiltedstep - cn];
                    tilted[x] = t1;
                }

                if( size.width > cn )
                {
                    ST t1 = buf[x];
                    buf[x - cn] = t1 + t0;
                    t0 = it = src[x];
                    tq0 = (QT)it*it;
                    s += t0;
                    sq += tq0;
                    sum[x] = sum[x - sumstep] + s;
                    if( sqsum )
                        sqsum[x] = sqsum[x - sqsumstep] + sq;
                    tilted[x] = t0 + t1 + tilted[x - tiltedstep - cn];
                    buf[x] = t0;
                }

                if( sqsum )
                    sqsum++;
            }
        }
    }
}


#define DEF_INTEGRAL_FUNC(suffix, T, ST, QT) \
static void integral_##suffix( T* src, size_t srcstep, ST* sum, size_t sumstep, QT* sqsum, size_t sqsumstep, \
                               ST* tilted, size_t tiltedstep, Size size, int cn ) \
{ integral_(src, srcstep, sum, sumstep, sqsum, sqsumstep, tilted, tiltedstep, size, cn); }

DEF_INTEGRAL_FUNC(8u32s, uchar, int, double)
DEF_INTEGRAL_FUNC(8u32s32s, uchar, int, int)
DEF_INTEGRAL_FUNC(8u32f64f, uchar, float, double)
DEF_INTEGRAL_FUNC(8u64f64f, uchar, double, double)
DEF_INTEGRAL_FUNC(16u64f64f, ushort, double, double)
DEF_INTEGRAL_FUNC(16s64f64f, short, double, double)
DEF_INTEGRAL_FUNC(32f32f64f, float, float, double)
DEF_INTEGRAL_FUNC(32f64f64f, float, double, double)
DEF_INTEGRAL_FUNC(64f64f64f, double, double, double)

DEF_INTEGRAL_FUNC(8u32s32f, uchar, int, float)
DEF_INTEGRAL_FUNC(8u32f32f, uchar, float, float)
DEF_INTEGRAL_FUNC(32f32f32f, float, float, float)

typedef void (*IntegralFunc)(const uchar* src, size_t srcstep, uchar* sum, size_t sumstep,
                             uchar* sqsum, size_t sqsumstep, uchar* tilted, size_t tstep,
                             Size size, int cn );

#ifdef HAVE_OPENCL

static bool ocl_integral( InputArray _src, OutputArray _sum, int sdepth )
{
    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;

    if ( (_src.type() != CV_8UC1) ||
        !(sdepth == CV_32S || sdepth == CV_32F || (doubleSupport && sdepth == CV_64F)))
        return false;

    static const int tileSize = 16;

    String build_opt = format("-D sumT=%s -D LOCAL_SUM_SIZE=%d%s",
                              ocl::typeToStr(sdepth), tileSize,
                              doubleSupport ? " -D DOUBLE_SUPPORT" : "");

    ocl::Kernel kcols("integral_sum_cols", ocl::imgproc::integral_sum_oclsrc, build_opt);
    if (kcols.empty())
        return false;

    UMat src = _src.getUMat();
    Size src_size = src.size();
    Size bufsize(((src_size.height + tileSize - 1) / tileSize) * tileSize, ((src_size.width + tileSize - 1) / tileSize) * tileSize);
    UMat buf(bufsize, sdepth);
    kcols.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnlyNoSize(buf));
    size_t gt = src.cols, lt = tileSize;
    if (!kcols.run(1, &gt, &lt, false))
        return false;

    ocl::Kernel krows("integral_sum_rows", ocl::imgproc::integral_sum_oclsrc, build_opt);
    if (krows.empty())
        return false;

    Size sumsize(src_size.width + 1, src_size.height + 1);
    _sum.create(sumsize, sdepth);
    UMat sum = _sum.getUMat();

    krows.args(ocl::KernelArg::ReadOnlyNoSize(buf), ocl::KernelArg::WriteOnly(sum));
    gt = src.rows;
    return krows.run(1, &gt, &lt, false);
}

static bool ocl_integral( InputArray _src, OutputArray _sum, OutputArray _sqsum, int sdepth, int sqdepth )
{
    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;

    if ( _src.type() != CV_8UC1 || (!doubleSupport && (sdepth == CV_64F || sqdepth == CV_64F)) )
        return false;

    static const int tileSize = 16;

    String build_opt = format("-D SUM_SQUARE -D sumT=%s -D sumSQT=%s -D LOCAL_SUM_SIZE=%d%s",
                              ocl::typeToStr(sdepth), ocl::typeToStr(sqdepth),
                              tileSize,
                              doubleSupport ? " -D DOUBLE_SUPPORT" : "");

    ocl::Kernel kcols("integral_sum_cols", ocl::imgproc::integral_sum_oclsrc, build_opt);
    if (kcols.empty())
        return false;

    UMat src = _src.getUMat();
    Size src_size = src.size();
    Size bufsize(((src_size.height + tileSize - 1) / tileSize) * tileSize, ((src_size.width + tileSize - 1) / tileSize) * tileSize);
    UMat buf(bufsize, sdepth);
    UMat buf_sq(bufsize, sqdepth);
    kcols.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnlyNoSize(buf), ocl::KernelArg::WriteOnlyNoSize(buf_sq));
    size_t gt = src.cols, lt = tileSize;
    if (!kcols.run(1, &gt, &lt, false))
        return false;

    ocl::Kernel krows("integral_sum_rows", ocl::imgproc::integral_sum_oclsrc, build_opt);
    if (krows.empty())
        return false;

    Size sumsize(src_size.width + 1, src_size.height + 1);
    _sum.create(sumsize, sdepth);
    UMat sum = _sum.getUMat();
    _sqsum.create(sumsize, sqdepth);
    UMat sum_sq = _sqsum.getUMat();

    krows.args(ocl::KernelArg::ReadOnlyNoSize(buf), ocl::KernelArg::ReadOnlyNoSize(buf_sq), ocl::KernelArg::WriteOnly(sum), ocl::KernelArg::WriteOnlyNoSize(sum_sq));
    gt = src.rows;
    return krows.run(1, &gt, &lt, false);
}

#endif

}

#if defined(HAVE_IPP)
namespace cv
{
static bool ipp_integral(InputArray _src, OutputArray _sum, OutputArray _sqsum, OutputArray _tilted, int sdepth, int sqdepth)
{
#if !defined(HAVE_IPP_ICV_ONLY) && (IPP_VERSION_X100 != 900) // Disabled on ICV due invalid results
    int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
    if( sdepth <= 0 )
        sdepth = depth == CV_8U ? CV_32S : CV_64F;
    if ( sqdepth <= 0 )
        sqdepth = CV_64F;
    sdepth = CV_MAT_DEPTH(sdepth), sqdepth = CV_MAT_DEPTH(sqdepth);


    Size ssize = _src.size(), isize(ssize.width + 1, ssize.height + 1);
    _sum.create( isize, CV_MAKETYPE(sdepth, cn) );
    Mat src = _src.getMat(), sum =_sum.getMat(), sqsum, tilted;

    if( _sqsum.needed() )
    {
        _sqsum.create( isize, CV_MAKETYPE(sqdepth, cn) );
        sqsum = _sqsum.getMat();
    };

    if( ( depth == CV_8U ) && ( sdepth == CV_32F || sdepth == CV_32S ) && ( !_tilted.needed() ) && ( !_sqsum.needed() || sqdepth == CV_64F ) && ( cn == 1 ) )
    {
        IppStatus status = ippStsErr;
        IppiSize srcRoiSize = ippiSize( src.cols, src.rows );
        if( sdepth == CV_32F )
        {
            if( _sqsum.needed() )
            {
                status = ippiSqrIntegral_8u32f64f_C1R( (const Ipp8u*)src.data, (int)src.step, (Ipp32f*)sum.data, (int)sum.step, (Ipp64f*)sqsum.data, (int)sqsum.step, srcRoiSize, 0, 0 );
            }
            else
            {
                status = ippiIntegral_8u32f_C1R( (const Ipp8u*)src.data, (int)src.step, (Ipp32f*)sum.data, (int)sum.step, srcRoiSize, 0 );
            }
        }
        else if( sdepth == CV_32S )
        {
            if( _sqsum.needed() )
            {
                status = ippiSqrIntegral_8u32s64f_C1R( (const Ipp8u*)src.data, (int)src.step, (Ipp32s*)sum.data, (int)sum.step, (Ipp64f*)sqsum.data, (int)sqsum.step, srcRoiSize, 0, 0 );
            }
            else
            {
                status = ippiIntegral_8u32s_C1R( (const Ipp8u*)src.data, (int)src.step, (Ipp32s*)sum.data, (int)sum.step, srcRoiSize, 0 );
            }
        }
        if (0 <= status)
        {
            CV_IMPL_ADD(CV_IMPL_IPP);
            return true;
        }
    }
#else
    CV_UNUSED(_src); CV_UNUSED(_sum); CV_UNUSED(_sqsum); CV_UNUSED(_tilted); CV_UNUSED(sdepth); CV_UNUSED(sqdepth);
#endif
    return false;
}
}
#endif

void cv::integral( InputArray _src, OutputArray _sum, OutputArray _sqsum, OutputArray _tilted, int sdepth, int sqdepth )
{
    int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
    if( sdepth <= 0 )
        sdepth = depth == CV_8U ? CV_32S : CV_64F;
    if ( sqdepth <= 0 )
        sqdepth = CV_64F;
    sdepth = CV_MAT_DEPTH(sdepth), sqdepth = CV_MAT_DEPTH(sqdepth);

#ifdef HAVE_OPENCL
    if (ocl::useOpenCL() && _sum.isUMat() && !_tilted.needed())
    {
        if (!_sqsum.needed())
        {
            CV_OCL_RUN(ocl::useOpenCL(), ocl_integral(_src, _sum, sdepth))
        }
        else if (_sqsum.isUMat())
            CV_OCL_RUN(ocl::useOpenCL(), ocl_integral(_src, _sum, _sqsum, sdepth, sqdepth))
    }
#endif

    Size ssize = _src.size(), isize(ssize.width + 1, ssize.height + 1);
    _sum.create( isize, CV_MAKETYPE(sdepth, cn) );
    Mat src = _src.getMat(), sum =_sum.getMat(), sqsum, tilted;

    if( _sqsum.needed() )
    {
        _sqsum.create( isize, CV_MAKETYPE(sqdepth, cn) );
        sqsum = _sqsum.getMat();
    };

    CV_IPP_RUN(( depth == CV_8U ) && ( sdepth == CV_32F || sdepth == CV_32S ) &&
        ( !_tilted.needed() ) && ( !_sqsum.needed() || sqdepth == CV_64F ) && ( cn == 1 ),
        ipp_integral(_src, _sum, _sqsum, _tilted, sdepth, sqdepth));

    if( _tilted.needed() )
    {
        _tilted.create( isize, CV_MAKETYPE(sdepth, cn) );
        tilted = _tilted.getMat();
    }

    IntegralFunc func = 0;
    if( depth == CV_8U && sdepth == CV_32S && sqdepth == CV_64F )
        func = (IntegralFunc)GET_OPTIMIZED(integral_8u32s);
    else if( depth == CV_8U && sdepth == CV_32S && sqdepth == CV_32F )
        func = (IntegralFunc)integral_8u32s32f;
    else if( depth == CV_8U && sdepth == CV_32S && sqdepth == CV_32S )
        func = (IntegralFunc)integral_8u32s32s;
    else if( depth == CV_8U && sdepth == CV_32F && sqdepth == CV_64F )
        func = (IntegralFunc)integral_8u32f64f;
    else if( depth == CV_8U && sdepth == CV_32F && sqdepth == CV_32F )
        func = (IntegralFunc)integral_8u32f32f;
    else if( depth == CV_8U && sdepth == CV_64F && sqdepth == CV_64F )
        func = (IntegralFunc)integral_8u64f64f;
    else if( depth == CV_16U && sdepth == CV_64F && sqdepth == CV_64F )
        func = (IntegralFunc)integral_16u64f64f;
    else if( depth == CV_16S && sdepth == CV_64F && sqdepth == CV_64F )
        func = (IntegralFunc)integral_16s64f64f;
    else if( depth == CV_32F && sdepth == CV_32F && sqdepth == CV_64F )
        func = (IntegralFunc)integral_32f32f64f;
    else if( depth == CV_32F && sdepth == CV_32F && sqdepth == CV_32F )
        func = (IntegralFunc)integral_32f32f32f;
    else if( depth == CV_32F && sdepth == CV_64F && sqdepth == CV_64F )
        func = (IntegralFunc)integral_32f64f64f;
    else if( depth == CV_64F && sdepth == CV_64F && sqdepth == CV_64F )
        func = (IntegralFunc)integral_64f64f64f;
    else
        CV_Error( CV_StsUnsupportedFormat, "" );

    func( src.ptr(), src.step, sum.ptr(), sum.step, sqsum.ptr(), sqsum.step,
          tilted.ptr(), tilted.step, src.size(), cn );
}

void cv::integral( InputArray src, OutputArray sum, int sdepth )
{
    integral( src, sum, noArray(), noArray(), sdepth );
}

void cv::integral( InputArray src, OutputArray sum, OutputArray sqsum, int sdepth, int sqdepth )
{
    integral( src, sum, sqsum, noArray(), sdepth, sqdepth );
}


CV_IMPL void
cvIntegral( const CvArr* image, CvArr* sumImage,
            CvArr* sumSqImage, CvArr* tiltedSumImage )
{
    cv::Mat src = cv::cvarrToMat(image), sum = cv::cvarrToMat(sumImage), sum0 = sum;
    cv::Mat sqsum0, sqsum, tilted0, tilted;
    cv::Mat *psqsum = 0, *ptilted = 0;

    if( sumSqImage )
    {
        sqsum0 = sqsum = cv::cvarrToMat(sumSqImage);
        psqsum = &sqsum;
    }

    if( tiltedSumImage )
    {
        tilted0 = tilted = cv::cvarrToMat(tiltedSumImage);
        ptilted = &tilted;
    }
    cv::integral( src, sum, psqsum ? cv::_OutputArray(*psqsum) : cv::_OutputArray(),
                  ptilted ? cv::_OutputArray(*ptilted) : cv::_OutputArray(), sum.depth() );

    CV_Assert( sum.data == sum0.data && sqsum.data == sqsum0.data && tilted.data == tilted0.data );
}

/* End of file. */
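The listing above is the implementation behind the public cv::integral functions. For orientation only, here is a minimal usage sketch (not part of this file, assuming a standard OpenCV build with the public headers available); it exercises the default depth choices that the dispatch code above makes for an 8-bit single-channel input, namely CV_32S for the sum and CV_64F for the squared sum:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

int main()
{
    // Hypothetical example input: an 8-bit single-channel image filled with ones.
    cv::Mat img(64, 64, CV_8UC1, cv::Scalar(1));

    cv::Mat sum, sqsum, tilted;
    // Outputs are (rows+1) x (cols+1); depths passed explicitly here match the
    // defaults chosen in cv::integral for CV_8U input.
    cv::integral(img, sum, sqsum, tilted, CV_32S, CV_64F);

    // For an all-ones image the bottom-right entry of the sum image
    // equals the total number of pixels.
    CV_Assert(sum.at<int>(64, 64) == 64 * 64);
    return 0;
}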