Important changes to repositories hosted on mbed.com
Mbed-hosted Mercurial repositories are deprecated and are due to be permanently deleted in July 2026.
To keep a copy of this software, download the repository's Zip archive or clone it locally using Mercurial.
It is also possible to export all your personal repositories from the account settings page.
Fork of gr-peach-opencv-project-sd-card by
histogram.cpp
00001 /*M/////////////////////////////////////////////////////////////////////////////////////// 00002 // 00003 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 00004 // 00005 // By downloading, copying, installing or using the software you agree to this license. 00006 // If you do not agree to this license, do not download, install, 00007 // copy or use the software. 00008 // 00009 // 00010 // Intel License Agreement 00011 // For Open Source Computer Vision Library 00012 // 00013 // Copyright (C) 2000, Intel Corporation, all rights reserved. 00014 // Third party copyrights are property of their respective owners. 00015 // 00016 // Redistribution and use in source and binary forms, with or without modification, 00017 // are permitted provided that the following conditions are met: 00018 // 00019 // * Redistribution's of source code must retain the above copyright notice, 00020 // this list of conditions and the following disclaimer. 00021 // 00022 // * Redistribution's in binary form must reproduce the above copyright notice, 00023 // this list of conditions and the following disclaimer in the documentation 00024 // and/or other materials provided with the distribution. 00025 // 00026 // * The name of Intel Corporation may not be used to endorse or promote products 00027 // derived from this software without specific prior written permission. 00028 // 00029 // This software is provided by the copyright holders and contributors "as is" and 00030 // any express or implied warranties, including, but not limited to, the implied 00031 // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"
#include "opencl_kernels_imgproc.hpp"

namespace cv
{

////////////////// Helper functions //////////////////////

// Sentinel byte-offset meaning "pixel value falls outside every histogram bin".
// Chosen as 2^(bits-2) so that summing one per-dimension table entry per pixel
// (up to CV_MAX_DIM additions) cannot wrap around; any accumulated offset
// >= OUT_OF_RANGE is rejected by the callers.
static const size_t OUT_OF_RANGE = (size_t)1 << (sizeof(size_t)*8 - 2);

// Builds, for each histogram dimension, a 256-entry lookup table mapping an
// 8-bit pixel value directly to the byte offset of its bin (value * row step),
// or to OUT_OF_RANGE when the value maps to no bin.
//
// hist/shist  - destination histogram; only one is used, selected by `issparse`
//               (a sparse histogram has an implicit element "step" of 1).
// dims        - number of histogram dimensions.
// ranges      - per-dimension bin boundary arrays; used only when !uniform.
// uniranges   - per-dimension (scale, shift) pairs; used only when uniform,
//               bin index = floor(value*scale + shift).
// _tab        - output: dims consecutive 256-entry tables of byte offsets.
static void
calcHistLookupTables_8u( const Mat& hist, const SparseMat& shist,
                         int dims, const float** ranges, const double* uniranges,
                         bool uniform, bool issparse, std::vector<size_t>& _tab )
{
    const int low = 0, high = 256; // full 8-bit value range
    int i, j;
    _tab.resize((high-low)*dims);
    size_t* tab = &_tab[0];

    if( uniform )
    {
        for( i = 0; i < dims; i++ )
        {
            double a = uniranges[i*2];       // scale
            double b = uniranges[i*2+1];     // shift
            int sz = !issparse ? hist.size[i] : shist.size(i);
            size_t step = !issparse ? hist.step[i] : 1;

            for( j = low; j < high; j++ )
            {
                int idx = cvFloor(j*a + b);  // bin index for pixel value j
                size_t written_idx;
                // unsigned compare rejects both idx < 0 and idx >= sz in one test
                if( (unsigned)idx < (unsigned)sz )
                    written_idx = idx*step;
                else
                    written_idx = OUT_OF_RANGE;

                tab[i*(high - low) + j - low] = written_idx;
            }
        }
    }
    else
    {
        // Non-uniform bins: walk the sorted boundary array ranges[i] once,
        // filling each run [ranges[i][idx], ranges[i][idx+1]) with bin idx's
        // offset; values before the first boundary / after the last get
        // OUT_OF_RANGE.
        for( i = 0; i < dims; i++ )
        {
            int limit = std::min(cvCeil(ranges[i][0]), high);
            int idx = -1, sz = !issparse ? hist.size[i] : shist.size(i);
            size_t written_idx = OUT_OF_RANGE; // values below ranges[i][0]
            size_t step = !issparse ? hist.step[i] : 1;

            for(j = low;;)
            {
                // fill current run with the offset of the current bin
                for( ; j < limit; j++ )
                    tab[i*(high - low) + j - low] = written_idx;

                if( (unsigned)(++idx) < (unsigned)sz )
                {
                    limit = std::min(cvCeil(ranges[i][idx+1]), high);
                    written_idx = idx*step;
                }
                else
                {
                    // past the last boundary: remaining values are out of range
                    for( ; j < high; j++ )
                        tab[i*(high - low) + j - low] = OUT_OF_RANGE;
                    break;
                }
            }
        }
    }
}


// Validates the input images/mask and precomputes, for every histogram
// dimension, a data pointer plus (intra-row, inter-row) element deltas, so the
// calcHist_* kernels can walk interleaved channels uniformly. Also derives the
// (scale, shift) pairs in `uniranges` for uniform histograms.
// NOTE: definition continues on the following source span; cut is mid-function.
static void histPrepareImages( const Mat* images, int nimages, const int* channels,
                               const Mat& mask, int dims, const int* histSize,
                               const float** ranges, bool uniform,
                               std::vector<uchar*>& ptrs, std::vector<int>& deltas,
                               Size& imsize, std::vector<double>& uniranges )
{
    int i, j, c;
    CV_Assert( channels != 0 || nimages == dims );

    imsize = images[0].size();
    int depth = images[0].depth(), esz1 = (int)images[0].elemSize1();
    bool isContinuous = true;

    // ptrs/deltas have one extra slot: index `dims` carries the mask.
    ptrs.resize(dims + 1);
    deltas.resize((dims + 1)*2);

    for( i = 0; i < dims; i++ )
    {
        if(!channels)
        {
            // no channel list: i-th dimension comes from the i-th (1-channel) image
            j = i;
            c = 0;
            CV_Assert( images[j].channels() == 1 );
        }
        else
        {
            // global channel index c: find the image j it falls into and the
            // channel offset within that image
            c = channels[i];
            CV_Assert( c >= 0 );
            for( j = 0; j < nimages; c -= images[j].channels(), j++ )
                if( c < images[j].channels() )
                    break;
            CV_Assert( j < nimages );
        }

        CV_Assert( images[j].size() == imsize && images[j].depth() == depth );
        if( !images[j].isContinuous() )
            isContinuous = false;
        ptrs[i] = images[j].data + c*esz1;
        deltas[i*2] = images[j].channels();                  // step to next pixel in a row
        deltas[i*2+1] = (int)(images[j].step/esz1 - imsize.width*deltas[i*2]); // row tail
    }

    if( !mask.empty() )
    {
        CV_Assert( mask.size() == imsize && mask.channels() == 1 );
        isContinuous = isContinuous &&
mask.isContinuous(); 00161 ptrs[dims] = mask.data; 00162 deltas[dims*2] = 1; 00163 deltas[dims*2 + 1] = (int)(mask.step/mask.elemSize1()); 00164 } 00165 00166 #ifndef HAVE_TBB 00167 if( isContinuous ) 00168 { 00169 imsize.width *= imsize.height; 00170 imsize.height = 1; 00171 } 00172 #endif 00173 00174 if( !ranges ) 00175 { 00176 CV_Assert( depth == CV_8U ); 00177 00178 uniranges.resize( dims*2 ); 00179 for( i = 0; i < dims; i++ ) 00180 { 00181 uniranges[i*2] = histSize[i]/256.; 00182 uniranges[i*2+1] = 0; 00183 } 00184 } 00185 else if( uniform ) 00186 { 00187 uniranges.resize( dims*2 ); 00188 for( i = 0; i < dims; i++ ) 00189 { 00190 CV_Assert( ranges[i] && ranges[i][0] < ranges[i][1] ); 00191 double low = ranges[i][0], high = ranges[i][1]; 00192 double t = histSize[i]/(high - low); 00193 uniranges[i*2] = t; 00194 uniranges[i*2+1] = -t*low; 00195 } 00196 } 00197 else 00198 { 00199 for( i = 0; i < dims; i++ ) 00200 { 00201 size_t n = histSize[i]; 00202 for(size_t k = 0; k < n; k++ ) 00203 CV_Assert( ranges[i][k] < ranges[i][k+1] ); 00204 } 00205 } 00206 } 00207 00208 00209 ////////////////////////////////// C A L C U L A T E H I S T O G R A M //////////////////////////////////// 00210 #ifdef HAVE_TBB 00211 enum {one = 1, two, three}; // array elements number 00212 00213 template<typename T> 00214 class calcHist1D_Invoker 00215 { 00216 public: 00217 calcHist1D_Invoker( const std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas, 00218 Mat& hist, const double* _uniranges, int sz, int dims, 00219 Size& imageSize ) 00220 : mask_(_ptrs[dims]), 00221 mstep_(_deltas[dims*2 + 1]), 00222 imageWidth_(imageSize.width), 00223 histogramSize_(hist.size()), histogramType_(hist.type()), 00224 globalHistogram_((tbb::atomic<int>*)hist.data) 00225 { 00226 p_[0] = ((T**)&_ptrs[0])[0]; 00227 step_[0] = (&_deltas[0])[1]; 00228 d_[0] = (&_deltas[0])[0]; 00229 a_[0] = (&_uniranges[0])[0]; 00230 b_[0] = (&_uniranges[0])[1]; 00231 size_[0] = sz; 00232 } 00233 00234 void operator()( 
const BlockedRange& range ) const 00235 { 00236 T* p0 = p_[0] + range.begin() * (step_[0] + imageWidth_*d_[0]); 00237 uchar* mask = mask_ + range.begin()*mstep_; 00238 00239 for( int row = range.begin(); row < range.end(); row++, p0 += step_[0] ) 00240 { 00241 if( !mask_ ) 00242 { 00243 for( int x = 0; x < imageWidth_; x++, p0 += d_[0] ) 00244 { 00245 int idx = cvFloor(*p0*a_[0] + b_[0]); 00246 if( (unsigned)idx < (unsigned)size_[0] ) 00247 { 00248 globalHistogram_[idx].fetch_and_add(1); 00249 } 00250 } 00251 } 00252 else 00253 { 00254 for( int x = 0; x < imageWidth_; x++, p0 += d_[0] ) 00255 { 00256 if( mask[x] ) 00257 { 00258 int idx = cvFloor(*p0*a_[0] + b_[0]); 00259 if( (unsigned)idx < (unsigned)size_[0] ) 00260 { 00261 globalHistogram_[idx].fetch_and_add(1); 00262 } 00263 } 00264 } 00265 mask += mstep_; 00266 } 00267 } 00268 } 00269 00270 private: 00271 calcHist1D_Invoker operator=(const calcHist1D_Invoker&); 00272 00273 T* p_[one]; 00274 uchar* mask_; 00275 int step_[one]; 00276 int d_[one]; 00277 int mstep_; 00278 double a_[one]; 00279 double b_[one]; 00280 int size_[one]; 00281 int imageWidth_; 00282 Size histogramSize_; 00283 int histogramType_; 00284 tbb::atomic<int>* globalHistogram_; 00285 }; 00286 00287 template<typename T> 00288 class calcHist2D_Invoker 00289 { 00290 public: 00291 calcHist2D_Invoker( const std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas, 00292 Mat& hist, const double* _uniranges, const int* size, 00293 int dims, Size& imageSize, size_t* hstep ) 00294 : mask_(_ptrs[dims]), 00295 mstep_(_deltas[dims*2 + 1]), 00296 imageWidth_(imageSize.width), 00297 histogramSize_(hist.size()), histogramType_(hist.type()), 00298 globalHistogram_(hist.data) 00299 { 00300 p_[0] = ((T**)&_ptrs[0])[0]; p_[1] = ((T**)&_ptrs[0])[1]; 00301 step_[0] = (&_deltas[0])[1]; step_[1] = (&_deltas[0])[3]; 00302 d_[0] = (&_deltas[0])[0]; d_[1] = (&_deltas[0])[2]; 00303 a_[0] = (&_uniranges[0])[0]; a_[1] = (&_uniranges[0])[2]; 00304 b_[0] = 
(&_uniranges[0])[1]; b_[1] = (&_uniranges[0])[3]; 00305 size_[0] = size[0]; size_[1] = size[1]; 00306 hstep_[0] = hstep[0]; 00307 } 00308 00309 void operator()(const BlockedRange& range) const 00310 { 00311 T* p0 = p_[0] + range.begin()*(step_[0] + imageWidth_*d_[0]); 00312 T* p1 = p_[1] + range.begin()*(step_[1] + imageWidth_*d_[1]); 00313 uchar* mask = mask_ + range.begin()*mstep_; 00314 00315 for( int row = range.begin(); row < range.end(); row++, p0 += step_[0], p1 += step_[1] ) 00316 { 00317 if( !mask_ ) 00318 { 00319 for( int x = 0; x < imageWidth_; x++, p0 += d_[0], p1 += d_[1] ) 00320 { 00321 int idx0 = cvFloor(*p0*a_[0] + b_[0]); 00322 int idx1 = cvFloor(*p1*a_[1] + b_[1]); 00323 if( (unsigned)idx0 < (unsigned)size_[0] && (unsigned)idx1 < (unsigned)size_[1] ) 00324 ( (tbb::atomic<int>*)(globalHistogram_ + hstep_[0]*idx0) )[idx1].fetch_and_add(1); 00325 } 00326 } 00327 else 00328 { 00329 for( int x = 0; x < imageWidth_; x++, p0 += d_[0], p1 += d_[1] ) 00330 { 00331 if( mask[x] ) 00332 { 00333 int idx0 = cvFloor(*p0*a_[0] + b_[0]); 00334 int idx1 = cvFloor(*p1*a_[1] + b_[1]); 00335 if( (unsigned)idx0 < (unsigned)size_[0] && (unsigned)idx1 < (unsigned)size_[1] ) 00336 ((tbb::atomic<int>*)(globalHistogram_ + hstep_[0]*idx0))[idx1].fetch_and_add(1); 00337 } 00338 } 00339 mask += mstep_; 00340 } 00341 } 00342 } 00343 00344 private: 00345 calcHist2D_Invoker operator=(const calcHist2D_Invoker&); 00346 00347 T* p_[two]; 00348 uchar* mask_; 00349 int step_[two]; 00350 int d_[two]; 00351 int mstep_; 00352 double a_[two]; 00353 double b_[two]; 00354 int size_[two]; 00355 const int imageWidth_; 00356 size_t hstep_[one]; 00357 Size histogramSize_; 00358 int histogramType_; 00359 uchar* globalHistogram_; 00360 }; 00361 00362 00363 template<typename T> 00364 class calcHist3D_Invoker 00365 { 00366 public: 00367 calcHist3D_Invoker( const std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas, 00368 Size imsize, Mat& hist, const double* uniranges, int _dims, 00369 
size_t* hstep, int* size ) 00370 : mask_(_ptrs[_dims]), 00371 mstep_(_deltas[_dims*2 + 1]), 00372 imageWidth_(imsize.width), 00373 globalHistogram_(hist.data) 00374 { 00375 p_[0] = ((T**)&_ptrs[0])[0]; p_[1] = ((T**)&_ptrs[0])[1]; p_[2] = ((T**)&_ptrs[0])[2]; 00376 step_[0] = (&_deltas[0])[1]; step_[1] = (&_deltas[0])[3]; step_[2] = (&_deltas[0])[5]; 00377 d_[0] = (&_deltas[0])[0]; d_[1] = (&_deltas[0])[2]; d_[2] = (&_deltas[0])[4]; 00378 a_[0] = uniranges[0]; a_[1] = uniranges[2]; a_[2] = uniranges[4]; 00379 b_[0] = uniranges[1]; b_[1] = uniranges[3]; b_[2] = uniranges[5]; 00380 size_[0] = size[0]; size_[1] = size[1]; size_[2] = size[2]; 00381 hstep_[0] = hstep[0]; hstep_[1] = hstep[1]; 00382 } 00383 00384 void operator()( const BlockedRange& range ) const 00385 { 00386 T* p0 = p_[0] + range.begin()*(imageWidth_*d_[0] + step_[0]); 00387 T* p1 = p_[1] + range.begin()*(imageWidth_*d_[1] + step_[1]); 00388 T* p2 = p_[2] + range.begin()*(imageWidth_*d_[2] + step_[2]); 00389 uchar* mask = mask_ + range.begin()*mstep_; 00390 00391 for( int i = range.begin(); i < range.end(); i++, p0 += step_[0], p1 += step_[1], p2 += step_[2] ) 00392 { 00393 if( !mask_ ) 00394 { 00395 for( int x = 0; x < imageWidth_; x++, p0 += d_[0], p1 += d_[1], p2 += d_[2] ) 00396 { 00397 int idx0 = cvFloor(*p0*a_[0] + b_[0]); 00398 int idx1 = cvFloor(*p1*a_[1] + b_[1]); 00399 int idx2 = cvFloor(*p2*a_[2] + b_[2]); 00400 if( (unsigned)idx0 < (unsigned)size_[0] && 00401 (unsigned)idx1 < (unsigned)size_[1] && 00402 (unsigned)idx2 < (unsigned)size_[2] ) 00403 { 00404 ( (tbb::atomic<int>*)(globalHistogram_ + hstep_[0]*idx0 + hstep_[1]*idx1) )[idx2].fetch_and_add(1); 00405 } 00406 } 00407 } 00408 else 00409 { 00410 for( int x = 0; x < imageWidth_; x++, p0 += d_[0], p1 += d_[1], p2 += d_[2] ) 00411 { 00412 if( mask[x] ) 00413 { 00414 int idx0 = cvFloor(*p0*a_[0] + b_[0]); 00415 int idx1 = cvFloor(*p1*a_[1] + b_[1]); 00416 int idx2 = cvFloor(*p2*a_[2] + b_[2]); 00417 if( (unsigned)idx0 < (unsigned)size_[0] 
&& 00418 (unsigned)idx1 < (unsigned)size_[1] && 00419 (unsigned)idx2 < (unsigned)size_[2] ) 00420 { 00421 ( (tbb::atomic<int>*)(globalHistogram_ + hstep_[0]*idx0 + hstep_[1]*idx1) )[idx2].fetch_and_add(1); 00422 } 00423 } 00424 } 00425 mask += mstep_; 00426 } 00427 } 00428 } 00429 00430 static bool isFit( const Mat& histogram, const Size imageSize ) 00431 { 00432 return ( imageSize.width * imageSize.height >= 320*240 00433 && histogram.total() >= 8*8*8 ); 00434 } 00435 00436 private: 00437 calcHist3D_Invoker operator=(const calcHist3D_Invoker&); 00438 00439 T* p_[three]; 00440 uchar* mask_; 00441 int step_[three]; 00442 int d_[three]; 00443 const int mstep_; 00444 double a_[three]; 00445 double b_[three]; 00446 int size_[three]; 00447 int imageWidth_; 00448 size_t hstep_[two]; 00449 uchar* globalHistogram_; 00450 }; 00451 00452 class CalcHist1D_8uInvoker 00453 { 00454 public: 00455 CalcHist1D_8uInvoker( const std::vector<uchar*>& ptrs, const std::vector<int>& deltas, 00456 Size imsize, Mat& hist, int dims, const std::vector<size_t>& tab, 00457 tbb::mutex* lock ) 00458 : mask_(ptrs[dims]), 00459 mstep_(deltas[dims*2 + 1]), 00460 imageWidth_(imsize.width), 00461 imageSize_(imsize), 00462 histSize_(hist.size()), histType_(hist.type()), 00463 tab_((size_t*)&tab[0]), 00464 histogramWriteLock_(lock), 00465 globalHistogram_(hist.data) 00466 { 00467 p_[0] = (&ptrs[0])[0]; 00468 step_[0] = (&deltas[0])[1]; 00469 d_[0] = (&deltas[0])[0]; 00470 } 00471 00472 void operator()( const BlockedRange& range ) const 00473 { 00474 int localHistogram[256] = { 0, }; 00475 uchar* mask = mask_; 00476 uchar* p0 = p_[0]; 00477 int x; 00478 tbb::mutex::scoped_lock lock; 00479 00480 if( !mask_ ) 00481 { 00482 int n = (imageWidth_ - 4) / 4 + 1; 00483 int tail = imageWidth_ - n*4; 00484 00485 int xN = 4*n; 00486 p0 += (xN*d_[0] + tail*d_[0] + step_[0]) * range.begin(); 00487 } 00488 else 00489 { 00490 p0 += (imageWidth_*d_[0] + step_[0]) * range.begin(); 00491 mask += mstep_*range.begin(); 
00492 } 00493 00494 for( int i = range.begin(); i < range.end(); i++, p0 += step_[0] ) 00495 { 00496 if( !mask_ ) 00497 { 00498 if( d_[0] == 1 ) 00499 { 00500 for( x = 0; x <= imageWidth_ - 4; x += 4 ) 00501 { 00502 int t0 = p0[x], t1 = p0[x+1]; 00503 localHistogram[t0]++; localHistogram[t1]++; 00504 t0 = p0[x+2]; t1 = p0[x+3]; 00505 localHistogram[t0]++; localHistogram[t1]++; 00506 } 00507 p0 += x; 00508 } 00509 else 00510 { 00511 for( x = 0; x <= imageWidth_ - 4; x += 4 ) 00512 { 00513 int t0 = p0[0], t1 = p0[d_[0]]; 00514 localHistogram[t0]++; localHistogram[t1]++; 00515 p0 += d_[0]*2; 00516 t0 = p0[0]; t1 = p0[d_[0]]; 00517 localHistogram[t0]++; localHistogram[t1]++; 00518 p0 += d_[0]*2; 00519 } 00520 } 00521 00522 for( ; x < imageWidth_; x++, p0 += d_[0] ) 00523 { 00524 localHistogram[*p0]++; 00525 } 00526 } 00527 else 00528 { 00529 for( x = 0; x < imageWidth_; x++, p0 += d_[0] ) 00530 { 00531 if( mask[x] ) 00532 { 00533 localHistogram[*p0]++; 00534 } 00535 } 00536 mask += mstep_; 00537 } 00538 } 00539 00540 lock.acquire(*histogramWriteLock_); 00541 for(int i = 0; i < 256; i++ ) 00542 { 00543 size_t hidx = tab_[i]; 00544 if( hidx < OUT_OF_RANGE ) 00545 { 00546 *(int*)((globalHistogram_ + hidx)) += localHistogram[i]; 00547 } 00548 } 00549 lock.release(); 00550 } 00551 00552 static bool isFit( const Mat& histogram, const Size imageSize ) 00553 { 00554 return ( histogram.total() >= 8 00555 && imageSize.width * imageSize.height >= 160*120 ); 00556 } 00557 00558 private: 00559 uchar* p_[one]; 00560 uchar* mask_; 00561 int mstep_; 00562 int step_[one]; 00563 int d_[one]; 00564 int imageWidth_; 00565 Size imageSize_; 00566 Size histSize_; 00567 int histType_; 00568 size_t* tab_; 00569 tbb::mutex* histogramWriteLock_; 00570 uchar* globalHistogram_; 00571 }; 00572 00573 class CalcHist2D_8uInvoker 00574 { 00575 public: 00576 CalcHist2D_8uInvoker( const std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas, 00577 Size imsize, Mat& hist, int dims, const 
std::vector<size_t>& _tab, 00578 tbb::mutex* lock ) 00579 : mask_(_ptrs[dims]), 00580 mstep_(_deltas[dims*2 + 1]), 00581 imageWidth_(imsize.width), 00582 histSize_(hist.size()), histType_(hist.type()), 00583 tab_((size_t*)&_tab[0]), 00584 histogramWriteLock_(lock), 00585 globalHistogram_(hist.data) 00586 { 00587 p_[0] = (uchar*)(&_ptrs[0])[0]; p_[1] = (uchar*)(&_ptrs[0])[1]; 00588 step_[0] = (&_deltas[0])[1]; step_[1] = (&_deltas[0])[3]; 00589 d_[0] = (&_deltas[0])[0]; d_[1] = (&_deltas[0])[2]; 00590 } 00591 00592 void operator()( const BlockedRange& range ) const 00593 { 00594 uchar* p0 = p_[0] + range.begin()*(step_[0] + imageWidth_*d_[0]); 00595 uchar* p1 = p_[1] + range.begin()*(step_[1] + imageWidth_*d_[1]); 00596 uchar* mask = mask_ + range.begin()*mstep_; 00597 00598 Mat localHist = Mat::zeros(histSize_, histType_); 00599 uchar* localHistData = localHist.data; 00600 tbb::mutex::scoped_lock lock; 00601 00602 for(int i = range.begin(); i < range.end(); i++, p0 += step_[0], p1 += step_[1]) 00603 { 00604 if( !mask_ ) 00605 { 00606 for( int x = 0; x < imageWidth_; x++, p0 += d_[0], p1 += d_[1] ) 00607 { 00608 size_t idx = tab_[*p0] + tab_[*p1 + 256]; 00609 if( idx < OUT_OF_RANGE ) 00610 { 00611 ++*(int*)(localHistData + idx); 00612 } 00613 } 00614 } 00615 else 00616 { 00617 for( int x = 0; x < imageWidth_; x++, p0 += d_[0], p1 += d_[1] ) 00618 { 00619 size_t idx; 00620 if( mask[x] && (idx = tab_[*p0] + tab_[*p1 + 256]) < OUT_OF_RANGE ) 00621 { 00622 ++*(int*)(localHistData + idx); 00623 } 00624 } 00625 mask += mstep_; 00626 } 00627 } 00628 00629 lock.acquire(*histogramWriteLock_); 00630 for(int i = 0; i < histSize_.width*histSize_.height; i++) 00631 { 00632 ((int*)globalHistogram_)[i] += ((int*)localHistData)[i]; 00633 } 00634 lock.release(); 00635 } 00636 00637 static bool isFit( const Mat& histogram, const Size imageSize ) 00638 { 00639 return ( (histogram.total() > 4*4 && histogram.total() <= 116*116 00640 && imageSize.width * imageSize.height >= 320*240) 
00641 || (histogram.total() > 116*116 && imageSize.width * imageSize.height >= 1280*720) ); 00642 } 00643 00644 private: 00645 uchar* p_[two]; 00646 uchar* mask_; 00647 int step_[two]; 00648 int d_[two]; 00649 int mstep_; 00650 int imageWidth_; 00651 Size histSize_; 00652 int histType_; 00653 size_t* tab_; 00654 tbb::mutex* histogramWriteLock_; 00655 uchar* globalHistogram_; 00656 }; 00657 00658 class CalcHist3D_8uInvoker 00659 { 00660 public: 00661 CalcHist3D_8uInvoker( const std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas, 00662 Size imsize, Mat& hist, int dims, const std::vector<size_t>& tab ) 00663 : mask_(_ptrs[dims]), 00664 mstep_(_deltas[dims*2 + 1]), 00665 histogramSize_(hist.size.p), histogramType_(hist.type()), 00666 imageWidth_(imsize.width), 00667 tab_((size_t*)&tab[0]), 00668 globalHistogram_(hist.data) 00669 { 00670 p_[0] = (uchar*)(&_ptrs[0])[0]; p_[1] = (uchar*)(&_ptrs[0])[1]; p_[2] = (uchar*)(&_ptrs[0])[2]; 00671 step_[0] = (&_deltas[0])[1]; step_[1] = (&_deltas[0])[3]; step_[2] = (&_deltas[0])[5]; 00672 d_[0] = (&_deltas[0])[0]; d_[1] = (&_deltas[0])[2]; d_[2] = (&_deltas[0])[4]; 00673 } 00674 00675 void operator()( const BlockedRange& range ) const 00676 { 00677 uchar* p0 = p_[0] + range.begin()*(step_[0] + imageWidth_*d_[0]); 00678 uchar* p1 = p_[1] + range.begin()*(step_[1] + imageWidth_*d_[1]); 00679 uchar* p2 = p_[2] + range.begin()*(step_[2] + imageWidth_*d_[2]); 00680 uchar* mask = mask_ + range.begin()*mstep_; 00681 00682 for(int i = range.begin(); i < range.end(); i++, p0 += step_[0], p1 += step_[1], p2 += step_[2] ) 00683 { 00684 if( !mask_ ) 00685 { 00686 for( int x = 0; x < imageWidth_; x++, p0 += d_[0], p1 += d_[1], p2 += d_[2] ) 00687 { 00688 size_t idx = tab_[*p0] + tab_[*p1 + 256] + tab_[*p2 + 512]; 00689 if( idx < OUT_OF_RANGE ) 00690 { 00691 ( *(tbb::atomic<int>*)(globalHistogram_ + idx) ).fetch_and_add(1); 00692 } 00693 } 00694 } 00695 else 00696 { 00697 for( int x = 0; x < imageWidth_; x++, p0 += d_[0], p1 += d_[1], 
p2 += d_[2] ) 00698 { 00699 size_t idx; 00700 if( mask[x] && (idx = tab_[*p0] + tab_[*p1 + 256] + tab_[*p2 + 512]) < OUT_OF_RANGE ) 00701 { 00702 (*(tbb::atomic<int>*)(globalHistogram_ + idx)).fetch_and_add(1); 00703 } 00704 } 00705 mask += mstep_; 00706 } 00707 } 00708 } 00709 00710 static bool isFit( const Mat& histogram, const Size imageSize ) 00711 { 00712 return ( histogram.total() >= 128*128*128 00713 && imageSize.width * imageSize.width >= 320*240 ); 00714 } 00715 00716 private: 00717 uchar* p_[three]; 00718 uchar* mask_; 00719 int mstep_; 00720 int step_[three]; 00721 int d_[three]; 00722 int* histogramSize_; 00723 int histogramType_; 00724 int imageWidth_; 00725 size_t* tab_; 00726 uchar* globalHistogram_; 00727 }; 00728 00729 static void 00730 callCalcHist2D_8u( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas, 00731 Size imsize, Mat& hist, int dims, std::vector<size_t>& _tab ) 00732 { 00733 int grainSize = imsize.height / tbb::task_scheduler_init::default_num_threads(); 00734 tbb::mutex histogramWriteLock; 00735 00736 CalcHist2D_8uInvoker body(_ptrs, _deltas, imsize, hist, dims, _tab, &histogramWriteLock); 00737 parallel_for(BlockedRange(0, imsize.height, grainSize), body); 00738 } 00739 00740 static void 00741 callCalcHist3D_8u( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas, 00742 Size imsize, Mat& hist, int dims, std::vector<size_t>& _tab ) 00743 { 00744 CalcHist3D_8uInvoker body(_ptrs, _deltas, imsize, hist, dims, _tab); 00745 parallel_for(BlockedRange(0, imsize.height), body); 00746 } 00747 #endif 00748 00749 template<typename T> static void 00750 calcHist_( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas, 00751 Size imsize, Mat& hist, int dims, const float** _ranges, 00752 const double* _uniranges, bool uniform ) 00753 { 00754 T** ptrs = (T**)&_ptrs[0]; 00755 const int* deltas = &_deltas[0]; 00756 uchar* H = hist.ptr(); 00757 int i, x; 00758 const uchar* mask = _ptrs[dims]; 00759 int mstep = _deltas[dims*2 + 1]; 
00760 int size[CV_MAX_DIM]; 00761 size_t hstep[CV_MAX_DIM]; 00762 00763 for( i = 0; i < dims; i++ ) 00764 { 00765 size[i] = hist.size[i]; 00766 hstep[i] = hist.step[i]; 00767 } 00768 00769 if( uniform ) 00770 { 00771 const double* uniranges = &_uniranges[0]; 00772 00773 if( dims == 1 ) 00774 { 00775 #ifdef HAVE_TBB 00776 calcHist1D_Invoker<T> body(_ptrs, _deltas, hist, _uniranges, size[0], dims, imsize); 00777 parallel_for(BlockedRange(0, imsize.height), body); 00778 #else 00779 double a = uniranges[0], b = uniranges[1]; 00780 int sz = size[0], d0 = deltas[0], step0 = deltas[1]; 00781 const T* p0 = (const T*)ptrs[0]; 00782 00783 for( ; imsize.height--; p0 += step0, mask += mstep ) 00784 { 00785 if( !mask ) 00786 for( x = 0; x < imsize.width; x++, p0 += d0 ) 00787 { 00788 int idx = cvFloor(*p0*a + b); 00789 if( (unsigned)idx < (unsigned)sz ) 00790 ((int*)H)[idx]++; 00791 } 00792 else 00793 for( x = 0; x < imsize.width; x++, p0 += d0 ) 00794 if( mask[x] ) 00795 { 00796 int idx = cvFloor(*p0*a + b); 00797 if( (unsigned)idx < (unsigned)sz ) 00798 ((int*)H)[idx]++; 00799 } 00800 } 00801 #endif //HAVE_TBB 00802 return; 00803 } 00804 else if( dims == 2 ) 00805 { 00806 #ifdef HAVE_TBB 00807 calcHist2D_Invoker<T> body(_ptrs, _deltas, hist, _uniranges, size, dims, imsize, hstep); 00808 parallel_for(BlockedRange(0, imsize.height), body); 00809 #else 00810 double a0 = uniranges[0], b0 = uniranges[1], a1 = uniranges[2], b1 = uniranges[3]; 00811 int sz0 = size[0], sz1 = size[1]; 00812 int d0 = deltas[0], step0 = deltas[1], 00813 d1 = deltas[2], step1 = deltas[3]; 00814 size_t hstep0 = hstep[0]; 00815 const T* p0 = (const T*)ptrs[0]; 00816 const T* p1 = (const T*)ptrs[1]; 00817 00818 for( ; imsize.height--; p0 += step0, p1 += step1, mask += mstep ) 00819 { 00820 if( !mask ) 00821 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 ) 00822 { 00823 int idx0 = cvFloor(*p0*a0 + b0); 00824 int idx1 = cvFloor(*p1*a1 + b1); 00825 if( (unsigned)idx0 < (unsigned)sz0 && (unsigned)idx1 < 
(unsigned)sz1 ) 00826 ((int*)(H + hstep0*idx0))[idx1]++; 00827 } 00828 else 00829 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 ) 00830 if( mask[x] ) 00831 { 00832 int idx0 = cvFloor(*p0*a0 + b0); 00833 int idx1 = cvFloor(*p1*a1 + b1); 00834 if( (unsigned)idx0 < (unsigned)sz0 && (unsigned)idx1 < (unsigned)sz1 ) 00835 ((int*)(H + hstep0*idx0))[idx1]++; 00836 } 00837 } 00838 #endif //HAVE_TBB 00839 return; 00840 } 00841 else if( dims == 3 ) 00842 { 00843 #ifdef HAVE_TBB 00844 if( calcHist3D_Invoker<T>::isFit(hist, imsize) ) 00845 { 00846 calcHist3D_Invoker<T> body(_ptrs, _deltas, imsize, hist, uniranges, dims, hstep, size); 00847 parallel_for(BlockedRange(0, imsize.height), body); 00848 return; 00849 } 00850 #endif 00851 double a0 = uniranges[0], b0 = uniranges[1], 00852 a1 = uniranges[2], b1 = uniranges[3], 00853 a2 = uniranges[4], b2 = uniranges[5]; 00854 int sz0 = size[0], sz1 = size[1], sz2 = size[2]; 00855 int d0 = deltas[0], step0 = deltas[1], 00856 d1 = deltas[2], step1 = deltas[3], 00857 d2 = deltas[4], step2 = deltas[5]; 00858 size_t hstep0 = hstep[0], hstep1 = hstep[1]; 00859 const T* p0 = (const T*)ptrs[0]; 00860 const T* p1 = (const T*)ptrs[1]; 00861 const T* p2 = (const T*)ptrs[2]; 00862 00863 for( ; imsize.height--; p0 += step0, p1 += step1, p2 += step2, mask += mstep ) 00864 { 00865 if( !mask ) 00866 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 ) 00867 { 00868 int idx0 = cvFloor(*p0*a0 + b0); 00869 int idx1 = cvFloor(*p1*a1 + b1); 00870 int idx2 = cvFloor(*p2*a2 + b2); 00871 if( (unsigned)idx0 < (unsigned)sz0 && 00872 (unsigned)idx1 < (unsigned)sz1 && 00873 (unsigned)idx2 < (unsigned)sz2 ) 00874 ((int*)(H + hstep0*idx0 + hstep1*idx1))[idx2]++; 00875 } 00876 else 00877 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 ) 00878 if( mask[x] ) 00879 { 00880 int idx0 = cvFloor(*p0*a0 + b0); 00881 int idx1 = cvFloor(*p1*a1 + b1); 00882 int idx2 = cvFloor(*p2*a2 + b2); 00883 if( (unsigned)idx0 < (unsigned)sz0 && 00884 
(unsigned)idx1 < (unsigned)sz1 && 00885 (unsigned)idx2 < (unsigned)sz2 ) 00886 ((int*)(H + hstep0*idx0 + hstep1*idx1))[idx2]++; 00887 } 00888 } 00889 } 00890 else 00891 { 00892 for( ; imsize.height--; mask += mstep ) 00893 { 00894 if( !mask ) 00895 for( x = 0; x < imsize.width; x++ ) 00896 { 00897 uchar* Hptr = H; 00898 for( i = 0; i < dims; i++ ) 00899 { 00900 int idx = cvFloor(*ptrs[i]*uniranges[i*2] + uniranges[i*2+1]); 00901 if( (unsigned)idx >= (unsigned)size[i] ) 00902 break; 00903 ptrs[i] += deltas[i*2]; 00904 Hptr += idx*hstep[i]; 00905 } 00906 00907 if( i == dims ) 00908 ++*((int*)Hptr); 00909 else 00910 for( ; i < dims; i++ ) 00911 ptrs[i] += deltas[i*2]; 00912 } 00913 else 00914 for( x = 0; x < imsize.width; x++ ) 00915 { 00916 uchar* Hptr = H; 00917 i = 0; 00918 if( mask[x] ) 00919 for( ; i < dims; i++ ) 00920 { 00921 int idx = cvFloor(*ptrs[i]*uniranges[i*2] + uniranges[i*2+1]); 00922 if( (unsigned)idx >= (unsigned)size[i] ) 00923 break; 00924 ptrs[i] += deltas[i*2]; 00925 Hptr += idx*hstep[i]; 00926 } 00927 00928 if( i == dims ) 00929 ++*((int*)Hptr); 00930 else 00931 for( ; i < dims; i++ ) 00932 ptrs[i] += deltas[i*2]; 00933 } 00934 for( i = 0; i < dims; i++ ) 00935 ptrs[i] += deltas[i*2 + 1]; 00936 } 00937 } 00938 } 00939 else 00940 { 00941 // non-uniform histogram 00942 const float* ranges[CV_MAX_DIM]; 00943 for( i = 0; i < dims; i++ ) 00944 ranges[i] = &_ranges[i][0]; 00945 00946 for( ; imsize.height--; mask += mstep ) 00947 { 00948 for( x = 0; x < imsize.width; x++ ) 00949 { 00950 uchar* Hptr = H; 00951 i = 0; 00952 00953 if( !mask || mask[x] ) 00954 for( ; i < dims; i++ ) 00955 { 00956 float v = (float)*ptrs[i]; 00957 const float* R = ranges[i]; 00958 int idx = -1, sz = size[i]; 00959 00960 while( v >= R[idx+1] && ++idx < sz ) 00961 ; // nop 00962 00963 if( (unsigned)idx >= (unsigned)sz ) 00964 break; 00965 00966 ptrs[i] += deltas[i*2]; 00967 Hptr += idx*hstep[i]; 00968 } 00969 00970 if( i == dims ) 00971 ++*((int*)Hptr); 00972 else 00973 for( ; 
i < dims; i++ ) 00974 ptrs[i] += deltas[i*2]; 00975 } 00976 00977 for( i = 0; i < dims; i++ ) 00978 ptrs[i] += deltas[i*2 + 1]; 00979 } 00980 } 00981 } 00982 00983 00984 static void 00985 calcHist_8u( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas, 00986 Size imsize, Mat& hist, int dims, const float** _ranges, 00987 const double* _uniranges, bool uniform ) 00988 { 00989 uchar** ptrs = &_ptrs[0]; 00990 const int* deltas = &_deltas[0]; 00991 uchar* H = hist.ptr(); 00992 int x; 00993 const uchar* mask = _ptrs[dims]; 00994 int mstep = _deltas[dims*2 + 1]; 00995 std::vector<size_t> _tab; 00996 00997 calcHistLookupTables_8u( hist, SparseMat(), dims, _ranges, _uniranges, uniform, false, _tab ); 00998 const size_t* tab = &_tab[0]; 00999 01000 if( dims == 1 ) 01001 { 01002 #ifdef HAVE_TBB 01003 if( CalcHist1D_8uInvoker::isFit(hist, imsize) ) 01004 { 01005 int treadsNumber = tbb::task_scheduler_init::default_num_threads(); 01006 int grainSize = imsize.height/treadsNumber; 01007 tbb::mutex histogramWriteLock; 01008 01009 CalcHist1D_8uInvoker body(_ptrs, _deltas, imsize, hist, dims, _tab, &histogramWriteLock); 01010 parallel_for(BlockedRange(0, imsize.height, grainSize), body); 01011 return; 01012 } 01013 #endif 01014 int d0 = deltas[0], step0 = deltas[1]; 01015 int matH[256] = { 0, }; 01016 const uchar* p0 = (const uchar*)ptrs[0]; 01017 01018 for( ; imsize.height--; p0 += step0, mask += mstep ) 01019 { 01020 if( !mask ) 01021 { 01022 if( d0 == 1 ) 01023 { 01024 for( x = 0; x <= imsize.width - 4; x += 4 ) 01025 { 01026 int t0 = p0[x], t1 = p0[x+1]; 01027 matH[t0]++; matH[t1]++; 01028 t0 = p0[x+2]; t1 = p0[x+3]; 01029 matH[t0]++; matH[t1]++; 01030 } 01031 p0 += x; 01032 } 01033 else 01034 for( x = 0; x <= imsize.width - 4; x += 4 ) 01035 { 01036 int t0 = p0[0], t1 = p0[d0]; 01037 matH[t0]++; matH[t1]++; 01038 p0 += d0*2; 01039 t0 = p0[0]; t1 = p0[d0]; 01040 matH[t0]++; matH[t1]++; 01041 p0 += d0*2; 01042 } 01043 01044 for( ; x < imsize.width; x++, p0 += d0 ) 01045 
matH[*p0]++; 01046 } 01047 else 01048 for( x = 0; x < imsize.width; x++, p0 += d0 ) 01049 if( mask[x] ) 01050 matH[*p0]++; 01051 } 01052 01053 for(int i = 0; i < 256; i++ ) 01054 { 01055 size_t hidx = tab[i]; 01056 if( hidx < OUT_OF_RANGE ) 01057 *(int*)(H + hidx) += matH[i]; 01058 } 01059 } 01060 else if( dims == 2 ) 01061 { 01062 #ifdef HAVE_TBB 01063 if( CalcHist2D_8uInvoker::isFit(hist, imsize) ) 01064 { 01065 callCalcHist2D_8u(_ptrs, _deltas, imsize, hist, dims, _tab); 01066 return; 01067 } 01068 #endif 01069 int d0 = deltas[0], step0 = deltas[1], 01070 d1 = deltas[2], step1 = deltas[3]; 01071 const uchar* p0 = (const uchar*)ptrs[0]; 01072 const uchar* p1 = (const uchar*)ptrs[1]; 01073 01074 for( ; imsize.height--; p0 += step0, p1 += step1, mask += mstep ) 01075 { 01076 if( !mask ) 01077 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 ) 01078 { 01079 size_t idx = tab[*p0] + tab[*p1 + 256]; 01080 if( idx < OUT_OF_RANGE ) 01081 ++*(int*)(H + idx); 01082 } 01083 else 01084 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 ) 01085 { 01086 size_t idx; 01087 if( mask[x] && (idx = tab[*p0] + tab[*p1 + 256]) < OUT_OF_RANGE ) 01088 ++*(int*)(H + idx); 01089 } 01090 } 01091 } 01092 else if( dims == 3 ) 01093 { 01094 #ifdef HAVE_TBB 01095 if( CalcHist3D_8uInvoker::isFit(hist, imsize) ) 01096 { 01097 callCalcHist3D_8u(_ptrs, _deltas, imsize, hist, dims, _tab); 01098 return; 01099 } 01100 #endif 01101 int d0 = deltas[0], step0 = deltas[1], 01102 d1 = deltas[2], step1 = deltas[3], 01103 d2 = deltas[4], step2 = deltas[5]; 01104 01105 const uchar* p0 = (const uchar*)ptrs[0]; 01106 const uchar* p1 = (const uchar*)ptrs[1]; 01107 const uchar* p2 = (const uchar*)ptrs[2]; 01108 01109 for( ; imsize.height--; p0 += step0, p1 += step1, p2 += step2, mask += mstep ) 01110 { 01111 if( !mask ) 01112 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 ) 01113 { 01114 size_t idx = tab[*p0] + tab[*p1 + 256] + tab[*p2 + 512]; 01115 if( idx < OUT_OF_RANGE ) 01116 
++*(int*)(H + idx); 01117 } 01118 else 01119 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 ) 01120 { 01121 size_t idx; 01122 if( mask[x] && (idx = tab[*p0] + tab[*p1 + 256] + tab[*p2 + 512]) < OUT_OF_RANGE ) 01123 ++*(int*)(H + idx); 01124 } 01125 } 01126 } 01127 else 01128 { 01129 for( ; imsize.height--; mask += mstep ) 01130 { 01131 if( !mask ) 01132 for( x = 0; x < imsize.width; x++ ) 01133 { 01134 uchar* Hptr = H; 01135 int i = 0; 01136 for( ; i < dims; i++ ) 01137 { 01138 size_t idx = tab[*ptrs[i] + i*256]; 01139 if( idx >= OUT_OF_RANGE ) 01140 break; 01141 Hptr += idx; 01142 ptrs[i] += deltas[i*2]; 01143 } 01144 01145 if( i == dims ) 01146 ++*((int*)Hptr); 01147 else 01148 for( ; i < dims; i++ ) 01149 ptrs[i] += deltas[i*2]; 01150 } 01151 else 01152 for( x = 0; x < imsize.width; x++ ) 01153 { 01154 uchar* Hptr = H; 01155 int i = 0; 01156 if( mask[x] ) 01157 for( ; i < dims; i++ ) 01158 { 01159 size_t idx = tab[*ptrs[i] + i*256]; 01160 if( idx >= OUT_OF_RANGE ) 01161 break; 01162 Hptr += idx; 01163 ptrs[i] += deltas[i*2]; 01164 } 01165 01166 if( i == dims ) 01167 ++*((int*)Hptr); 01168 else 01169 for( ; i < dims; i++ ) 01170 ptrs[i] += deltas[i*2]; 01171 } 01172 for(int i = 0; i < dims; i++ ) 01173 ptrs[i] += deltas[i*2 + 1]; 01174 } 01175 } 01176 } 01177 01178 #ifdef HAVE_IPP 01179 class IPPCalcHistInvoker : 01180 public ParallelLoopBody 01181 { 01182 public: 01183 IPPCalcHistInvoker(const Mat & _src, Mat & _hist, AutoBuffer<Ipp32f> & _levels, Ipp32s _histSize, Ipp32f _low, Ipp32f _high, bool * _ok) : 01184 ParallelLoopBody(), src(&_src), hist(&_hist), levels(&_levels), histSize(_histSize), low(_low), high(_high), ok(_ok) 01185 { 01186 *ok = true; 01187 } 01188 01189 virtual void operator() (const Range & range) const 01190 { 01191 Mat phist(hist->size(), hist->type(), Scalar::all(0)); 01192 #if IPP_VERSION_X100 >= 900 01193 IppiSize roi = {src->cols, range.end - range.start}; 01194 int bufferSize = 0; 01195 int specSize = 0; 01196 
IppiHistogramSpec *pSpec = NULL; 01197 Ipp8u *pBuffer = NULL; 01198 01199 if(ippiHistogramGetBufferSize(ipp8u, roi, &histSize, 1, 1, &specSize, &bufferSize) < 0) 01200 { 01201 *ok = false; 01202 return; 01203 } 01204 01205 pBuffer = (Ipp8u*)ippMalloc(bufferSize); 01206 if(!pBuffer && bufferSize) 01207 { 01208 *ok = false; 01209 return; 01210 } 01211 01212 pSpec = (IppiHistogramSpec*)ippMalloc(specSize); 01213 if(!pSpec && specSize) 01214 { 01215 if(pBuffer) ippFree(pBuffer); 01216 *ok = false; 01217 return; 01218 } 01219 01220 if(ippiHistogramUniformInit(ipp8u, (Ipp32f*)&low, (Ipp32f*)&high, (Ipp32s*)&histSize, 1, pSpec) < 0) 01221 { 01222 if(pSpec) ippFree(pSpec); 01223 if(pBuffer) ippFree(pBuffer); 01224 *ok = false; 01225 return; 01226 } 01227 01228 IppStatus status = ippiHistogram_8u_C1R(src->ptr(range.start), (int)src->step, ippiSize(src->cols, range.end - range.start), 01229 phist.ptr<Ipp32u>(), pSpec, pBuffer); 01230 01231 if(pSpec) ippFree(pSpec); 01232 if(pBuffer) ippFree(pBuffer); 01233 #else 01234 CV_SUPPRESS_DEPRECATED_START 01235 IppStatus status = ippiHistogramEven_8u_C1R(src->ptr(range.start), (int)src->step, ippiSize(src->cols, range.end - range.start), 01236 phist.ptr<Ipp32s>(), (Ipp32s*)(Ipp32f*)*levels, histSize, (Ipp32s)low, (Ipp32s)high); 01237 CV_SUPPRESS_DEPRECATED_END 01238 #endif 01239 if(status < 0) 01240 { 01241 *ok = false; 01242 return; 01243 } 01244 01245 for (int i = 0; i < histSize; ++i) 01246 CV_XADD((int *)(hist->data + i * hist->step), *(int *)(phist.data + i * phist.step)); 01247 } 01248 01249 private: 01250 const Mat * src; 01251 Mat * hist; 01252 AutoBuffer<Ipp32f> * levels; 01253 Ipp32s histSize; 01254 Ipp32f low, high; 01255 bool * ok; 01256 01257 const IPPCalcHistInvoker & operator = (const IPPCalcHistInvoker & ); 01258 }; 01259 01260 #endif 01261 01262 } 01263 01264 #if defined(HAVE_IPP) 01265 namespace cv 01266 { 01267 static bool ipp_calchist(const Mat* images, int nimages, const int* channels, 01268 InputArray _mask, 
OutputArray _hist, int dims, const int* histSize, 01269 const float** ranges, bool uniform, bool accumulate ) 01270 { 01271 Mat mask = _mask.getMat(); 01272 01273 CV_Assert(dims > 0 && histSize); 01274 01275 _hist.create(dims, histSize, CV_32F); 01276 Mat hist = _hist.getMat(), ihist = hist; 01277 ihist.flags = (ihist.flags & ~CV_MAT_TYPE_MASK)|CV_32S; 01278 01279 { 01280 if (nimages == 1 && images[0].type() == CV_8UC1 && dims == 1 && channels && 01281 channels[0] == 0 && mask.empty() && images[0].dims <= 2 && 01282 !accumulate && uniform) 01283 { 01284 ihist.setTo(Scalar::all(0)); 01285 AutoBuffer<Ipp32f> levels(histSize[0] + 1); 01286 01287 bool ok = true; 01288 const Mat & src = images[0]; 01289 int nstripes = std::min<int>(8, static_cast<int>(src.total() / (1 << 16))); 01290 #ifdef HAVE_CONCURRENCY 01291 nstripes = 1; 01292 #endif 01293 IPPCalcHistInvoker invoker(src, ihist, levels, histSize[0] + 1, ranges[0][0], ranges[0][1], &ok); 01294 Range range(0, src.rows); 01295 parallel_for_(range, invoker, nstripes); 01296 01297 if (ok) 01298 { 01299 ihist.convertTo(hist, CV_32F); 01300 return true; 01301 } 01302 } 01303 } 01304 return false; 01305 } 01306 } 01307 #endif 01308 01309 void cv::calcHist ( const Mat* images, int nimages, const int* channels, 01310 InputArray _mask, OutputArray _hist, int dims, const int* histSize, 01311 const float** ranges, bool uniform, bool accumulate ) 01312 { 01313 01314 CV_IPP_RUN(nimages == 1 && images[0].type() == CV_8UC1 && dims == 1 && channels && 01315 channels[0] == 0 && _mask.getMat().empty() && images[0].dims <= 2 && 01316 !accumulate && uniform, 01317 ipp_calchist(images, nimages, channels, 01318 _mask, _hist, dims, histSize, 01319 ranges, uniform, accumulate)); 01320 01321 Mat mask = _mask.getMat(); 01322 01323 CV_Assert(dims > 0 && histSize); 01324 01325 const uchar* const histdata = _hist.getMat().ptr(); 01326 _hist.create(dims, histSize, CV_32F); 01327 Mat hist = _hist.getMat(), ihist = hist; 01328 ihist.flags = 
(ihist.flags & ~CV_MAT_TYPE_MASK)|CV_32S; 01329 01330 if( !accumulate || histdata != hist.data ) 01331 hist = Scalar (0.); 01332 else 01333 hist.convertTo(ihist, CV_32S); 01334 01335 std::vector<uchar*> ptrs; 01336 std::vector<int> deltas; 01337 std::vector<double> uniranges; 01338 Size imsize; 01339 01340 CV_Assert( mask.empty() || mask.type() == CV_8UC1 ); 01341 histPrepareImages( images, nimages, channels, mask, dims, hist.size, ranges, 01342 uniform, ptrs, deltas, imsize, uniranges ); 01343 const double* _uniranges = uniform ? &uniranges[0] : 0; 01344 01345 int depth = images[0].depth(); 01346 01347 if( depth == CV_8U ) 01348 calcHist_8u(ptrs, deltas, imsize, ihist, dims, ranges, _uniranges, uniform ); 01349 else if( depth == CV_16U ) 01350 calcHist_<ushort>(ptrs, deltas, imsize, ihist, dims, ranges, _uniranges, uniform ); 01351 else if( depth == CV_32F ) 01352 calcHist_<float>(ptrs, deltas, imsize, ihist, dims, ranges, _uniranges, uniform ); 01353 else 01354 CV_Error(CV_StsUnsupportedFormat, ""); 01355 01356 ihist.convertTo(hist, CV_32F); 01357 } 01358 01359 01360 namespace cv 01361 { 01362 01363 template<typename T> static void 01364 calcSparseHist_( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas, 01365 Size imsize, SparseMat& hist, int dims, const float** _ranges, 01366 const double* _uniranges, bool uniform ) 01367 { 01368 T** ptrs = (T**)&_ptrs[0]; 01369 const int* deltas = &_deltas[0]; 01370 int i, x; 01371 const uchar* mask = _ptrs[dims]; 01372 int mstep = _deltas[dims*2 + 1]; 01373 const int* size = hist.hdr->size; 01374 int idx[CV_MAX_DIM]; 01375 01376 if( uniform ) 01377 { 01378 const double* uniranges = &_uniranges[0]; 01379 01380 for( ; imsize.height--; mask += mstep ) 01381 { 01382 for( x = 0; x < imsize.width; x++ ) 01383 { 01384 i = 0; 01385 if( !mask || mask[x] ) 01386 for( ; i < dims; i++ ) 01387 { 01388 idx[i] = cvFloor(*ptrs[i]*uniranges[i*2] + uniranges[i*2+1]); 01389 if( (unsigned)idx[i] >= (unsigned)size[i] ) 01390 break; 
01391 ptrs[i] += deltas[i*2]; 01392 } 01393 01394 if( i == dims ) 01395 ++*(int*)hist.ptr(idx, true); 01396 else 01397 for( ; i < dims; i++ ) 01398 ptrs[i] += deltas[i*2]; 01399 } 01400 for( i = 0; i < dims; i++ ) 01401 ptrs[i] += deltas[i*2 + 1]; 01402 } 01403 } 01404 else 01405 { 01406 // non-uniform histogram 01407 const float* ranges[CV_MAX_DIM]; 01408 for( i = 0; i < dims; i++ ) 01409 ranges[i] = &_ranges[i][0]; 01410 01411 for( ; imsize.height--; mask += mstep ) 01412 { 01413 for( x = 0; x < imsize.width; x++ ) 01414 { 01415 i = 0; 01416 01417 if( !mask || mask[x] ) 01418 for( ; i < dims; i++ ) 01419 { 01420 float v = (float)*ptrs[i]; 01421 const float* R = ranges[i]; 01422 int j = -1, sz = size[i]; 01423 01424 while( v >= R[j+1] && ++j < sz ) 01425 ; // nop 01426 01427 if( (unsigned)j >= (unsigned)sz ) 01428 break; 01429 ptrs[i] += deltas[i*2]; 01430 idx[i] = j; 01431 } 01432 01433 if( i == dims ) 01434 ++*(int*)hist.ptr(idx, true); 01435 else 01436 for( ; i < dims; i++ ) 01437 ptrs[i] += deltas[i*2]; 01438 } 01439 01440 for( i = 0; i < dims; i++ ) 01441 ptrs[i] += deltas[i*2 + 1]; 01442 } 01443 } 01444 } 01445 01446 01447 static void 01448 calcSparseHist_8u( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas, 01449 Size imsize, SparseMat& hist, int dims, const float** _ranges, 01450 const double* _uniranges, bool uniform ) 01451 { 01452 uchar** ptrs = (uchar**)&_ptrs[0]; 01453 const int* deltas = &_deltas[0]; 01454 int x; 01455 const uchar* mask = _ptrs[dims]; 01456 int mstep = _deltas[dims*2 + 1]; 01457 int idx[CV_MAX_DIM]; 01458 std::vector<size_t> _tab; 01459 01460 calcHistLookupTables_8u( Mat(), hist, dims, _ranges, _uniranges, uniform, true, _tab ); 01461 const size_t* tab = &_tab[0]; 01462 01463 for( ; imsize.height--; mask += mstep ) 01464 { 01465 for( x = 0; x < imsize.width; x++ ) 01466 { 01467 int i = 0; 01468 if( !mask || mask[x] ) 01469 for( ; i < dims; i++ ) 01470 { 01471 size_t hidx = tab[*ptrs[i] + i*256]; 01472 if( hidx >= 
OUT_OF_RANGE ) 01473 break; 01474 ptrs[i] += deltas[i*2]; 01475 idx[i] = (int)hidx; 01476 } 01477 01478 if( i == dims ) 01479 ++*(int*)hist.ptr(idx,true); 01480 else 01481 for( ; i < dims; i++ ) 01482 ptrs[i] += deltas[i*2]; 01483 } 01484 for(int i = 0; i < dims; i++ ) 01485 ptrs[i] += deltas[i*2 + 1]; 01486 } 01487 } 01488 01489 01490 static void calcHist( const Mat* images, int nimages, const int* channels, 01491 const Mat& mask, SparseMat& hist, int dims, const int* histSize, 01492 const float** ranges, bool uniform, bool accumulate, bool keepInt ) 01493 { 01494 size_t i, N; 01495 01496 if( !accumulate ) 01497 hist.create(dims, histSize, CV_32F); 01498 else 01499 { 01500 SparseMatIterator it = hist.begin(); 01501 for( i = 0, N = hist.nzcount(); i < N; i++, ++it ) 01502 { 01503 Cv32suf* val = (Cv32suf*)it.ptr; 01504 val->i = cvRound(val->f); 01505 } 01506 } 01507 01508 std::vector<uchar*> ptrs; 01509 std::vector<int> deltas; 01510 std::vector<double> uniranges; 01511 Size imsize; 01512 01513 CV_Assert( mask.empty() || mask.type() == CV_8UC1 ); 01514 histPrepareImages( images, nimages, channels, mask, dims, hist.hdr->size, ranges, 01515 uniform, ptrs, deltas, imsize, uniranges ); 01516 const double* _uniranges = uniform ? 
&uniranges[0] : 0; 01517 01518 int depth = images[0].depth(); 01519 if( depth == CV_8U ) 01520 calcSparseHist_8u(ptrs, deltas, imsize, hist, dims, ranges, _uniranges, uniform ); 01521 else if( depth == CV_16U ) 01522 calcSparseHist_<ushort>(ptrs, deltas, imsize, hist, dims, ranges, _uniranges, uniform ); 01523 else if( depth == CV_32F ) 01524 calcSparseHist_<float>(ptrs, deltas, imsize, hist, dims, ranges, _uniranges, uniform ); 01525 else 01526 CV_Error(CV_StsUnsupportedFormat, ""); 01527 01528 if( !keepInt ) 01529 { 01530 SparseMatIterator it = hist.begin(); 01531 for( i = 0, N = hist.nzcount(); i < N; i++, ++it ) 01532 { 01533 Cv32suf* val = (Cv32suf*)it.ptr; 01534 val->f = (float)val->i; 01535 } 01536 } 01537 } 01538 01539 #ifdef HAVE_OPENCL 01540 01541 enum 01542 { 01543 BINS = 256 01544 }; 01545 01546 static bool ocl_calcHist1(InputArray _src, OutputArray _hist, int ddepth = CV_32S) 01547 { 01548 const ocl::Device & dev = ocl::Device::getDefault(); 01549 int compunits = dev.maxComputeUnits(); 01550 size_t wgs = dev.maxWorkGroupSize(); 01551 Size size = _src.size(); 01552 bool use16 = size.width % 16 == 0 && _src.offset() % 16 == 0 && _src.step() % 16 == 0; 01553 int kercn = dev.isAMD() && use16 ? 16 : std::min(4, ocl::predictOptimalVectorWidth(_src)); 01554 01555 ocl::Kernel k1("calculate_histogram", ocl::imgproc::histogram_oclsrc, 01556 format("-D BINS=%d -D HISTS_COUNT=%d -D WGS=%d -D kercn=%d -D T=%s%s", 01557 BINS, compunits, wgs, kercn, 01558 kercn == 4 ? "int" : ocl::typeToStr(CV_8UC(kercn)), 01559 _src.isContinuous() ? 
" -D HAVE_SRC_CONT" : "")); 01560 if (k1.empty()) 01561 return false; 01562 01563 _hist.create(BINS, 1, ddepth); 01564 UMat src = _src.getUMat(), ghist(1, BINS * compunits, CV_32SC1), 01565 hist = _hist.getUMat(); 01566 01567 k1.args(ocl::KernelArg::ReadOnly(src), 01568 ocl::KernelArg::PtrWriteOnly(ghist), (int)src.total()); 01569 01570 size_t globalsize = compunits * wgs; 01571 if (!k1.run(1, &globalsize, &wgs, false)) 01572 return false; 01573 01574 char cvt[40]; 01575 ocl::Kernel k2("merge_histogram", ocl::imgproc::histogram_oclsrc, 01576 format("-D BINS=%d -D HISTS_COUNT=%d -D WGS=%d -D convertToHT=%s -D HT=%s", 01577 BINS, compunits, (int)wgs, ocl::convertTypeStr(CV_32S, ddepth, 1, cvt), 01578 ocl::typeToStr(ddepth))); 01579 if (k2.empty()) 01580 return false; 01581 01582 k2.args(ocl::KernelArg::PtrReadOnly(ghist), 01583 ocl::KernelArg::WriteOnlyNoSize(hist)); 01584 01585 return k2.run(1, &wgs, &wgs, false); 01586 } 01587 01588 static bool ocl_calcHist(InputArrayOfArrays images, OutputArray hist) 01589 { 01590 std::vector<UMat> v; 01591 images.getUMatVector(v); 01592 01593 return ocl_calcHist1(v[0], hist, CV_32F); 01594 } 01595 01596 #endif 01597 01598 } 01599 01600 void cv::calcHist ( const Mat* images, int nimages, const int* channels, 01601 InputArray _mask, SparseMat& hist, int dims, const int* histSize, 01602 const float** ranges, bool uniform, bool accumulate ) 01603 { 01604 Mat mask = _mask.getMat(); 01605 calcHist( images, nimages, channels, mask, hist, dims, histSize, 01606 ranges, uniform, accumulate, false ); 01607 } 01608 01609 01610 void cv::calcHist ( InputArrayOfArrays images, const std::vector<int>& channels, 01611 InputArray mask, OutputArray hist, 01612 const std::vector<int>& histSize, 01613 const std::vector<float>& ranges, 01614 bool accumulate ) 01615 { 01616 #ifdef HAVE_OPENCL 01617 CV_OCL_RUN(images.total() == 1 && channels.size() == 1 && images.channels(0) == 1 && 01618 channels[0] == 0 && images.isUMatVector() && mask.empty() && 
!accumulate && 01619 histSize.size() == 1 && histSize[0] == BINS && ranges.size() == 2 && 01620 ranges[0] == 0 && ranges[1] == BINS, 01621 ocl_calcHist(images, hist)) 01622 #endif 01623 01624 int i, dims = (int)histSize.size(), rsz = (int)ranges.size(), csz = (int)channels.size(); 01625 int nimages = (int)images.total(); 01626 01627 CV_Assert(nimages > 0 && dims > 0); 01628 CV_Assert(rsz == dims*2 || (rsz == 0 && images.depth(0) == CV_8U)); 01629 CV_Assert(csz == 0 || csz == dims); 01630 float* _ranges[CV_MAX_DIM]; 01631 if( rsz > 0 ) 01632 { 01633 for( i = 0; i < rsz/2; i++ ) 01634 _ranges[i] = (float*)&ranges[i*2]; 01635 } 01636 01637 AutoBuffer<Mat> buf(nimages); 01638 for( i = 0; i < nimages; i++ ) 01639 buf[i] = images.getMat(i); 01640 01641 calcHist(&buf[0], nimages, csz ? &channels[0] : 0, 01642 mask, hist, dims, &histSize[0], rsz ? (const float**)_ranges : 0, 01643 true, accumulate); 01644 } 01645 01646 01647 /////////////////////////////////////// B A C K P R O J E C T //////////////////////////////////// 01648 01649 namespace cv 01650 { 01651 01652 template<typename T, typename BT> static void 01653 calcBackProj_( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas, 01654 Size imsize, const Mat& hist, int dims, const float** _ranges, 01655 const double* _uniranges, float scale, bool uniform ) 01656 { 01657 T** ptrs = (T**)&_ptrs[0]; 01658 const int* deltas = &_deltas[0]; 01659 const uchar* H = hist.ptr(); 01660 int i, x; 01661 BT* bproj = (BT*)_ptrs[dims]; 01662 int bpstep = _deltas[dims*2 + 1]; 01663 int size[CV_MAX_DIM]; 01664 size_t hstep[CV_MAX_DIM]; 01665 01666 for( i = 0; i < dims; i++ ) 01667 { 01668 size[i] = hist.size[i]; 01669 hstep[i] = hist.step[i]; 01670 } 01671 01672 if( uniform ) 01673 { 01674 const double* uniranges = &_uniranges[0]; 01675 01676 if( dims == 1 ) 01677 { 01678 double a = uniranges[0], b = uniranges[1]; 01679 int sz = size[0], d0 = deltas[0], step0 = deltas[1]; 01680 const T* p0 = (const T*)ptrs[0]; 01681 01682 for( ; 
imsize.height--; p0 += step0, bproj += bpstep ) 01683 { 01684 for( x = 0; x < imsize.width; x++, p0 += d0 ) 01685 { 01686 int idx = cvFloor(*p0*a + b); 01687 bproj[x] = (unsigned)idx < (unsigned)sz ? saturate_cast<BT>(((const float*)H)[idx]*scale) : 0; 01688 } 01689 } 01690 } 01691 else if( dims == 2 ) 01692 { 01693 double a0 = uniranges[0], b0 = uniranges[1], 01694 a1 = uniranges[2], b1 = uniranges[3]; 01695 int sz0 = size[0], sz1 = size[1]; 01696 int d0 = deltas[0], step0 = deltas[1], 01697 d1 = deltas[2], step1 = deltas[3]; 01698 size_t hstep0 = hstep[0]; 01699 const T* p0 = (const T*)ptrs[0]; 01700 const T* p1 = (const T*)ptrs[1]; 01701 01702 for( ; imsize.height--; p0 += step0, p1 += step1, bproj += bpstep ) 01703 { 01704 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 ) 01705 { 01706 int idx0 = cvFloor(*p0*a0 + b0); 01707 int idx1 = cvFloor(*p1*a1 + b1); 01708 bproj[x] = (unsigned)idx0 < (unsigned)sz0 && 01709 (unsigned)idx1 < (unsigned)sz1 ? 01710 saturate_cast<BT>(((const float*)(H + hstep0*idx0))[idx1]*scale) : 0; 01711 } 01712 } 01713 } 01714 else if( dims == 3 ) 01715 { 01716 double a0 = uniranges[0], b0 = uniranges[1], 01717 a1 = uniranges[2], b1 = uniranges[3], 01718 a2 = uniranges[4], b2 = uniranges[5]; 01719 int sz0 = size[0], sz1 = size[1], sz2 = size[2]; 01720 int d0 = deltas[0], step0 = deltas[1], 01721 d1 = deltas[2], step1 = deltas[3], 01722 d2 = deltas[4], step2 = deltas[5]; 01723 size_t hstep0 = hstep[0], hstep1 = hstep[1]; 01724 const T* p0 = (const T*)ptrs[0]; 01725 const T* p1 = (const T*)ptrs[1]; 01726 const T* p2 = (const T*)ptrs[2]; 01727 01728 for( ; imsize.height--; p0 += step0, p1 += step1, p2 += step2, bproj += bpstep ) 01729 { 01730 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 ) 01731 { 01732 int idx0 = cvFloor(*p0*a0 + b0); 01733 int idx1 = cvFloor(*p1*a1 + b1); 01734 int idx2 = cvFloor(*p2*a2 + b2); 01735 bproj[x] = (unsigned)idx0 < (unsigned)sz0 && 01736 (unsigned)idx1 < (unsigned)sz1 && 01737 
(unsigned)idx2 < (unsigned)sz2 ? 01738 saturate_cast<BT>(((const float*)(H + hstep0*idx0 + hstep1*idx1))[idx2]*scale) : 0; 01739 } 01740 } 01741 } 01742 else 01743 { 01744 for( ; imsize.height--; bproj += bpstep ) 01745 { 01746 for( x = 0; x < imsize.width; x++ ) 01747 { 01748 const uchar* Hptr = H; 01749 for( i = 0; i < dims; i++ ) 01750 { 01751 int idx = cvFloor(*ptrs[i]*uniranges[i*2] + uniranges[i*2+1]); 01752 if( (unsigned)idx >= (unsigned)size[i] || (_ranges && *ptrs[i] >= _ranges[i][1])) 01753 break; 01754 ptrs[i] += deltas[i*2]; 01755 Hptr += idx*hstep[i]; 01756 } 01757 01758 if( i == dims ) 01759 bproj[x] = saturate_cast<BT>(*(const float*)Hptr*scale); 01760 else 01761 { 01762 bproj[x] = 0; 01763 for( ; i < dims; i++ ) 01764 ptrs[i] += deltas[i*2]; 01765 } 01766 } 01767 for( i = 0; i < dims; i++ ) 01768 ptrs[i] += deltas[i*2 + 1]; 01769 } 01770 } 01771 } 01772 else 01773 { 01774 // non-uniform histogram 01775 const float* ranges[CV_MAX_DIM]; 01776 for( i = 0; i < dims; i++ ) 01777 ranges[i] = &_ranges[i][0]; 01778 01779 for( ; imsize.height--; bproj += bpstep ) 01780 { 01781 for( x = 0; x < imsize.width; x++ ) 01782 { 01783 const uchar* Hptr = H; 01784 for( i = 0; i < dims; i++ ) 01785 { 01786 float v = (float)*ptrs[i]; 01787 const float* R = ranges[i]; 01788 int idx = -1, sz = size[i]; 01789 01790 while( v >= R[idx+1] && ++idx < sz ) 01791 ; // nop 01792 01793 if( (unsigned)idx >= (unsigned)sz ) 01794 break; 01795 01796 ptrs[i] += deltas[i*2]; 01797 Hptr += idx*hstep[i]; 01798 } 01799 01800 if( i == dims ) 01801 bproj[x] = saturate_cast<BT>(*(const float*)Hptr*scale); 01802 else 01803 { 01804 bproj[x] = 0; 01805 for( ; i < dims; i++ ) 01806 ptrs[i] += deltas[i*2]; 01807 } 01808 } 01809 01810 for( i = 0; i < dims; i++ ) 01811 ptrs[i] += deltas[i*2 + 1]; 01812 } 01813 } 01814 } 01815 01816 01817 static void 01818 calcBackProj_8u( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas, 01819 Size imsize, const Mat& hist, int dims, const float** _ranges, 
01820 const double* _uniranges, float scale, bool uniform ) 01821 { 01822 uchar** ptrs = &_ptrs[0]; 01823 const int* deltas = &_deltas[0]; 01824 const uchar* H = hist.ptr(); 01825 int i, x; 01826 uchar* bproj = _ptrs[dims]; 01827 int bpstep = _deltas[dims*2 + 1]; 01828 std::vector<size_t> _tab; 01829 01830 calcHistLookupTables_8u( hist, SparseMat(), dims, _ranges, _uniranges, uniform, false, _tab ); 01831 const size_t* tab = &_tab[0]; 01832 01833 if( dims == 1 ) 01834 { 01835 int d0 = deltas[0], step0 = deltas[1]; 01836 uchar matH[256] = {0}; 01837 const uchar* p0 = (const uchar*)ptrs[0]; 01838 01839 for( i = 0; i < 256; i++ ) 01840 { 01841 size_t hidx = tab[i]; 01842 if( hidx < OUT_OF_RANGE ) 01843 matH[i] = saturate_cast<uchar>(*(float*)(H + hidx)*scale); 01844 } 01845 01846 for( ; imsize.height--; p0 += step0, bproj += bpstep ) 01847 { 01848 if( d0 == 1 ) 01849 { 01850 for( x = 0; x <= imsize.width - 4; x += 4 ) 01851 { 01852 uchar t0 = matH[p0[x]], t1 = matH[p0[x+1]]; 01853 bproj[x] = t0; bproj[x+1] = t1; 01854 t0 = matH[p0[x+2]]; t1 = matH[p0[x+3]]; 01855 bproj[x+2] = t0; bproj[x+3] = t1; 01856 } 01857 p0 += x; 01858 } 01859 else 01860 for( x = 0; x <= imsize.width - 4; x += 4 ) 01861 { 01862 uchar t0 = matH[p0[0]], t1 = matH[p0[d0]]; 01863 bproj[x] = t0; bproj[x+1] = t1; 01864 p0 += d0*2; 01865 t0 = matH[p0[0]]; t1 = matH[p0[d0]]; 01866 bproj[x+2] = t0; bproj[x+3] = t1; 01867 p0 += d0*2; 01868 } 01869 01870 for( ; x < imsize.width; x++, p0 += d0 ) 01871 bproj[x] = matH[*p0]; 01872 } 01873 } 01874 else if( dims == 2 ) 01875 { 01876 int d0 = deltas[0], step0 = deltas[1], 01877 d1 = deltas[2], step1 = deltas[3]; 01878 const uchar* p0 = (const uchar*)ptrs[0]; 01879 const uchar* p1 = (const uchar*)ptrs[1]; 01880 01881 for( ; imsize.height--; p0 += step0, p1 += step1, bproj += bpstep ) 01882 { 01883 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 ) 01884 { 01885 size_t idx = tab[*p0] + tab[*p1 + 256]; 01886 bproj[x] = idx < OUT_OF_RANGE ? 
saturate_cast<uchar>(*(const float*)(H + idx)*scale) : 0; 01887 } 01888 } 01889 } 01890 else if( dims == 3 ) 01891 { 01892 int d0 = deltas[0], step0 = deltas[1], 01893 d1 = deltas[2], step1 = deltas[3], 01894 d2 = deltas[4], step2 = deltas[5]; 01895 const uchar* p0 = (const uchar*)ptrs[0]; 01896 const uchar* p1 = (const uchar*)ptrs[1]; 01897 const uchar* p2 = (const uchar*)ptrs[2]; 01898 01899 for( ; imsize.height--; p0 += step0, p1 += step1, p2 += step2, bproj += bpstep ) 01900 { 01901 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 ) 01902 { 01903 size_t idx = tab[*p0] + tab[*p1 + 256] + tab[*p2 + 512]; 01904 bproj[x] = idx < OUT_OF_RANGE ? saturate_cast<uchar>(*(const float*)(H + idx)*scale) : 0; 01905 } 01906 } 01907 } 01908 else 01909 { 01910 for( ; imsize.height--; bproj += bpstep ) 01911 { 01912 for( x = 0; x < imsize.width; x++ ) 01913 { 01914 const uchar* Hptr = H; 01915 for( i = 0; i < dims; i++ ) 01916 { 01917 size_t idx = tab[*ptrs[i] + i*256]; 01918 if( idx >= OUT_OF_RANGE ) 01919 break; 01920 ptrs[i] += deltas[i*2]; 01921 Hptr += idx; 01922 } 01923 01924 if( i == dims ) 01925 bproj[x] = saturate_cast<uchar>(*(const float*)Hptr*scale); 01926 else 01927 { 01928 bproj[x] = 0; 01929 for( ; i < dims; i++ ) 01930 ptrs[i] += deltas[i*2]; 01931 } 01932 } 01933 for( i = 0; i < dims; i++ ) 01934 ptrs[i] += deltas[i*2 + 1]; 01935 } 01936 } 01937 } 01938 01939 } 01940 01941 void cv::calcBackProject( const Mat* images, int nimages, const int* channels, 01942 InputArray _hist, OutputArray _backProject, 01943 const float** ranges, double scale, bool uniform ) 01944 { 01945 Mat hist = _hist.getMat(); 01946 std::vector<uchar*> ptrs; 01947 std::vector<int> deltas; 01948 std::vector<double> uniranges; 01949 Size imsize; 01950 int dims = hist.dims == 2 && hist.size[1] == 1 ? 
1 : hist.dims; 01951 01952 CV_Assert( dims > 0 && !hist.empty() ); 01953 _backProject.create( images[0].size(), images[0].depth() ); 01954 Mat backProject = _backProject.getMat(); 01955 histPrepareImages( images, nimages, channels, backProject, dims, hist.size, ranges, 01956 uniform, ptrs, deltas, imsize, uniranges ); 01957 const double* _uniranges = uniform ? &uniranges[0] : 0; 01958 01959 int depth = images[0].depth(); 01960 if( depth == CV_8U ) 01961 calcBackProj_8u(ptrs, deltas, imsize, hist, dims, ranges, _uniranges, (float)scale, uniform); 01962 else if( depth == CV_16U ) 01963 calcBackProj_<ushort, ushort>(ptrs, deltas, imsize, hist, dims, ranges, _uniranges, (float)scale, uniform ); 01964 else if( depth == CV_32F ) 01965 calcBackProj_<float, float>(ptrs, deltas, imsize, hist, dims, ranges, _uniranges, (float)scale, uniform ); 01966 else 01967 CV_Error(CV_StsUnsupportedFormat, ""); 01968 } 01969 01970 01971 namespace cv 01972 { 01973 01974 template<typename T, typename BT> static void 01975 calcSparseBackProj_( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas, 01976 Size imsize, const SparseMat& hist, int dims, const float** _ranges, 01977 const double* _uniranges, float scale, bool uniform ) 01978 { 01979 T** ptrs = (T**)&_ptrs[0]; 01980 const int* deltas = &_deltas[0]; 01981 int i, x; 01982 BT* bproj = (BT*)_ptrs[dims]; 01983 int bpstep = _deltas[dims*2 + 1]; 01984 const int* size = hist.hdr->size; 01985 int idx[CV_MAX_DIM]; 01986 const SparseMat_<float>& hist_ = (const SparseMat_<float>&)hist; 01987 01988 if( uniform ) 01989 { 01990 const double* uniranges = &_uniranges[0]; 01991 for( ; imsize.height--; bproj += bpstep ) 01992 { 01993 for( x = 0; x < imsize.width; x++ ) 01994 { 01995 for( i = 0; i < dims; i++ ) 01996 { 01997 idx[i] = cvFloor(*ptrs[i]*uniranges[i*2] + uniranges[i*2+1]); 01998 if( (unsigned)idx[i] >= (unsigned)size[i] ) 01999 break; 02000 ptrs[i] += deltas[i*2]; 02001 } 02002 02003 if( i == dims ) 02004 bproj[x] = 
saturate_cast<BT>(hist_(idx)*scale); 02005 else 02006 { 02007 bproj[x] = 0; 02008 for( ; i < dims; i++ ) 02009 ptrs[i] += deltas[i*2]; 02010 } 02011 } 02012 for( i = 0; i < dims; i++ ) 02013 ptrs[i] += deltas[i*2 + 1]; 02014 } 02015 } 02016 else 02017 { 02018 // non-uniform histogram 02019 const float* ranges[CV_MAX_DIM]; 02020 for( i = 0; i < dims; i++ ) 02021 ranges[i] = &_ranges[i][0]; 02022 02023 for( ; imsize.height--; bproj += bpstep ) 02024 { 02025 for( x = 0; x < imsize.width; x++ ) 02026 { 02027 for( i = 0; i < dims; i++ ) 02028 { 02029 float v = (float)*ptrs[i]; 02030 const float* R = ranges[i]; 02031 int j = -1, sz = size[i]; 02032 02033 while( v >= R[j+1] && ++j < sz ) 02034 ; // nop 02035 02036 if( (unsigned)j >= (unsigned)sz ) 02037 break; 02038 idx[i] = j; 02039 ptrs[i] += deltas[i*2]; 02040 } 02041 02042 if( i == dims ) 02043 bproj[x] = saturate_cast<BT>(hist_(idx)*scale); 02044 else 02045 { 02046 bproj[x] = 0; 02047 for( ; i < dims; i++ ) 02048 ptrs[i] += deltas[i*2]; 02049 } 02050 } 02051 02052 for( i = 0; i < dims; i++ ) 02053 ptrs[i] += deltas[i*2 + 1]; 02054 } 02055 } 02056 } 02057 02058 02059 static void 02060 calcSparseBackProj_8u( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas, 02061 Size imsize, const SparseMat& hist, int dims, const float** _ranges, 02062 const double* _uniranges, float scale, bool uniform ) 02063 { 02064 uchar** ptrs = &_ptrs[0]; 02065 const int* deltas = &_deltas[0]; 02066 int i, x; 02067 uchar* bproj = _ptrs[dims]; 02068 int bpstep = _deltas[dims*2 + 1]; 02069 std::vector<size_t> _tab; 02070 int idx[CV_MAX_DIM]; 02071 02072 calcHistLookupTables_8u( Mat(), hist, dims, _ranges, _uniranges, uniform, true, _tab ); 02073 const size_t* tab = &_tab[0]; 02074 02075 for( ; imsize.height--; bproj += bpstep ) 02076 { 02077 for( x = 0; x < imsize.width; x++ ) 02078 { 02079 for( i = 0; i < dims; i++ ) 02080 { 02081 size_t hidx = tab[*ptrs[i] + i*256]; 02082 if( hidx >= OUT_OF_RANGE ) 02083 break; 02084 idx[i] = 
(int)hidx; 02085 ptrs[i] += deltas[i*2]; 02086 } 02087 02088 if( i == dims ) 02089 bproj[x] = saturate_cast<uchar>(hist.value<float>(idx)*scale); 02090 else 02091 { 02092 bproj[x] = 0; 02093 for( ; i < dims; i++ ) 02094 ptrs[i] += deltas[i*2]; 02095 } 02096 } 02097 for( i = 0; i < dims; i++ ) 02098 ptrs[i] += deltas[i*2 + 1]; 02099 } 02100 } 02101 02102 } 02103 02104 void cv::calcBackProject( const Mat* images, int nimages, const int* channels, 02105 const SparseMat& hist, OutputArray _backProject, 02106 const float** ranges, double scale, bool uniform ) 02107 { 02108 std::vector<uchar*> ptrs; 02109 std::vector<int> deltas; 02110 std::vector<double> uniranges; 02111 Size imsize; 02112 int dims = hist.dims(); 02113 02114 CV_Assert( dims > 0 ); 02115 _backProject.create( images[0].size(), images[0].depth() ); 02116 Mat backProject = _backProject.getMat(); 02117 histPrepareImages( images, nimages, channels, backProject, 02118 dims, hist.hdr->size, ranges, 02119 uniform, ptrs, deltas, imsize, uniranges ); 02120 const double* _uniranges = uniform ? 
&uniranges[0] : 0; 02121 02122 int depth = images[0].depth(); 02123 if( depth == CV_8U ) 02124 calcSparseBackProj_8u(ptrs, deltas, imsize, hist, dims, ranges, 02125 _uniranges, (float)scale, uniform); 02126 else if( depth == CV_16U ) 02127 calcSparseBackProj_<ushort, ushort>(ptrs, deltas, imsize, hist, dims, ranges, 02128 _uniranges, (float)scale, uniform ); 02129 else if( depth == CV_32F ) 02130 calcSparseBackProj_<float, float>(ptrs, deltas, imsize, hist, dims, ranges, 02131 _uniranges, (float)scale, uniform ); 02132 else 02133 CV_Error(CV_StsUnsupportedFormat, ""); 02134 } 02135 02136 #ifdef HAVE_OPENCL 02137 02138 namespace cv { 02139 02140 static void getUMatIndex(const std::vector<UMat> & um, int cn, int & idx, int & cnidx) 02141 { 02142 int totalChannels = 0; 02143 for (size_t i = 0, size = um.size(); i < size; ++i) 02144 { 02145 int ccn = um[i].channels(); 02146 totalChannels += ccn; 02147 02148 if (totalChannels == cn) 02149 { 02150 idx = (int)(i + 1); 02151 cnidx = 0; 02152 return; 02153 } 02154 else if (totalChannels > cn) 02155 { 02156 idx = (int)i; 02157 cnidx = i == 0 ? 
cn : (cn - totalChannels + ccn); 02158 return; 02159 } 02160 } 02161 02162 idx = cnidx = -1; 02163 } 02164 02165 static bool ocl_calcBackProject( InputArrayOfArrays _images, std::vector<int> channels, 02166 InputArray _hist, OutputArray _dst, 02167 const std::vector<float>& ranges, 02168 float scale, size_t histdims ) 02169 { 02170 std::vector<UMat> images; 02171 _images.getUMatVector(images); 02172 02173 size_t nimages = images.size(), totalcn = images[0].channels(); 02174 02175 CV_Assert(nimages > 0); 02176 Size size = images[0].size(); 02177 int depth = images[0].depth(); 02178 02179 //kernels are valid for this type only 02180 if (depth != CV_8U) 02181 return false; 02182 02183 for (size_t i = 1; i < nimages; ++i) 02184 { 02185 const UMat & m = images[i]; 02186 totalcn += m.channels(); 02187 CV_Assert(size == m.size() && depth == m.depth()); 02188 } 02189 02190 std::sort(channels.begin(), channels.end()); 02191 for (size_t i = 0; i < histdims; ++i) 02192 CV_Assert(channels[i] < (int)totalcn); 02193 02194 if (histdims == 1) 02195 { 02196 int idx, cnidx; 02197 getUMatIndex(images, channels[0], idx, cnidx); 02198 CV_Assert(idx >= 0); 02199 UMat im = images[idx]; 02200 02201 String opts = format("-D histdims=1 -D scn=%d", im.channels()); 02202 ocl::Kernel lutk("calcLUT", ocl::imgproc::calc_back_project_oclsrc, opts); 02203 if (lutk.empty()) 02204 return false; 02205 02206 size_t lsize = 256; 02207 UMat lut(1, (int)lsize, CV_32SC1), hist = _hist.getUMat(), uranges(ranges, true); 02208 02209 lutk.args(ocl::KernelArg::ReadOnlyNoSize(hist), hist.rows, 02210 ocl::KernelArg::PtrWriteOnly(lut), scale, ocl::KernelArg::PtrReadOnly(uranges)); 02211 if (!lutk.run(1, &lsize, NULL, false)) 02212 return false; 02213 02214 ocl::Kernel mapk("LUT", ocl::imgproc::calc_back_project_oclsrc, opts); 02215 if (mapk.empty()) 02216 return false; 02217 02218 _dst.create(size, depth); 02219 UMat dst = _dst.getUMat(); 02220 02221 im.offset += cnidx; 02222 
mapk.args(ocl::KernelArg::ReadOnlyNoSize(im), ocl::KernelArg::PtrReadOnly(lut), 02223 ocl::KernelArg::WriteOnly(dst)); 02224 02225 size_t globalsize[2] = { (size_t)size.width, (size_t)size.height }; 02226 return mapk.run(2, globalsize, NULL, false); 02227 } 02228 else if (histdims == 2) 02229 { 02230 int idx0, idx1, cnidx0, cnidx1; 02231 getUMatIndex(images, channels[0], idx0, cnidx0); 02232 getUMatIndex(images, channels[1], idx1, cnidx1); 02233 CV_Assert(idx0 >= 0 && idx1 >= 0); 02234 UMat im0 = images[idx0], im1 = images[idx1]; 02235 02236 // Lut for the first dimension 02237 String opts = format("-D histdims=2 -D scn1=%d -D scn2=%d", im0.channels(), im1.channels()); 02238 ocl::Kernel lutk1("calcLUT", ocl::imgproc::calc_back_project_oclsrc, opts); 02239 if (lutk1.empty()) 02240 return false; 02241 02242 size_t lsize = 256; 02243 UMat lut(1, (int)lsize<<1, CV_32SC1), uranges(ranges, true), hist = _hist.getUMat(); 02244 02245 lutk1.args(hist.rows, ocl::KernelArg::PtrWriteOnly(lut), (int)0, ocl::KernelArg::PtrReadOnly(uranges), (int)0); 02246 if (!lutk1.run(1, &lsize, NULL, false)) 02247 return false; 02248 02249 // lut for the second dimension 02250 ocl::Kernel lutk2("calcLUT", ocl::imgproc::calc_back_project_oclsrc, opts); 02251 if (lutk2.empty()) 02252 return false; 02253 02254 lut.offset += lsize * sizeof(int); 02255 lutk2.args(hist.cols, ocl::KernelArg::PtrWriteOnly(lut), (int)256, ocl::KernelArg::PtrReadOnly(uranges), (int)2); 02256 if (!lutk2.run(1, &lsize, NULL, false)) 02257 return false; 02258 02259 // perform lut 02260 ocl::Kernel mapk("LUT", ocl::imgproc::calc_back_project_oclsrc, opts); 02261 if (mapk.empty()) 02262 return false; 02263 02264 _dst.create(size, depth); 02265 UMat dst = _dst.getUMat(); 02266 02267 im0.offset += cnidx0; 02268 im1.offset += cnidx1; 02269 mapk.args(ocl::KernelArg::ReadOnlyNoSize(im0), ocl::KernelArg::ReadOnlyNoSize(im1), 02270 ocl::KernelArg::ReadOnlyNoSize(hist), ocl::KernelArg::PtrReadOnly(lut), scale, 
ocl::KernelArg::WriteOnly(dst)); 02271 02272 size_t globalsize[2] = { (size_t)size.width, (size_t)size.height }; 02273 return mapk.run(2, globalsize, NULL, false); 02274 } 02275 return false; 02276 } 02277 02278 } 02279 02280 #endif 02281 02282 void cv::calcBackProject( InputArrayOfArrays images, const std::vector<int>& channels, 02283 InputArray hist, OutputArray dst, 02284 const std::vector<float>& ranges, 02285 double scale ) 02286 { 02287 #ifdef HAVE_OPENCL 02288 Size histSize = hist.size(); 02289 bool _1D = histSize.height == 1 || histSize.width == 1; 02290 size_t histdims = _1D ? 1 : hist.dims(); 02291 #endif 02292 02293 #ifdef HAVE_OPENCL 02294 CV_OCL_RUN(dst.isUMat() && hist.type() == CV_32FC1 && 02295 histdims <= 2 && ranges.size() == histdims * 2 && histdims == channels.size(), 02296 ocl_calcBackProject(images, channels, hist, dst, ranges, (float)scale, histdims)) 02297 #endif 02298 02299 Mat H0 = hist.getMat(), H; 02300 int hcn = H0.channels(); 02301 02302 if( hcn > 1 ) 02303 { 02304 CV_Assert( H0.isContinuous() ); 02305 int hsz[CV_CN_MAX+1]; 02306 memcpy(hsz, &H0.size[0], H0.dims*sizeof(hsz[0])); 02307 hsz[H0.dims] = hcn; 02308 H = Mat(H0.dims+1, hsz, H0.depth(), H0.ptr()); 02309 } 02310 else 02311 H = H0; 02312 02313 bool _1d = H.rows == 1 || H.cols == 1; 02314 int i, dims = H.dims, rsz = (int)ranges.size(), csz = (int)channels.size(); 02315 int nimages = (int)images.total(); 02316 02317 CV_Assert(nimages > 0); 02318 CV_Assert(rsz == dims*2 || (rsz == 2 && _1d) || (rsz == 0 && images.depth(0) == CV_8U)); 02319 CV_Assert(csz == 0 || csz == dims || (csz == 1 && _1d)); 02320 02321 float* _ranges[CV_MAX_DIM]; 02322 if( rsz > 0 ) 02323 { 02324 for( i = 0; i < rsz/2; i++ ) 02325 _ranges[i] = (float*)&ranges[i*2]; 02326 } 02327 02328 AutoBuffer<Mat> buf(nimages); 02329 for( i = 0; i < nimages; i++ ) 02330 buf[i] = images.getMat(i); 02331 02332 calcBackProject(&buf[0], nimages, csz ? &channels[0] : 0, 02333 hist, dst, rsz ? 
(const float**)_ranges : 0, scale, true); 02334 } 02335 02336 02337 ////////////////// C O M P A R E H I S T O G R A M S //////////////////////// 02338 02339 double cv::compareHist( InputArray _H1, InputArray _H2, int method ) 02340 { 02341 Mat H1 = _H1.getMat(), H2 = _H2.getMat(); 02342 const Mat* arrays[] = {&H1, &H2, 0}; 02343 Mat planes[2]; 02344 NAryMatIterator it(arrays, planes); 02345 double result = 0; 02346 int j, len = (int)it.size; 02347 02348 CV_Assert( H1.type() == H2.type() && H1.depth() == CV_32F ); 02349 02350 double s1 = 0, s2 = 0, s11 = 0, s12 = 0, s22 = 0; 02351 02352 CV_Assert( it.planes[0].isContinuous() && it.planes[1].isContinuous() ); 02353 02354 #if CV_SSE2 02355 bool haveSIMD = checkHardwareSupport(CV_CPU_SSE2); 02356 #endif 02357 02358 for( size_t i = 0; i < it.nplanes; i++, ++it ) 02359 { 02360 const float* h1 = it.planes[0].ptr<float>(); 02361 const float* h2 = it.planes[1].ptr<float>(); 02362 len = it.planes[0].rows*it.planes[0].cols*H1.channels(); 02363 j = 0; 02364 02365 if( (method == CV_COMP_CHISQR) || (method == CV_COMP_CHISQR_ALT)) 02366 { 02367 for( ; j < len; j++ ) 02368 { 02369 double a = h1[j] - h2[j]; 02370 double b = (method == CV_COMP_CHISQR) ? 
h1[j] : h1[j] + h2[j]; 02371 if( fabs(b) > DBL_EPSILON ) 02372 result += a*a/b; 02373 } 02374 } 02375 else if( method == CV_COMP_CORREL ) 02376 { 02377 #if CV_SSE2 02378 if (haveSIMD) 02379 { 02380 __m128d v_s1 = _mm_setzero_pd(), v_s2 = v_s1; 02381 __m128d v_s11 = v_s1, v_s22 = v_s1, v_s12 = v_s1; 02382 02383 for ( ; j <= len - 4; j += 4) 02384 { 02385 __m128 v_a = _mm_loadu_ps(h1 + j); 02386 __m128 v_b = _mm_loadu_ps(h2 + j); 02387 02388 // 0-1 02389 __m128d v_ad = _mm_cvtps_pd(v_a); 02390 __m128d v_bd = _mm_cvtps_pd(v_b); 02391 v_s12 = _mm_add_pd(v_s12, _mm_mul_pd(v_ad, v_bd)); 02392 v_s11 = _mm_add_pd(v_s11, _mm_mul_pd(v_ad, v_ad)); 02393 v_s22 = _mm_add_pd(v_s22, _mm_mul_pd(v_bd, v_bd)); 02394 v_s1 = _mm_add_pd(v_s1, v_ad); 02395 v_s2 = _mm_add_pd(v_s2, v_bd); 02396 02397 // 2-3 02398 v_ad = _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_a), 8))); 02399 v_bd = _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_b), 8))); 02400 v_s12 = _mm_add_pd(v_s12, _mm_mul_pd(v_ad, v_bd)); 02401 v_s11 = _mm_add_pd(v_s11, _mm_mul_pd(v_ad, v_ad)); 02402 v_s22 = _mm_add_pd(v_s22, _mm_mul_pd(v_bd, v_bd)); 02403 v_s1 = _mm_add_pd(v_s1, v_ad); 02404 v_s2 = _mm_add_pd(v_s2, v_bd); 02405 } 02406 02407 double CV_DECL_ALIGNED(16) ar[10]; 02408 _mm_store_pd(ar, v_s12); 02409 _mm_store_pd(ar + 2, v_s11); 02410 _mm_store_pd(ar + 4, v_s22); 02411 _mm_store_pd(ar + 6, v_s1); 02412 _mm_store_pd(ar + 8, v_s2); 02413 02414 s12 += ar[0] + ar[1]; 02415 s11 += ar[2] + ar[3]; 02416 s22 += ar[4] + ar[5]; 02417 s1 += ar[6] + ar[7]; 02418 s2 += ar[8] + ar[9]; 02419 } 02420 #endif 02421 for( ; j < len; j++ ) 02422 { 02423 double a = h1[j]; 02424 double b = h2[j]; 02425 02426 s12 += a*b; 02427 s1 += a; 02428 s11 += a*a; 02429 s2 += b; 02430 s22 += b*b; 02431 } 02432 } 02433 else if( method == CV_COMP_INTERSECT ) 02434 { 02435 #if CV_NEON 02436 float32x4_t v_result = vdupq_n_f32(0.0f); 02437 for( ; j <= len - 4; j += 4 ) 02438 v_result = vaddq_f32(v_result, 
vminq_f32(vld1q_f32(h1 + j), vld1q_f32(h2 + j))); 02439 float CV_DECL_ALIGNED(16) ar[4]; 02440 vst1q_f32(ar, v_result); 02441 result += ar[0] + ar[1] + ar[2] + ar[3]; 02442 #elif CV_SSE2 02443 if (haveSIMD) 02444 { 02445 __m128d v_result = _mm_setzero_pd(); 02446 for ( ; j <= len - 4; j += 4) 02447 { 02448 __m128 v_src = _mm_min_ps(_mm_loadu_ps(h1 + j), 02449 _mm_loadu_ps(h2 + j)); 02450 v_result = _mm_add_pd(v_result, _mm_cvtps_pd(v_src)); 02451 v_src = _mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_src), 8)); 02452 v_result = _mm_add_pd(v_result, _mm_cvtps_pd(v_src)); 02453 } 02454 02455 double CV_DECL_ALIGNED(16) ar[2]; 02456 _mm_store_pd(ar, v_result); 02457 result += ar[0] + ar[1]; 02458 } 02459 #endif 02460 for( ; j < len; j++ ) 02461 result += std::min(h1[j], h2[j]); 02462 } 02463 else if( method == CV_COMP_BHATTACHARYYA ) 02464 { 02465 #if CV_SSE2 02466 if (haveSIMD) 02467 { 02468 __m128d v_s1 = _mm_setzero_pd(), v_s2 = v_s1, v_result = v_s1; 02469 for ( ; j <= len - 4; j += 4) 02470 { 02471 __m128 v_a = _mm_loadu_ps(h1 + j); 02472 __m128 v_b = _mm_loadu_ps(h2 + j); 02473 02474 __m128d v_ad = _mm_cvtps_pd(v_a); 02475 __m128d v_bd = _mm_cvtps_pd(v_b); 02476 v_s1 = _mm_add_pd(v_s1, v_ad); 02477 v_s2 = _mm_add_pd(v_s2, v_bd); 02478 v_result = _mm_add_pd(v_result, _mm_sqrt_pd(_mm_mul_pd(v_ad, v_bd))); 02479 02480 v_ad = _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_a), 8))); 02481 v_bd = _mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(v_b), 8))); 02482 v_s1 = _mm_add_pd(v_s1, v_ad); 02483 v_s2 = _mm_add_pd(v_s2, v_bd); 02484 v_result = _mm_add_pd(v_result, _mm_sqrt_pd(_mm_mul_pd(v_ad, v_bd))); 02485 } 02486 02487 double CV_DECL_ALIGNED(16) ar[6]; 02488 _mm_store_pd(ar, v_s1); 02489 _mm_store_pd(ar + 2, v_s2); 02490 _mm_store_pd(ar + 4, v_result); 02491 s1 += ar[0] + ar[1]; 02492 s2 += ar[2] + ar[3]; 02493 result += ar[4] + ar[5]; 02494 } 02495 #endif 02496 for( ; j < len; j++ ) 02497 { 02498 double a = h1[j]; 02499 double b = 
h2[j]; 02500 result += std::sqrt(a*b); 02501 s1 += a; 02502 s2 += b; 02503 } 02504 } 02505 else if( method == CV_COMP_KL_DIV ) 02506 { 02507 for( ; j < len; j++ ) 02508 { 02509 double p = h1[j]; 02510 double q = h2[j]; 02511 if( fabs(p) <= DBL_EPSILON ) { 02512 continue; 02513 } 02514 if( fabs(q) <= DBL_EPSILON ) { 02515 q = 1e-10; 02516 } 02517 result += p * std::log( p / q ); 02518 } 02519 } 02520 else 02521 CV_Error( CV_StsBadArg, "Unknown comparison method" ); 02522 } 02523 02524 if( method == CV_COMP_CHISQR_ALT ) 02525 result *= 2; 02526 else if( method == CV_COMP_CORREL ) 02527 { 02528 size_t total = H1.total(); 02529 double scale = 1./total; 02530 double num = s12 - s1*s2*scale; 02531 double denom2 = (s11 - s1*s1*scale)*(s22 - s2*s2*scale); 02532 result = std::abs(denom2) > DBL_EPSILON ? num/std::sqrt(denom2) : 1.; 02533 } 02534 else if( method == CV_COMP_BHATTACHARYYA ) 02535 { 02536 s1 *= s2; 02537 s1 = fabs(s1) > FLT_EPSILON ? 1./std::sqrt(s1) : 1.; 02538 result = std::sqrt(std::max(1. 
- result*s1, 0.)); 02539 } 02540 02541 return result; 02542 } 02543 02544 02545 double cv::compareHist( const SparseMat& H1, const SparseMat& H2, int method ) 02546 { 02547 double result = 0; 02548 int i, dims = H1.dims(); 02549 02550 CV_Assert( dims > 0 && dims == H2.dims() && H1.type() == H2.type() && H1.type() == CV_32F ); 02551 for( i = 0; i < dims; i++ ) 02552 CV_Assert( H1.size(i) == H2.size(i) ); 02553 02554 const SparseMat *PH1 = &H1, *PH2 = &H2; 02555 if( PH1->nzcount() > PH2->nzcount() && method != CV_COMP_CHISQR && method != CV_COMP_CHISQR_ALT && method != CV_COMP_KL_DIV ) 02556 std::swap(PH1, PH2); 02557 02558 SparseMatConstIterator it = PH1->begin(); 02559 int N1 = (int)PH1->nzcount(), N2 = (int)PH2->nzcount(); 02560 02561 if( (method == CV_COMP_CHISQR) || (method == CV_COMP_CHISQR_ALT) ) 02562 { 02563 for( i = 0; i < N1; i++, ++it ) 02564 { 02565 float v1 = it.value<float>(); 02566 const SparseMat::Node* node = it.node(); 02567 float v2 = PH2->value<float>(node->idx, (size_t*)&node->hashval); 02568 double a = v1 - v2; 02569 double b = (method == CV_COMP_CHISQR) ? v1 : v1 + v2; 02570 if( fabs(b) > DBL_EPSILON ) 02571 result += a*a/b; 02572 } 02573 } 02574 else if( method == CV_COMP_CORREL ) 02575 { 02576 double s1 = 0, s2 = 0, s11 = 0, s12 = 0, s22 = 0; 02577 02578 for( i = 0; i < N1; i++, ++it ) 02579 { 02580 double v1 = it.value<float>(); 02581 const SparseMat::Node* node = it.node(); 02582 s12 += v1*PH2->value<float>(node->idx, (size_t*)&node->hashval); 02583 s1 += v1; 02584 s11 += v1*v1; 02585 } 02586 02587 it = PH2->begin(); 02588 for( i = 0; i < N2; i++, ++it ) 02589 { 02590 double v2 = it.value<float>(); 02591 s2 += v2; 02592 s22 += v2*v2; 02593 } 02594 02595 size_t total = 1; 02596 for( i = 0; i < H1.dims(); i++ ) 02597 total *= H1.size(i); 02598 double scale = 1./total; 02599 double num = s12 - s1*s2*scale; 02600 double denom2 = (s11 - s1*s1*scale)*(s22 - s2*s2*scale); 02601 result = std::abs(denom2) > DBL_EPSILON ? 
num/std::sqrt(denom2) : 1.; 02602 } 02603 else if( method == CV_COMP_INTERSECT ) 02604 { 02605 for( i = 0; i < N1; i++, ++it ) 02606 { 02607 float v1 = it.value<float>(); 02608 const SparseMat::Node* node = it.node(); 02609 float v2 = PH2->value<float>(node->idx, (size_t*)&node->hashval); 02610 if( v2 ) 02611 result += std::min(v1, v2); 02612 } 02613 } 02614 else if( method == CV_COMP_BHATTACHARYYA ) 02615 { 02616 double s1 = 0, s2 = 0; 02617 02618 for( i = 0; i < N1; i++, ++it ) 02619 { 02620 double v1 = it.value<float>(); 02621 const SparseMat::Node* node = it.node(); 02622 double v2 = PH2->value<float>(node->idx, (size_t*)&node->hashval); 02623 result += std::sqrt(v1*v2); 02624 s1 += v1; 02625 } 02626 02627 it = PH2->begin(); 02628 for( i = 0; i < N2; i++, ++it ) 02629 s2 += it.value<float>(); 02630 02631 s1 *= s2; 02632 s1 = fabs(s1) > FLT_EPSILON ? 1./std::sqrt(s1) : 1.; 02633 result = std::sqrt(std::max(1. - result*s1, 0.)); 02634 } 02635 else if( method == CV_COMP_KL_DIV ) 02636 { 02637 for( i = 0; i < N1; i++, ++it ) 02638 { 02639 double v1 = it.value<float>(); 02640 const SparseMat::Node* node = it.node(); 02641 double v2 = PH2->value<float>(node->idx, (size_t*)&node->hashval); 02642 if( !v2 ) 02643 v2 = 1e-10; 02644 result += v1 * std::log( v1 / v2 ); 02645 } 02646 } 02647 else 02648 CV_Error( CV_StsBadArg, "Unknown comparison method" ); 02649 02650 if( method == CV_COMP_CHISQR_ALT ) 02651 result *= 2; 02652 02653 return result; 02654 } 02655 02656 02657 const int CV_HIST_DEFAULT_TYPE = CV_32F; 02658 02659 /* Creates new histogram */ 02660 CvHistogram * 02661 cvCreateHist( int dims, int *sizes, CvHistType type, float** ranges, int uniform ) 02662 { 02663 CvHistogram *hist = 0; 02664 02665 if( (unsigned)dims > CV_MAX_DIM ) 02666 CV_Error( CV_BadOrder, "Number of dimensions is out of range" ); 02667 02668 if( !sizes ) 02669 CV_Error( CV_HeaderIsNull, "Null <sizes> pointer" ); 02670 02671 hist = (CvHistogram *)cvAlloc( sizeof( CvHistogram )); 02672 
hist->type = CV_HIST_MAGIC_VAL + ((int)type & 1); 02673 if (uniform) hist->type|= CV_HIST_UNIFORM_FLAG; 02674 hist->thresh2 = 0; 02675 hist->bins = 0; 02676 if( type == CV_HIST_ARRAY ) 02677 { 02678 hist->bins = cvInitMatNDHeader( &hist->mat, dims, sizes, 02679 CV_HIST_DEFAULT_TYPE ); 02680 cvCreateData( hist->bins ); 02681 } 02682 else if( type == CV_HIST_SPARSE ) 02683 hist->bins = cvCreateSparseMat( dims, sizes, CV_HIST_DEFAULT_TYPE ); 02684 else 02685 CV_Error( CV_StsBadArg, "Invalid histogram type" ); 02686 02687 if( ranges ) 02688 cvSetHistBinRanges( hist, ranges, uniform ); 02689 02690 return hist; 02691 } 02692 02693 02694 /* Creates histogram wrapping header for given array */ 02695 CV_IMPL CvHistogram* 02696 cvMakeHistHeaderForArray( int dims, int *sizes, CvHistogram *hist, 02697 float *data, float **ranges, int uniform ) 02698 { 02699 if( !hist ) 02700 CV_Error( CV_StsNullPtr, "Null histogram header pointer" ); 02701 02702 if( !data ) 02703 CV_Error( CV_StsNullPtr, "Null data pointer" ); 02704 02705 hist->thresh2 = 0; 02706 hist->type = CV_HIST_MAGIC_VAL; 02707 hist->bins = cvInitMatNDHeader( &hist->mat, dims, sizes, CV_HIST_DEFAULT_TYPE, data ); 02708 02709 if( ranges ) 02710 { 02711 if( !uniform ) 02712 CV_Error( CV_StsBadArg, "Only uniform bin ranges can be used here " 02713 "(to avoid memory allocation)" ); 02714 cvSetHistBinRanges( hist, ranges, uniform ); 02715 } 02716 02717 return hist; 02718 } 02719 02720 02721 CV_IMPL void 02722 cvReleaseHist( CvHistogram **hist ) 02723 { 02724 if( !hist ) 02725 CV_Error( CV_StsNullPtr, "" ); 02726 02727 if( *hist ) 02728 { 02729 CvHistogram* temp = *hist; 02730 02731 if( !CV_IS_HIST(temp)) 02732 CV_Error( CV_StsBadArg, "Invalid histogram header" ); 02733 *hist = 0; 02734 02735 if( CV_IS_SPARSE_HIST( temp )) 02736 cvReleaseSparseMat( (CvSparseMat**)&temp->bins ); 02737 else 02738 { 02739 cvReleaseData( temp->bins ); 02740 temp->bins = 0; 02741 } 02742 02743 if( temp->thresh2 ) 02744 cvFree( &temp->thresh2 ); 
02745 cvFree( &temp ); 02746 } 02747 } 02748 02749 CV_IMPL void 02750 cvClearHist( CvHistogram *hist ) 02751 { 02752 if( !CV_IS_HIST(hist) ) 02753 CV_Error( CV_StsBadArg, "Invalid histogram header" ); 02754 cvZero( hist->bins ); 02755 } 02756 02757 02758 // Clears histogram bins that are below than threshold 02759 CV_IMPL void 02760 cvThreshHist( CvHistogram* hist, double thresh ) 02761 { 02762 if( !CV_IS_HIST(hist) ) 02763 CV_Error( CV_StsBadArg, "Invalid histogram header" ); 02764 02765 if( !CV_IS_SPARSE_MAT(hist->bins) ) 02766 { 02767 CvMat mat; 02768 cvGetMat( hist->bins, &mat, 0, 1 ); 02769 cvThreshold( &mat, &mat, thresh, 0, CV_THRESH_TOZERO ); 02770 } 02771 else 02772 { 02773 CvSparseMat* mat = (CvSparseMat*)hist->bins; 02774 CvSparseMatIterator iterator; 02775 CvSparseNode *node; 02776 02777 for( node = cvInitSparseMatIterator( mat, &iterator ); 02778 node != 0; node = cvGetNextSparseNode( &iterator )) 02779 { 02780 float* val = (float*)CV_NODE_VAL( mat, node ); 02781 if( *val <= thresh ) 02782 *val = 0; 02783 } 02784 } 02785 } 02786 02787 02788 // Normalizes histogram (make sum of the histogram bins == factor) 02789 CV_IMPL void 02790 cvNormalizeHist( CvHistogram* hist, double factor ) 02791 { 02792 double sum = 0; 02793 02794 if( !CV_IS_HIST(hist) ) 02795 CV_Error( CV_StsBadArg, "Invalid histogram header" ); 02796 02797 if( !CV_IS_SPARSE_HIST(hist) ) 02798 { 02799 CvMat mat; 02800 cvGetMat( hist->bins, &mat, 0, 1 ); 02801 sum = cvSum( &mat ).val[0]; 02802 if( fabs(sum) < DBL_EPSILON ) 02803 sum = 1; 02804 cvScale( &mat, &mat, factor/sum, 0 ); 02805 } 02806 else 02807 { 02808 CvSparseMat* mat = (CvSparseMat*)hist->bins; 02809 CvSparseMatIterator iterator; 02810 CvSparseNode *node; 02811 float scale; 02812 02813 for( node = cvInitSparseMatIterator( mat, &iterator ); 02814 node != 0; node = cvGetNextSparseNode( &iterator )) 02815 { 02816 sum += *(float*)CV_NODE_VAL(mat,node); 02817 } 02818 02819 if( fabs(sum) < DBL_EPSILON ) 02820 sum = 1; 02821 scale = 
(float)(factor/sum); 02822 02823 for( node = cvInitSparseMatIterator( mat, &iterator ); 02824 node != 0; node = cvGetNextSparseNode( &iterator )) 02825 { 02826 *(float*)CV_NODE_VAL(mat,node) *= scale; 02827 } 02828 } 02829 } 02830 02831 02832 // Retrieves histogram global min, max and their positions 02833 CV_IMPL void 02834 cvGetMinMaxHistValue( const CvHistogram* hist, 02835 float *value_min, float* value_max, 02836 int* idx_min, int* idx_max ) 02837 { 02838 double minVal, maxVal; 02839 int dims, size[CV_MAX_DIM]; 02840 02841 if( !CV_IS_HIST(hist) ) 02842 CV_Error( CV_StsBadArg, "Invalid histogram header" ); 02843 02844 dims = cvGetDims( hist->bins, size ); 02845 02846 if( !CV_IS_SPARSE_HIST(hist) ) 02847 { 02848 CvMat mat; 02849 CvPoint minPt, maxPt; 02850 02851 cvGetMat( hist->bins, &mat, 0, 1 ); 02852 cvMinMaxLoc( &mat, &minVal, &maxVal, &minPt, &maxPt ); 02853 02854 if( dims == 1 ) 02855 { 02856 if( idx_min ) 02857 *idx_min = minPt.y + minPt.x; 02858 if( idx_max ) 02859 *idx_max = maxPt.y + maxPt.x; 02860 } 02861 else if( dims == 2 ) 02862 { 02863 if( idx_min ) 02864 idx_min[0] = minPt.y, idx_min[1] = minPt.x; 02865 if( idx_max ) 02866 idx_max[0] = maxPt.y, idx_max[1] = maxPt.x; 02867 } 02868 else if( idx_min || idx_max ) 02869 { 02870 int imin = minPt.y*mat.cols + minPt.x; 02871 int imax = maxPt.y*mat.cols + maxPt.x; 02872 02873 for(int i = dims - 1; i >= 0; i-- ) 02874 { 02875 if( idx_min ) 02876 { 02877 int t = imin / size[i]; 02878 idx_min[i] = imin - t*size[i]; 02879 imin = t; 02880 } 02881 02882 if( idx_max ) 02883 { 02884 int t = imax / size[i]; 02885 idx_max[i] = imax - t*size[i]; 02886 imax = t; 02887 } 02888 } 02889 } 02890 } 02891 else 02892 { 02893 CvSparseMat* mat = (CvSparseMat*)hist->bins; 02894 CvSparseMatIterator iterator; 02895 CvSparseNode *node; 02896 int minv = INT_MAX; 02897 int maxv = INT_MIN; 02898 CvSparseNode* minNode = 0; 02899 CvSparseNode* maxNode = 0; 02900 const int *_idx_min = 0, *_idx_max = 0; 02901 Cv32suf m; 02902 02903 for( 
node = cvInitSparseMatIterator( mat, &iterator ); 02904 node != 0; node = cvGetNextSparseNode( &iterator )) 02905 { 02906 int value = *(int*)CV_NODE_VAL(mat,node); 02907 value = CV_TOGGLE_FLT(value); 02908 if( value < minv ) 02909 { 02910 minv = value; 02911 minNode = node; 02912 } 02913 02914 if( value > maxv ) 02915 { 02916 maxv = value; 02917 maxNode = node; 02918 } 02919 } 02920 02921 if( minNode ) 02922 { 02923 _idx_min = CV_NODE_IDX(mat,minNode); 02924 _idx_max = CV_NODE_IDX(mat,maxNode); 02925 m.i = CV_TOGGLE_FLT(minv); minVal = m.f; 02926 m.i = CV_TOGGLE_FLT(maxv); maxVal = m.f; 02927 } 02928 else 02929 { 02930 minVal = maxVal = 0; 02931 } 02932 02933 for(int i = 0; i < dims; i++ ) 02934 { 02935 if( idx_min ) 02936 idx_min[i] = _idx_min ? _idx_min[i] : -1; 02937 if( idx_max ) 02938 idx_max[i] = _idx_max ? _idx_max[i] : -1; 02939 } 02940 } 02941 02942 if( value_min ) 02943 *value_min = (float)minVal; 02944 02945 if( value_max ) 02946 *value_max = (float)maxVal; 02947 } 02948 02949 02950 // Compares two histograms using one of a few methods 02951 CV_IMPL double 02952 cvCompareHist( const CvHistogram* hist1, 02953 const CvHistogram* hist2, 02954 int method ) 02955 { 02956 int i; 02957 int size1[CV_MAX_DIM], size2[CV_MAX_DIM], total = 1; 02958 02959 if( !CV_IS_HIST(hist1) || !CV_IS_HIST(hist2) ) 02960 CV_Error( CV_StsBadArg, "Invalid histogram header[s]" ); 02961 02962 if( CV_IS_SPARSE_MAT(hist1->bins) != CV_IS_SPARSE_MAT(hist2->bins)) 02963 CV_Error(CV_StsUnmatchedFormats, "One of histograms is sparse and other is not"); 02964 02965 if( !CV_IS_SPARSE_MAT(hist1->bins) ) 02966 { 02967 cv::Mat H1 = cv::cvarrToMat(hist1->bins); 02968 cv::Mat H2 = cv::cvarrToMat(hist2->bins); 02969 return cv::compareHist(H1, H2, method); 02970 } 02971 02972 int dims1 = cvGetDims( hist1->bins, size1 ); 02973 int dims2 = cvGetDims( hist2->bins, size2 ); 02974 02975 if( dims1 != dims2 ) 02976 CV_Error( CV_StsUnmatchedSizes, 02977 "The histograms have different numbers of dimensions" 
); 02978 02979 for( i = 0; i < dims1; i++ ) 02980 { 02981 if( size1[i] != size2[i] ) 02982 CV_Error( CV_StsUnmatchedSizes, "The histograms have different sizes" ); 02983 total *= size1[i]; 02984 } 02985 02986 double result = 0; 02987 CvSparseMat* mat1 = (CvSparseMat*)(hist1->bins); 02988 CvSparseMat* mat2 = (CvSparseMat*)(hist2->bins); 02989 CvSparseMatIterator iterator; 02990 CvSparseNode *node1, *node2; 02991 02992 if( mat1->heap->active_count > mat2->heap->active_count && method != CV_COMP_CHISQR && method != CV_COMP_CHISQR_ALT && method != CV_COMP_KL_DIV ) 02993 { 02994 CvSparseMat* t; 02995 CV_SWAP( mat1, mat2, t ); 02996 } 02997 02998 if( (method == CV_COMP_CHISQR) || (method == CV_COMP_CHISQR_ALT) ) 02999 { 03000 for( node1 = cvInitSparseMatIterator( mat1, &iterator ); 03001 node1 != 0; node1 = cvGetNextSparseNode( &iterator )) 03002 { 03003 double v1 = *(float*)CV_NODE_VAL(mat1,node1); 03004 uchar* node2_data = cvPtrND( mat2, CV_NODE_IDX(mat1,node1), 0, 0, &node1->hashval ); 03005 double v2 = node2_data ? *(float*)node2_data : 0.f; 03006 double a = v1 - v2; 03007 double b = (method == CV_COMP_CHISQR) ? 
v1 : v1 + v2; 03008 if( fabs(b) > DBL_EPSILON ) 03009 result += a*a/b; 03010 } 03011 } 03012 else if( method == CV_COMP_CORREL ) 03013 { 03014 double s1 = 0, s11 = 0; 03015 double s2 = 0, s22 = 0; 03016 double s12 = 0; 03017 double num, denom2, scale = 1./total; 03018 03019 for( node1 = cvInitSparseMatIterator( mat1, &iterator ); 03020 node1 != 0; node1 = cvGetNextSparseNode( &iterator )) 03021 { 03022 double v1 = *(float*)CV_NODE_VAL(mat1,node1); 03023 uchar* node2_data = cvPtrND( mat2, CV_NODE_IDX(mat1,node1), 03024 0, 0, &node1->hashval ); 03025 if( node2_data ) 03026 { 03027 double v2 = *(float*)node2_data; 03028 s12 += v1*v2; 03029 } 03030 s1 += v1; 03031 s11 += v1*v1; 03032 } 03033 03034 for( node2 = cvInitSparseMatIterator( mat2, &iterator ); 03035 node2 != 0; node2 = cvGetNextSparseNode( &iterator )) 03036 { 03037 double v2 = *(float*)CV_NODE_VAL(mat2,node2); 03038 s2 += v2; 03039 s22 += v2*v2; 03040 } 03041 03042 num = s12 - s1*s2*scale; 03043 denom2 = (s11 - s1*s1*scale)*(s22 - s2*s2*scale); 03044 result = fabs(denom2) > DBL_EPSILON ? 
num/sqrt(denom2) : 1; 03045 } 03046 else if( method == CV_COMP_INTERSECT ) 03047 { 03048 for( node1 = cvInitSparseMatIterator( mat1, &iterator ); 03049 node1 != 0; node1 = cvGetNextSparseNode( &iterator )) 03050 { 03051 float v1 = *(float*)CV_NODE_VAL(mat1,node1); 03052 uchar* node2_data = cvPtrND( mat2, CV_NODE_IDX(mat1,node1), 03053 0, 0, &node1->hashval ); 03054 if( node2_data ) 03055 { 03056 float v2 = *(float*)node2_data; 03057 if( v1 <= v2 ) 03058 result += v1; 03059 else 03060 result += v2; 03061 } 03062 } 03063 } 03064 else if( method == CV_COMP_BHATTACHARYYA ) 03065 { 03066 double s1 = 0, s2 = 0; 03067 03068 for( node1 = cvInitSparseMatIterator( mat1, &iterator ); 03069 node1 != 0; node1 = cvGetNextSparseNode( &iterator )) 03070 { 03071 double v1 = *(float*)CV_NODE_VAL(mat1,node1); 03072 uchar* node2_data = cvPtrND( mat2, CV_NODE_IDX(mat1,node1), 03073 0, 0, &node1->hashval ); 03074 s1 += v1; 03075 if( node2_data ) 03076 { 03077 double v2 = *(float*)node2_data; 03078 result += sqrt(v1 * v2); 03079 } 03080 } 03081 03082 for( node1 = cvInitSparseMatIterator( mat2, &iterator ); 03083 node1 != 0; node1 = cvGetNextSparseNode( &iterator )) 03084 { 03085 double v2 = *(float*)CV_NODE_VAL(mat2,node1); 03086 s2 += v2; 03087 } 03088 03089 s1 *= s2; 03090 s1 = fabs(s1) > FLT_EPSILON ? 1./sqrt(s1) : 1.; 03091 result = 1. 
- result*s1; 03092 result = sqrt(MAX(result,0.)); 03093 } 03094 else if( method == CV_COMP_KL_DIV ) 03095 { 03096 cv::SparseMat sH1, sH2; 03097 ((const CvSparseMat*)hist1->bins)->copyToSparseMat(sH1); 03098 ((const CvSparseMat*)hist2->bins)->copyToSparseMat(sH2); 03099 result = cv::compareHist( sH1, sH2, CV_COMP_KL_DIV ); 03100 } 03101 else 03102 CV_Error( CV_StsBadArg, "Unknown comparison method" ); 03103 03104 if( method == CV_COMP_CHISQR_ALT ) 03105 result *= 2; 03106 03107 return result; 03108 } 03109 03110 // copies one histogram to another 03111 CV_IMPL void 03112 cvCopyHist( const CvHistogram* src, CvHistogram** _dst ) 03113 { 03114 if( !_dst ) 03115 CV_Error( CV_StsNullPtr, "Destination double pointer is NULL" ); 03116 03117 CvHistogram* dst = *_dst; 03118 03119 if( !CV_IS_HIST(src) || (dst && !CV_IS_HIST(dst)) ) 03120 CV_Error( CV_StsBadArg, "Invalid histogram header[s]" ); 03121 03122 bool eq = false; 03123 int size1[CV_MAX_DIM]; 03124 bool is_sparse = CV_IS_SPARSE_MAT(src->bins); 03125 int dims1 = cvGetDims( src->bins, size1 ); 03126 03127 if( dst && (is_sparse == CV_IS_SPARSE_MAT(dst->bins))) 03128 { 03129 int size2[CV_MAX_DIM]; 03130 int dims2 = cvGetDims( dst->bins, size2 ); 03131 03132 if( dims1 == dims2 ) 03133 { 03134 int i; 03135 03136 for( i = 0; i < dims1; i++ ) 03137 { 03138 if( size1[i] != size2[i] ) 03139 break; 03140 } 03141 03142 eq = (i == dims1); 03143 } 03144 } 03145 03146 if( !eq ) 03147 { 03148 cvReleaseHist( _dst ); 03149 dst = cvCreateHist( dims1, size1, !is_sparse ? 
CV_HIST_ARRAY : CV_HIST_SPARSE, 0, 0 ); 03150 *_dst = dst; 03151 } 03152 03153 if( CV_HIST_HAS_RANGES( src )) 03154 { 03155 float* ranges[CV_MAX_DIM]; 03156 float** thresh = 0; 03157 03158 if( CV_IS_UNIFORM_HIST( src )) 03159 { 03160 for( int i = 0; i < dims1; i++ ) 03161 ranges[i] = (float*)src->thresh[i]; 03162 03163 thresh = ranges; 03164 } 03165 else 03166 { 03167 thresh = src->thresh2; 03168 } 03169 03170 cvSetHistBinRanges( dst, thresh, CV_IS_UNIFORM_HIST(src)); 03171 } 03172 03173 cvCopy( src->bins, dst->bins ); 03174 } 03175 03176 03177 // Sets a value range for every histogram bin 03178 CV_IMPL void 03179 cvSetHistBinRanges( CvHistogram* hist, float** ranges, int uniform ) 03180 { 03181 int dims, size[CV_MAX_DIM], total = 0; 03182 int i, j; 03183 03184 if( !ranges ) 03185 CV_Error( CV_StsNullPtr, "NULL ranges pointer" ); 03186 03187 if( !CV_IS_HIST(hist) ) 03188 CV_Error( CV_StsBadArg, "Invalid histogram header" ); 03189 03190 dims = cvGetDims( hist->bins, size ); 03191 for( i = 0; i < dims; i++ ) 03192 total += size[i]+1; 03193 03194 if( uniform ) 03195 { 03196 for( i = 0; i < dims; i++ ) 03197 { 03198 if( !ranges[i] ) 03199 CV_Error( CV_StsNullPtr, "One of <ranges> elements is NULL" ); 03200 hist->thresh[i][0] = ranges[i][0]; 03201 hist->thresh[i][1] = ranges[i][1]; 03202 } 03203 03204 hist->type |= CV_HIST_UNIFORM_FLAG + CV_HIST_RANGES_FLAG; 03205 } 03206 else 03207 { 03208 float* dim_ranges; 03209 03210 if( !hist->thresh2 ) 03211 { 03212 hist->thresh2 = (float**)cvAlloc( 03213 dims*sizeof(hist->thresh2[0])+ 03214 total*sizeof(hist->thresh2[0][0])); 03215 } 03216 dim_ranges = (float*)(hist->thresh2 + dims); 03217 03218 for( i = 0; i < dims; i++ ) 03219 { 03220 float val0 = -FLT_MAX; 03221 03222 if( !ranges[i] ) 03223 CV_Error( CV_StsNullPtr, "One of <ranges> elements is NULL" ); 03224 03225 for( j = 0; j <= size[i]; j++ ) 03226 { 03227 float val = ranges[i][j]; 03228 if( val <= val0 ) 03229 CV_Error(CV_StsOutOfRange, "Bin ranges should go in ascenting 
order"); 03230 val0 = dim_ranges[j] = val; 03231 } 03232 03233 hist->thresh2[i] = dim_ranges; 03234 dim_ranges += size[i] + 1; 03235 } 03236 03237 hist->type |= CV_HIST_RANGES_FLAG; 03238 hist->type &= ~CV_HIST_UNIFORM_FLAG; 03239 } 03240 } 03241 03242 03243 CV_IMPL void 03244 cvCalcArrHist( CvArr** img, CvHistogram* hist, int accumulate, const CvArr* mask ) 03245 { 03246 if( !CV_IS_HIST(hist)) 03247 CV_Error( CV_StsBadArg, "Bad histogram pointer" ); 03248 03249 if( !img ) 03250 CV_Error( CV_StsNullPtr, "Null double array pointer" ); 03251 03252 int size[CV_MAX_DIM]; 03253 int i, dims = cvGetDims( hist->bins, size); 03254 bool uniform = CV_IS_UNIFORM_HIST(hist); 03255 03256 std::vector<cv::Mat> images(dims); 03257 for( i = 0; i < dims; i++ ) 03258 images[i] = cv::cvarrToMat(img[i]); 03259 03260 cv::Mat _mask; 03261 if( mask ) 03262 _mask = cv::cvarrToMat(mask); 03263 03264 const float* uranges[CV_MAX_DIM] = {0}; 03265 const float** ranges = 0; 03266 03267 if( hist->type & CV_HIST_RANGES_FLAG ) 03268 { 03269 ranges = (const float**)hist->thresh2; 03270 if( uniform ) 03271 { 03272 for( i = 0; i < dims; i++ ) 03273 uranges[i] = &hist->thresh[i][0]; 03274 ranges = uranges; 03275 } 03276 } 03277 03278 if( !CV_IS_SPARSE_HIST(hist) ) 03279 { 03280 cv::Mat H = cv::cvarrToMat(hist->bins); 03281 cv::calcHist ( &images[0], (int)images.size(), 0, _mask, 03282 H, cvGetDims(hist->bins), H.size, ranges, uniform, accumulate != 0 ); 03283 } 03284 else 03285 { 03286 CvSparseMat* sparsemat = (CvSparseMat*)hist->bins; 03287 03288 if( !accumulate ) 03289 cvZero( hist->bins ); 03290 cv::SparseMat sH; 03291 sparsemat->copyToSparseMat(sH); 03292 cv::calcHist ( &images[0], (int)images.size(), 0, _mask, sH, sH.dims(), 03293 sH.dims() > 0 ? 
sH.hdr->size : 0, ranges, uniform, accumulate != 0, true ); 03294 03295 if( accumulate ) 03296 cvZero( sparsemat ); 03297 03298 cv::SparseMatConstIterator it = sH.begin(); 03299 int nz = (int)sH.nzcount(); 03300 for( i = 0; i < nz; i++, ++it ) 03301 *(float*)cvPtrND(sparsemat, it.node()->idx, 0, -2) = (float)*(const int*)it.ptr; 03302 } 03303 } 03304 03305 03306 CV_IMPL void 03307 cvCalcArrBackProject( CvArr** img, CvArr* dst, const CvHistogram* hist ) 03308 { 03309 if( !CV_IS_HIST(hist)) 03310 CV_Error( CV_StsBadArg, "Bad histogram pointer" ); 03311 03312 if( !img ) 03313 CV_Error( CV_StsNullPtr, "Null double array pointer" ); 03314 03315 int size[CV_MAX_DIM]; 03316 int i, dims = cvGetDims( hist->bins, size ); 03317 03318 bool uniform = CV_IS_UNIFORM_HIST(hist); 03319 const float* uranges[CV_MAX_DIM] = {0}; 03320 const float** ranges = 0; 03321 03322 if( hist->type & CV_HIST_RANGES_FLAG ) 03323 { 03324 ranges = (const float**)hist->thresh2; 03325 if( uniform ) 03326 { 03327 for( i = 0; i < dims; i++ ) 03328 uranges[i] = &hist->thresh[i][0]; 03329 ranges = uranges; 03330 } 03331 } 03332 03333 std::vector<cv::Mat> images(dims); 03334 for( i = 0; i < dims; i++ ) 03335 images[i] = cv::cvarrToMat(img[i]); 03336 03337 cv::Mat _dst = cv::cvarrToMat(dst); 03338 03339 CV_Assert( _dst.size() == images[0].size() && _dst.depth() == images[0].depth() ); 03340 03341 if( !CV_IS_SPARSE_HIST(hist) ) 03342 { 03343 cv::Mat H = cv::cvarrToMat(hist->bins); 03344 cv::calcBackProject( &images[0], (int)images.size(), 03345 0, H, _dst, ranges, 1, uniform ); 03346 } 03347 else 03348 { 03349 cv::SparseMat sH; 03350 ((const CvSparseMat*)hist->bins)->copyToSparseMat(sH); 03351 cv::calcBackProject( &images[0], (int)images.size(), 03352 0, sH, _dst, ranges, 1, uniform ); 03353 } 03354 } 03355 03356 03357 ////////////////////// B A C K P R O J E C T P A T C H ///////////////////////// 03358 03359 CV_IMPL void 03360 cvCalcArrBackProjectPatch( CvArr** arr, CvArr* dst, CvSize patch_size, 
CvHistogram* hist, 03361 int method, double norm_factor ) 03362 { 03363 CvHistogram* model = 0; 03364 03365 IplImage imgstub[CV_MAX_DIM], *img[CV_MAX_DIM]; 03366 IplROI roi; 03367 CvMat dststub, *dstmat; 03368 int i, dims; 03369 int x, y; 03370 CvSize size; 03371 03372 if( !CV_IS_HIST(hist)) 03373 CV_Error( CV_StsBadArg, "Bad histogram pointer" ); 03374 03375 if( !arr ) 03376 CV_Error( CV_StsNullPtr, "Null double array pointer" ); 03377 03378 if( norm_factor <= 0 ) 03379 CV_Error( CV_StsOutOfRange, 03380 "Bad normalization factor (set it to 1.0 if unsure)" ); 03381 03382 if( patch_size.width <= 0 || patch_size.height <= 0 ) 03383 CV_Error( CV_StsBadSize, "The patch width and height must be positive" ); 03384 03385 dims = cvGetDims( hist->bins ); 03386 cvNormalizeHist( hist, norm_factor ); 03387 03388 for( i = 0; i < dims; i++ ) 03389 { 03390 CvMat stub, *mat; 03391 mat = cvGetMat( arr[i], &stub, 0, 0 ); 03392 img[i] = cvGetImage( mat, &imgstub[i] ); 03393 img[i]->roi = &roi; 03394 } 03395 03396 dstmat = cvGetMat( dst, &dststub, 0, 0 ); 03397 if( CV_MAT_TYPE( dstmat->type ) != CV_32FC1 ) 03398 CV_Error( CV_StsUnsupportedFormat, "Resultant image must have 32fC1 type" ); 03399 03400 if( dstmat->cols != img[0]->width - patch_size.width + 1 || 03401 dstmat->rows != img[0]->height - patch_size.height + 1 ) 03402 CV_Error( CV_StsUnmatchedSizes, 03403 "The output map must be (W-w+1 x H-h+1), " 03404 "where the input images are (W x H) each and the patch is (w x h)" ); 03405 03406 cvCopyHist( hist, &model ); 03407 03408 size = cvGetMatSize(dstmat); 03409 roi.coi = 0; 03410 roi.width = patch_size.width; 03411 roi.height = patch_size.height; 03412 03413 for( y = 0; y < size.height; y++ ) 03414 { 03415 for( x = 0; x < size.width; x++ ) 03416 { 03417 double result; 03418 roi.xOffset = x; 03419 roi.yOffset = y; 03420 03421 cvCalcHist ( img, model ); 03422 cvNormalizeHist( model, norm_factor ); 03423 result = cvCompareHist( model, hist, method ); 03424 CV_MAT_ELEM( *dstmat, 
float, y, x ) = (float)result; 03425 } 03426 } 03427 03428 cvReleaseHist( &model ); 03429 } 03430 03431 03432 // Calculates Bayes probabilistic histograms 03433 CV_IMPL void 03434 cvCalcBayesianProb( CvHistogram** src, int count, CvHistogram** dst ) 03435 { 03436 int i; 03437 03438 if( !src || !dst ) 03439 CV_Error( CV_StsNullPtr, "NULL histogram array pointer" ); 03440 03441 if( count < 2 ) 03442 CV_Error( CV_StsOutOfRange, "Too small number of histograms" ); 03443 03444 for( i = 0; i < count; i++ ) 03445 { 03446 if( !CV_IS_HIST(src[i]) || !CV_IS_HIST(dst[i]) ) 03447 CV_Error( CV_StsBadArg, "Invalid histogram header" ); 03448 03449 if( !CV_IS_MATND(src[i]->bins) || !CV_IS_MATND(dst[i]->bins) ) 03450 CV_Error( CV_StsBadArg, "The function supports dense histograms only" ); 03451 } 03452 03453 cvZero( dst[0]->bins ); 03454 // dst[0] = src[0] + ... + src[count-1] 03455 for( i = 0; i < count; i++ ) 03456 cvAdd( src[i]->bins, dst[0]->bins, dst[0]->bins ); 03457 03458 cvDiv( 0, dst[0]->bins, dst[0]->bins ); 03459 03460 // dst[i] = src[i]*(1/dst[0]) 03461 for( i = count - 1; i >= 0; i-- ) 03462 cvMul( src[i]->bins, dst[0]->bins, dst[i]->bins ); 03463 } 03464 03465 03466 CV_IMPL void 03467 cvCalcProbDensity( const CvHistogram* hist, const CvHistogram* hist_mask, 03468 CvHistogram* hist_dens, double scale ) 03469 { 03470 if( scale <= 0 ) 03471 CV_Error( CV_StsOutOfRange, "scale must be positive" ); 03472 03473 if( !CV_IS_HIST(hist) || !CV_IS_HIST(hist_mask) || !CV_IS_HIST(hist_dens) ) 03474 CV_Error( CV_StsBadArg, "Invalid histogram pointer[s]" ); 03475 03476 { 03477 CvArr* arrs[] = { hist->bins, hist_mask->bins, hist_dens->bins }; 03478 CvMatND stubs[3]; 03479 CvNArrayIterator iterator; 03480 03481 cvInitNArrayIterator( 3, arrs, 0, stubs, &iterator ); 03482 03483 if( CV_MAT_TYPE(iterator.hdr[0]->type) != CV_32FC1 ) 03484 CV_Error( CV_StsUnsupportedFormat, "All histograms must have 32fC1 type" ); 03485 03486 do 03487 { 03488 const float* srcdata = (const 
float*)(iterator.ptr[0]); 03489 const float* maskdata = (const float*)(iterator.ptr[1]); 03490 float* dstdata = (float*)(iterator.ptr[2]); 03491 int i; 03492 03493 for( i = 0; i < iterator.size.width; i++ ) 03494 { 03495 float s = srcdata[i]; 03496 float m = maskdata[i]; 03497 if( s > FLT_EPSILON ) 03498 if( m <= s ) 03499 dstdata[i] = (float)(m*scale/s); 03500 else 03501 dstdata[i] = (float)scale; 03502 else 03503 dstdata[i] = (float)0; 03504 } 03505 } 03506 while( cvNextNArraySlice( &iterator )); 03507 } 03508 } 03509 03510 class EqualizeHistCalcHist_Invoker : public cv::ParallelLoopBody 03511 { 03512 public: 03513 enum {HIST_SZ = 256}; 03514 03515 EqualizeHistCalcHist_Invoker(cv::Mat& src, int* histogram, cv::Mutex* histogramLock) 03516 : src_(src), globalHistogram_(histogram), histogramLock_(histogramLock) 03517 { } 03518 03519 void operator()( const cv::Range& rowRange ) const 03520 { 03521 int localHistogram[HIST_SZ] = {0, }; 03522 03523 const size_t sstep = src_.step; 03524 03525 int width = src_.cols; 03526 int height = rowRange.end - rowRange.start; 03527 03528 if (src_.isContinuous()) 03529 { 03530 width *= height; 03531 height = 1; 03532 } 03533 03534 for (const uchar* ptr = src_.ptr<uchar>(rowRange.start); height--; ptr += sstep) 03535 { 03536 int x = 0; 03537 for (; x <= width - 4; x += 4) 03538 { 03539 int t0 = ptr[x], t1 = ptr[x+1]; 03540 localHistogram[t0]++; localHistogram[t1]++; 03541 t0 = ptr[x+2]; t1 = ptr[x+3]; 03542 localHistogram[t0]++; localHistogram[t1]++; 03543 } 03544 03545 for (; x < width; ++x) 03546 localHistogram[ptr[x]]++; 03547 } 03548 03549 cv::AutoLock lock(*histogramLock_); 03550 03551 for( int i = 0; i < HIST_SZ; i++ ) 03552 globalHistogram_[i] += localHistogram[i]; 03553 } 03554 03555 static bool isWorthParallel( const cv::Mat& src ) 03556 { 03557 return ( src.total() >= 640*480 ); 03558 } 03559 03560 private: 03561 EqualizeHistCalcHist_Invoker& operator=(const EqualizeHistCalcHist_Invoker&); 03562 03563 cv::Mat& src_; 03564 
int* globalHistogram_; 03565 cv::Mutex* histogramLock_; 03566 }; 03567 03568 class EqualizeHistLut_Invoker : public cv::ParallelLoopBody 03569 { 03570 public: 03571 EqualizeHistLut_Invoker( cv::Mat& src, cv::Mat& dst, int* lut ) 03572 : src_(src), 03573 dst_(dst), 03574 lut_(lut) 03575 { } 03576 03577 void operator()( const cv::Range& rowRange ) const 03578 { 03579 const size_t sstep = src_.step; 03580 const size_t dstep = dst_.step; 03581 03582 int width = src_.cols; 03583 int height = rowRange.end - rowRange.start; 03584 int* lut = lut_; 03585 03586 if (src_.isContinuous() && dst_.isContinuous()) 03587 { 03588 width *= height; 03589 height = 1; 03590 } 03591 03592 const uchar* sptr = src_.ptr<uchar>(rowRange.start); 03593 uchar* dptr = dst_.ptr<uchar>(rowRange.start); 03594 03595 for (; height--; sptr += sstep, dptr += dstep) 03596 { 03597 int x = 0; 03598 for (; x <= width - 4; x += 4) 03599 { 03600 int v0 = sptr[x]; 03601 int v1 = sptr[x+1]; 03602 int x0 = lut[v0]; 03603 int x1 = lut[v1]; 03604 dptr[x] = (uchar)x0; 03605 dptr[x+1] = (uchar)x1; 03606 03607 v0 = sptr[x+2]; 03608 v1 = sptr[x+3]; 03609 x0 = lut[v0]; 03610 x1 = lut[v1]; 03611 dptr[x+2] = (uchar)x0; 03612 dptr[x+3] = (uchar)x1; 03613 } 03614 03615 for (; x < width; ++x) 03616 dptr[x] = (uchar)lut[sptr[x]]; 03617 } 03618 } 03619 03620 static bool isWorthParallel( const cv::Mat& src ) 03621 { 03622 return ( src.total() >= 640*480 ); 03623 } 03624 03625 private: 03626 EqualizeHistLut_Invoker& operator=(const EqualizeHistLut_Invoker&); 03627 03628 cv::Mat& src_; 03629 cv::Mat& dst_; 03630 int* lut_; 03631 }; 03632 03633 CV_IMPL void cvEqualizeHist( const CvArr* srcarr, CvArr* dstarr ) 03634 { 03635 cv::equalizeHist(cv::cvarrToMat(srcarr), cv::cvarrToMat(dstarr)); 03636 } 03637 03638 #ifdef HAVE_OPENCL 03639 03640 namespace cv { 03641 03642 static bool ocl_equalizeHist(InputArray _src, OutputArray _dst) 03643 { 03644 const ocl::Device & dev = ocl::Device::getDefault(); 03645 int compunits = 
dev.maxComputeUnits(); 03646 size_t wgs = dev.maxWorkGroupSize(); 03647 Size size = _src.size(); 03648 bool use16 = size.width % 16 == 0 && _src.offset() % 16 == 0 && _src.step() % 16 == 0; 03649 int kercn = dev.isAMD() && use16 ? 16 : std::min(4, ocl::predictOptimalVectorWidth(_src)); 03650 03651 ocl::Kernel k1("calculate_histogram", ocl::imgproc::histogram_oclsrc, 03652 format("-D BINS=%d -D HISTS_COUNT=%d -D WGS=%d -D kercn=%d -D T=%s%s", 03653 BINS, compunits, wgs, kercn, 03654 kercn == 4 ? "int" : ocl::typeToStr(CV_8UC(kercn)), 03655 _src.isContinuous() ? " -D HAVE_SRC_CONT" : "")); 03656 if (k1.empty()) 03657 return false; 03658 03659 UMat src = _src.getUMat(), ghist(1, BINS * compunits, CV_32SC1); 03660 03661 k1.args(ocl::KernelArg::ReadOnly(src), 03662 ocl::KernelArg::PtrWriteOnly(ghist), (int)src.total()); 03663 03664 size_t globalsize = compunits * wgs; 03665 if (!k1.run(1, &globalsize, &wgs, false)) 03666 return false; 03667 03668 wgs = std::min<size_t>(ocl::Device::getDefault().maxWorkGroupSize(), BINS); 03669 UMat lut(1, 256, CV_8UC1); 03670 ocl::Kernel k2("calcLUT", ocl::imgproc::histogram_oclsrc, 03671 format("-D BINS=%d -D HISTS_COUNT=%d -D WGS=%d", 03672 BINS, compunits, (int)wgs)); 03673 k2.args(ocl::KernelArg::PtrWriteOnly(lut), 03674 ocl::KernelArg::PtrReadOnly(ghist), (int)_src.total()); 03675 03676 // calculation of LUT 03677 if (!k2.run(1, &wgs, &wgs, false)) 03678 return false; 03679 03680 // execute LUT transparently 03681 LUT(_src, lut, _dst); 03682 return true; 03683 } 03684 03685 } 03686 03687 #endif 03688 03689 void cv::equalizeHist( InputArray _src, OutputArray _dst ) 03690 { 03691 CV_Assert( _src.type() == CV_8UC1 ); 03692 03693 if (_src.empty()) 03694 return; 03695 03696 #ifdef HAVE_OPENCL 03697 CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(), 03698 ocl_equalizeHist(_src, _dst)) 03699 #endif 03700 03701 Mat src = _src.getMat(); 03702 _dst.create( src.size(), src.type() ); 03703 Mat dst = _dst.getMat(); 03704 03705 Mutex 
histogramLockInstance; 03706 03707 const int hist_sz = EqualizeHistCalcHist_Invoker::HIST_SZ; 03708 int hist[hist_sz] = {0,}; 03709 int lut[hist_sz]; 03710 03711 EqualizeHistCalcHist_Invoker calcBody(src, hist, &histogramLockInstance); 03712 EqualizeHistLut_Invoker lutBody(src, dst, lut); 03713 cv::Range heightRange(0, src.rows); 03714 03715 if(EqualizeHistCalcHist_Invoker::isWorthParallel(src)) 03716 parallel_for_(heightRange, calcBody); 03717 else 03718 calcBody(heightRange); 03719 03720 int i = 0; 03721 while (!hist[i]) ++i; 03722 03723 int total = (int)src.total(); 03724 if (hist[i] == total) 03725 { 03726 dst.setTo(i); 03727 return; 03728 } 03729 03730 float scale = (hist_sz - 1.f)/(total - hist[i]); 03731 int sum = 0; 03732 03733 for (lut[i++] = 0; i < hist_sz; ++i) 03734 { 03735 sum += hist[i]; 03736 lut[i] = saturate_cast<uchar>(sum * scale); 03737 } 03738 03739 if(EqualizeHistLut_Invoker::isWorthParallel(src)) 03740 parallel_for_(heightRange, lutBody); 03741 else 03742 lutBody(heightRange); 03743 } 03744 03745 // ---------------------------------------------------------------------- 03746 03747 /* Implementation of RTTI and Generic Functions for CvHistogram */ 03748 #define CV_TYPE_NAME_HIST "opencv-hist" 03749 03750 static int icvIsHist( const void * ptr ) 03751 { 03752 return CV_IS_HIST( ((CvHistogram*)ptr) ); 03753 } 03754 03755 static CvHistogram * icvCloneHist( const CvHistogram * src ) 03756 { 03757 CvHistogram * dst=NULL; 03758 cvCopyHist(src, &dst); 03759 return dst; 03760 } 03761 03762 static void *icvReadHist( CvFileStorage * fs, CvFileNode * node ) 03763 { 03764 CvHistogram * h = 0; 03765 int type = 0; 03766 int is_uniform = 0; 03767 int have_ranges = 0; 03768 03769 h = (CvHistogram *)cvAlloc( sizeof(CvHistogram) ); 03770 03771 type = cvReadIntByName( fs, node, "type", 0 ); 03772 is_uniform = cvReadIntByName( fs, node, "is_uniform", 0 ); 03773 have_ranges = cvReadIntByName( fs, node, "have_ranges", 0 ); 03774 h->type = CV_HIST_MAGIC_VAL | type | 
03775 (is_uniform ? CV_HIST_UNIFORM_FLAG : 0) | 03776 (have_ranges ? CV_HIST_RANGES_FLAG : 0); 03777 03778 if(type == CV_HIST_ARRAY) 03779 { 03780 // read histogram bins 03781 CvMatND * mat = (CvMatND *)cvReadByName( fs, node, "mat" ); 03782 int i, sizes[CV_MAX_DIM]; 03783 03784 if(!CV_IS_MATND(mat)) 03785 CV_Error( CV_StsError, "Expected CvMatND"); 03786 03787 for(i=0; i<mat->dims; i++) 03788 sizes[i] = mat->dim[i].size; 03789 03790 cvInitMatNDHeader( &(h->mat), mat->dims, sizes, mat->type, mat->data.ptr ); 03791 h->bins = &(h->mat); 03792 03793 // take ownership of refcount pointer as well 03794 h->mat.refcount = mat->refcount; 03795 03796 // increase refcount so freeing temp header doesn't free data 03797 cvIncRefData( mat ); 03798 03799 // free temporary header 03800 cvReleaseMatND( &mat ); 03801 } 03802 else 03803 { 03804 h->bins = cvReadByName( fs, node, "bins" ); 03805 if(!CV_IS_SPARSE_MAT(h->bins)){ 03806 CV_Error( CV_StsError, "Unknown Histogram type"); 03807 } 03808 } 03809 03810 // read thresholds 03811 if(have_ranges) 03812 { 03813 int i, dims, size[CV_MAX_DIM], total = 0; 03814 CvSeqReader reader; 03815 CvFileNode * thresh_node; 03816 03817 dims = cvGetDims( h->bins, size ); 03818 for( i = 0; i < dims; i++ ) 03819 total += size[i]+1; 03820 03821 thresh_node = cvGetFileNodeByName( fs, node, "thresh" ); 03822 if(!thresh_node) 03823 CV_Error( CV_StsError, "'thresh' node is missing"); 03824 cvStartReadRawData( fs, thresh_node, &reader ); 03825 03826 if(is_uniform) 03827 { 03828 for(i=0; i<dims; i++) 03829 cvReadRawDataSlice( fs, &reader, 2, h->thresh[i], "f" ); 03830 h->thresh2 = NULL; 03831 } 03832 else 03833 { 03834 float* dim_ranges; 03835 h->thresh2 = (float**)cvAlloc( 03836 dims*sizeof(h->thresh2[0])+ 03837 total*sizeof(h->thresh2[0][0])); 03838 dim_ranges = (float*)(h->thresh2 + dims); 03839 for(i=0; i < dims; i++) 03840 { 03841 h->thresh2[i] = dim_ranges; 03842 cvReadRawDataSlice( fs, &reader, size[i]+1, dim_ranges, "f" ); 03843 dim_ranges += 
size[i] + 1; 03844 } 03845 } 03846 } 03847 03848 return h; 03849 } 03850 03851 static void icvWriteHist( CvFileStorage* fs, const char* name, 03852 const void* struct_ptr, CvAttrList /*attributes*/ ) 03853 { 03854 const CvHistogram * hist = (const CvHistogram *) struct_ptr; 03855 int sizes[CV_MAX_DIM]; 03856 int dims; 03857 int i; 03858 int is_uniform, have_ranges; 03859 03860 cvStartWriteStruct( fs, name, CV_NODE_MAP, CV_TYPE_NAME_HIST ); 03861 03862 is_uniform = (CV_IS_UNIFORM_HIST(hist) ? 1 : 0); 03863 have_ranges = (hist->type & CV_HIST_RANGES_FLAG ? 1 : 0); 03864 03865 cvWriteInt( fs, "type", (hist->type & 1) ); 03866 cvWriteInt( fs, "is_uniform", is_uniform ); 03867 cvWriteInt( fs, "have_ranges", have_ranges ); 03868 if(!CV_IS_SPARSE_HIST(hist)) 03869 cvWrite( fs, "mat", &(hist->mat) ); 03870 else 03871 cvWrite( fs, "bins", hist->bins ); 03872 03873 // write thresholds 03874 if(have_ranges){ 03875 dims = cvGetDims( hist->bins, sizes ); 03876 cvStartWriteStruct( fs, "thresh", CV_NODE_SEQ + CV_NODE_FLOW ); 03877 if(is_uniform){ 03878 for(i=0; i<dims; i++){ 03879 cvWriteRawData( fs, hist->thresh[i], 2, "f" ); 03880 } 03881 } 03882 else{ 03883 for(i=0; i<dims; i++){ 03884 cvWriteRawData( fs, hist->thresh2[i], sizes[i]+1, "f" ); 03885 } 03886 } 03887 cvEndWriteStruct( fs ); 03888 } 03889 03890 cvEndWriteStruct( fs ); 03891 } 03892 03893 03894 CvType hist_type( CV_TYPE_NAME_HIST, icvIsHist, (CvReleaseFunc)cvReleaseHist, 03895 icvReadHist, icvWriteHist, (CvCloneFunc)icvCloneHist ); 03896 03897 /* End of file. */ 03898
Generated on Tue Jul 12 2022 14:46:49 by
