Important changes to repositories hosted on mbed.com
Mbed-hosted Mercurial repositories are deprecated and are due to be permanently deleted in July 2026.
To keep a copy of this software, download the repository's Zip archive or clone it locally using Mercurial.
It is also possible to export all your personal repositories from the account settings page.
Fork of gr-peach-opencv-project-sd-card by
rand.cpp
00001 /*M/////////////////////////////////////////////////////////////////////////////////////// 00002 // 00003 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 00004 // 00005 // By downloading, copying, installing or using the software you agree to this license. 00006 // If you do not agree to this license, do not download, install, 00007 // copy or use the software. 00008 // 00009 // 00010 // License Agreement 00011 // For Open Source Computer Vision Library 00012 // 00013 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. 00014 // Copyright (C) 2009, Willow Garage Inc., all rights reserved. 00015 // Third party copyrights are property of their respective owners. 00016 // 00017 // Redistribution and use in source and binary forms, with or without modification, 00018 // are permitted provided that the following conditions are met: 00019 // 00020 // * Redistribution's of source code must retain the above copyright notice, 00021 // this list of conditions and the following disclaimer. 00022 // 00023 // * Redistribution's in binary form must reproduce the above copyright notice, 00024 // this list of conditions and the following disclaimer in the documentation 00025 // and/or other materials provided with the distribution. 00026 // 00027 // * The name of the copyright holders may not be used to endorse or promote products 00028 // derived from this software without specific prior written permission. 00029 // 00030 // This software is provided by the copyright holders and contributors "as is" and 00031 // any express or implied warranties, including, but not limited to, the implied 00032 // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

/* ////////////////////////////////////////////////////////////////////
//
//  Filling CvMat/IplImage instances with random numbers
//
// */

#include "precomp.hpp"

#if defined WIN32 || defined WINCE
#include <windows.h>
#undef small
#undef min
#undef max
#undef abs
//#else
//    #include <pthread.h>
#endif

#if defined __SSE2__ || (defined _M_IX86_FP && 2 == _M_IX86_FP)
#include "emmintrin.h"
#endif

namespace cv
{

///////////////////////////// Functions Declaration //////////////////////////////////////

/*
   Multiply-with-carry generator is used here:
   temp = ( A*X(n) + carry )
   X(n+1) = temp mod (2^32)
   carry = temp / (2^32)
*/

// One MWC step over the packed 64-bit state: the low 32 bits hold X(n),
// the high 32 bits hold the carry.
#define RNG_NEXT(x) ((uint64)(unsigned)(x)*CV_RNG_COEFF + ((x) >> 32))

/***************************************************************************************\
*                           Pseudo-Random Number Generators (PRNGs)                     *
\***************************************************************************************/

// Fast uniform-integer path: arr[i] = (rnd & p[i][0]) + p[i][1].
// Valid only when each per-channel range length is a power of two, so the
// mask p[i][0] selects uniformly.  small_flag == true means every mask fits
// in one byte, so a single 32-bit draw is sliced into four samples.
template<typename T> static void
randBits_( T* arr, int len, uint64* state, const Vec2i* p, bool small_flag )
{
    uint64 temp = *state;
    int i;

    if( !small_flag )
    {
        // 4x unrolled: one RNG step per output element
        for( i = 0; i <= len - 4; i += 4 )
        {
            int t0, t1;

            temp = RNG_NEXT(temp);
            t0 = ((int)temp & p[i][0]) + p[i][1];
            temp = RNG_NEXT(temp);
            t1 = ((int)temp & p[i+1][0]) + p[i+1][1];
            arr[i] = saturate_cast<T>(t0);
            arr[i+1] = saturate_cast<T>(t1);

            temp = RNG_NEXT(temp);
            t0 = ((int)temp & p[i+2][0]) + p[i+2][1];
            temp = RNG_NEXT(temp);
            t1 = ((int)temp & p[i+3][0]) + p[i+3][1];
            arr[i+2] = saturate_cast<T>(t0);
            arr[i+3] = saturate_cast<T>(t1);
        }
    }
    else
    {
        // Small ranges: slice one 32-bit draw into four bytes (one per sample)
        for( i = 0; i <= len - 4; i += 4 )
        {
            int t0, t1, t;
            temp = RNG_NEXT(temp);
            t = (int)temp;
            t0 = (t & p[i][0]) + p[i][1];
            t1 = ((t >> 8) & p[i+1][0]) + p[i+1][1];
            arr[i] = saturate_cast<T>(t0);
            arr[i+1] = saturate_cast<T>(t1);

            t0 = ((t >> 16) & p[i+2][0]) + p[i+2][1];
            t1 = ((t >> 24) & p[i+3][0]) + p[i+3][1];
            arr[i+2] = saturate_cast<T>(t0);
            arr[i+3] = saturate_cast<T>(t1);
        }
    }

    // Tail: remaining len % 4 elements
    for( ; i < len; i++ )
    {
        int t0;
        temp = RNG_NEXT(temp);

        t0 = ((int)temp & p[i][0]) + p[i][1];
        arr[i] = saturate_cast<T>(t0);
    }

    *state = temp;
}

// Per-channel constants for division-free reduction of a 32-bit draw into
// [delta, delta + d); M/sh1/sh2 implement multiply-and-shift division
// (filled in by RNG::fill below).
struct DivStruct
{
    unsigned d;     // range length (divisor)
    unsigned M;     // "magic" multiplier for the divisor d
    int sh1, sh2;   // post-multiply shift amounts
    int delta;      // lower bound of the range
};

// General uniform-integer path: arr[i] = (rnd mod p[i].d) + p[i].delta,
// with the modulo computed via the precomputed DivStruct (no hardware div).
template<typename T> static void
randi_( T* arr, int len, uint64* state, const DivStruct* p )
{
    uint64 temp = *state;
    int i = 0;
    unsigned t0, t1, v0, v1;

    // 4x unrolled main loop
    for( i = 0; i <= len - 4; i += 4 )
    {
        temp = RNG_NEXT(temp);
        t0 = (unsigned)temp;
        temp = RNG_NEXT(temp);
        t1 = (unsigned)temp;
        v0 = (unsigned)(((uint64)t0 * p[i].M) >> 32);
        v1 = (unsigned)(((uint64)t1 * p[i+1].M) >> 32);
        v0 = (v0 + ((t0 - v0) >> p[i].sh1)) >> p[i].sh2;
        v1 = (v1 + ((t1 - v1) >> p[i+1].sh1)) >> p[i+1].sh2;
        v0 = t0 - v0*p[i].d + p[i].delta;   // v0 := t0 mod d + delta
        v1 = t1 - v1*p[i+1].d + p[i+1].delta;
        arr[i] = saturate_cast<T>((int)v0);
        arr[i+1] = saturate_cast<T>((int)v1);

        temp = RNG_NEXT(temp);
        t0 = (unsigned)temp;
        temp = RNG_NEXT(temp);
        t1 = (unsigned)temp;
        v0 = (unsigned)(((uint64)t0 * p[i+2].M) >> 32);
        v1 = (unsigned)(((uint64)t1 * p[i+3].M) >> 32);
        v0 = (v0 + ((t0 - v0) >> p[i+2].sh1)) >> p[i+2].sh2;
        v1 = (v1 + ((t1 - v1) >> p[i+3].sh1)) >> p[i+3].sh2;
        v0 = t0 - v0*p[i+2].d + p[i+2].delta;
        v1 = t1 - v1*p[i+3].d + p[i+3].delta;
        arr[i+2] = saturate_cast<T>((int)v0);
        arr[i+3] = saturate_cast<T>((int)v1);
    }

    // Tail
    for( ; i < len; i++ )
    {
        temp = RNG_NEXT(temp);
        t0 = (unsigned)temp;
        v0 = (unsigned)(((uint64)t0 * p[i].M) >> 32);
        v0 = (v0 + ((t0 - v0) >> p[i].sh1)) >> p[i].sh2;
        v0 = t0 - v0*p[i].d + p[i].delta;
        arr[i] = saturate_cast<T>((int)v0);
    }

    *state = temp;
}

// Generate the non-template, per-depth entry points stored in randTab.
#define DEF_RANDI_FUNC(suffix, type) \
static void randBits_##suffix(type* arr, int len, uint64* state, \
                              const Vec2i* p, bool small_flag) \
{ randBits_(arr, len, state, p, small_flag); } \
\
static void randi_##suffix(type* arr, int len, uint64* state, \
                           const DivStruct* p, bool ) \
{ randi_(arr, len, state, p); }

DEF_RANDI_FUNC(8u, uchar)
DEF_RANDI_FUNC(8s, schar)
DEF_RANDI_FUNC(16u, ushort)
DEF_RANDI_FUNC(16s, short)
DEF_RANDI_FUNC(32s, int)

// Uniform floats: arr[i] = rnd*p[i][0] + p[i][1] with rnd a signed 32-bit draw.
static void randf_32f( float* arr, int len, uint64* state, const Vec2f* p, bool )
{
    uint64 temp = *state;
    int i = 0;

    for( ; i <= len - 4; i += 4 )
    {
        float f[4];
        f[0] = (float)(int)(temp = RNG_NEXT(temp));
        f[1] = (float)(int)(temp = RNG_NEXT(temp));
        f[2] = (float)(int)(temp = RNG_NEXT(temp));
        f[3] = (float)(int)(temp = RNG_NEXT(temp));

        // handwritten SSE is required not for performance but for numerical stability!
        // both 32-bit gcc and MSVC compilers trend to generate double precision SSE
        // while 64-bit compilers generate single precision SIMD instructions
        // so manual vectorisation forces all compilers to the single precision
#if defined __SSE2__ || (defined _M_IX86_FP && 2 == _M_IX86_FP)
        // transpose the two Vec2f pairs into (scale, offset) lanes, then fma-style
        __m128 q0 = _mm_loadu_ps((const float*)(p + i));
        __m128 q1 = _mm_loadu_ps((const float*)(p + i + 2));

        __m128 q01l = _mm_unpacklo_ps(q0, q1);
        __m128 q01h = _mm_unpackhi_ps(q0, q1);

        __m128 p0 = _mm_unpacklo_ps(q01l, q01h);
        __m128 p1 = _mm_unpackhi_ps(q01l, q01h);

        _mm_storeu_ps(arr + i, _mm_add_ps(_mm_mul_ps(_mm_loadu_ps(f), p0), p1));
#else
        arr[i+0] = f[0]*p[i+0][0] + p[i+0][1];
        arr[i+1] = f[1]*p[i+1][0] + p[i+1][1];
        arr[i+2] = f[2]*p[i+2][0] + p[i+2][1];
        arr[i+3] = f[3]*p[i+3][0] + p[i+3][1];
#endif
    }

    // Tail: keep the same single-precision arithmetic as the vector body
    for( ; i < len; i++ )
    {
        temp = RNG_NEXT(temp);
#if defined __SSE2__ || (defined _M_IX86_FP && 2 == _M_IX86_FP)
        _mm_store_ss(arr + i, _mm_add_ss(
                _mm_mul_ss(_mm_set_ss((float)(int)temp), _mm_set_ss(p[i][0])),
                _mm_set_ss(p[i][1]))
                );
#else
        arr[i] = (int)temp*p[i][0] + p[i][1];
#endif
    }

    *state = temp;
}

// Uniform doubles: a 64-bit value is built by swapping the halves of the
// MWC state, then scaled/offset per channel.
static void
randf_64f( double* arr, int len, uint64* state, const Vec2d* p, bool )
{
    uint64 temp = *state;
    int64 v = 0;
    int i;

    for( i = 0; i <= len - 4; i += 4 )
    {
        double f0, f1;

        temp = RNG_NEXT(temp);
        v = (temp >> 32)|(temp << 32);
        f0 = v*p[i][0] + p[i][1];
        temp = RNG_NEXT(temp);
        v = (temp >> 32)|(temp << 32);
        f1 = v*p[i+1][0] + p[i+1][1];
        arr[i] = f0; arr[i+1] = f1;

        temp = RNG_NEXT(temp);
        v = (temp >> 32)|(temp << 32);
        f0 = v*p[i+2][0] + p[i+2][1];
        temp = RNG_NEXT(temp);
        v = (temp >> 32)|(temp << 32);
        f1 = v*p[i+3][0] + p[i+3][1];
        arr[i+2] = f0; arr[i+3] = f1;
    }

    for( ; i < len; i++ )
    {
        temp = RNG_NEXT(temp);
        v = (temp >> 32)|(temp << 32);
        arr[i] = v*p[i][0] + p[i][1];
    }

    *state = temp;
}

// Uniform fillers indexed by [fast_int_mode][depth]; row 0 = general path,
// row 1 = power-of-two (randBits) path, available only for integer depths.
typedef void (*RandFunc)(uchar* arr, int len, uint64* state, const void* p, bool small_flag);

static RandFunc randTab[][8] =
{
    {
        (RandFunc)randi_8u, (RandFunc)randi_8s, (RandFunc)randi_16u, (RandFunc)randi_16s,
        (RandFunc)randi_32s, (RandFunc)randf_32f, (RandFunc)randf_64f, 0
    },
    {
        (RandFunc)randBits_8u, (RandFunc)randBits_8s, (RandFunc)randBits_16u, (RandFunc)randBits_16s,
        (RandFunc)randBits_32s, 0, 0, 0
    }
};

/*
   The code below implements the algorithm described in
   "The Ziggurat Method for Generating Random Variables"
   by Marsaglia and Tsang, Journal of Statistical Software.
*/
// Fill arr with len samples ~ N(0, 1) in single precision.
// NOTE(review): the table setup is guarded by a plain static bool, not an
// atomic/once-flag — looks racy if first called from several threads
// concurrently; confirm callers serialize the first use.
static void
randn_0_1_32f( float* arr, int len, uint64* state )
{
    const float r = 3.442620f; // The start of the right tail
    const float rng_flt = 2.3283064365386962890625e-10f; // 2^-32
    static unsigned kn[128];
    static float wn[128], fn[128];
    uint64 temp = *state;
    static bool initialized=false;
    int i;

    if( !initialized )
    {
        const double m1 = 2147483648.0;
        double dn = 3.442619855899, tn = dn, vn = 9.91256303526217e-3;

        // Set up the tables
        double q = vn/std::exp(-.5*dn*dn);
        kn[0] = (unsigned)((dn/q)*m1);
        kn[1] = 0;

        wn[0] = (float)(q/m1);
        wn[127] = (float)(dn/m1);

        fn[0] = 1.f;
        fn[127] = (float)std::exp(-.5*dn*dn);

        for(i=126;i>=1;i--)
        {
            dn = std::sqrt(-2.*std::log(vn/dn+std::exp(-.5*dn*dn)));
            kn[i+1] = (unsigned)((dn/tn)*m1);
            tn = dn;
            fn[i] = (float)std::exp(-.5*dn*dn);
            wn[i] = (float)(dn/m1);
        }
        initialized = true;
    }

    for( i = 0; i < len; i++ )
    {
        float x, y;
        for(;;)
        {
            int hz = (int)temp;
            temp = RNG_NEXT(temp);
            int iz = hz & 127;          // strip index
            x = hz*wn[iz];
            // fast accept: point falls inside the rectangular core of the strip
            if( (unsigned)std::abs(hz) < kn[iz] )
                break;
            if( iz == 0) // iz==0, handles the base strip
            {
                do
                {
                    x = (unsigned)temp*rng_flt;
                    temp = RNG_NEXT(temp);
                    y = (unsigned)temp*rng_flt;
                    temp = RNG_NEXT(temp);
                    x = (float)(-std::log(x+FLT_MIN)*0.2904764);
                    y = (float)-std::log(y+FLT_MIN);
                }   // .2904764 is 1/r
                while( y + y < x*x );
                x = hz > 0 ? r + x : -r - x;
                break;
            }
            // iz > 0, handle the wedges of other strips
            y = (unsigned)temp*rng_flt;
            temp = RNG_NEXT(temp);
            if( fn[iz] + y*(fn[iz - 1] - fn[iz]) < std::exp(-.5*x*x) )
                break;
        }
        arr[i] = x;
    }
    *state = temp;
}

// Single N(0, sigma^2) sample; computed in float then widened.
double RNG::gaussian(double sigma)
{
    float temp;
    randn_0_1_32f( &temp, 1, &state );
    return temp*sigma;
}

// Scale/shift unit-normal samples into the requested distribution:
// either per-channel (dst = src*stddev + mean) or, when stdmtx is set,
// via a full cn x cn transformation matrix in stddev.
template<typename T, typename PT> static void
randnScale_( const float* src, T* dst, int len, int cn, const PT* mean, const PT* stddev, bool stdmtx )
{
    int i, j, k;
    if( !stdmtx )
    {
        if( cn == 1 )
        {
            PT b = mean[0], a = stddev[0];
            for( i = 0; i < len; i++ )
                dst[i] = saturate_cast<T>(src[i]*a + b);
        }
        else
        {
            for( i = 0; i < len; i++, src += cn, dst += cn )
                for( k = 0; k < cn; k++ )
                    dst[k] = saturate_cast<T>(src[k]*stddev[k] + mean[k]);
        }
    }
    else
    {
        // full covariance-style transform: dst = stddev(matrix) * src + mean
        for( i = 0; i < len; i++, src += cn, dst += cn )
        {
            for( j = 0; j < cn; j++ )
            {
                PT s = mean[j];
                for( k = 0; k < cn; k++ )
                    s += src[k]*stddev[j*cn + k];
                dst[j] = saturate_cast<T>(s);
            }
        }
    }
}

static void randnScale_8u( const float* src, uchar* dst,
int len, int cn, 00444 const float* mean, const float* stddev, bool stdmtx ) 00445 { randnScale_(src, dst, len, cn, mean, stddev, stdmtx); } 00446 00447 static void randnScale_8s( const float* src, schar* dst, int len, int cn, 00448 const float* mean, const float* stddev, bool stdmtx ) 00449 { randnScale_(src, dst, len, cn, mean, stddev, stdmtx); } 00450 00451 static void randnScale_16u( const float* src, ushort* dst, int len, int cn, 00452 const float* mean, const float* stddev, bool stdmtx ) 00453 { randnScale_(src, dst, len, cn, mean, stddev, stdmtx); } 00454 00455 static void randnScale_16s( const float* src, short* dst, int len, int cn, 00456 const float* mean, const float* stddev, bool stdmtx ) 00457 { randnScale_(src, dst, len, cn, mean, stddev, stdmtx); } 00458 00459 static void randnScale_32s( const float* src, int* dst, int len, int cn, 00460 const float* mean, const float* stddev, bool stdmtx ) 00461 { randnScale_(src, dst, len, cn, mean, stddev, stdmtx); } 00462 00463 static void randnScale_32f( const float* src, float* dst, int len, int cn, 00464 const float* mean, const float* stddev, bool stdmtx ) 00465 { randnScale_(src, dst, len, cn, mean, stddev, stdmtx); } 00466 00467 static void randnScale_64f( const float* src, double* dst, int len, int cn, 00468 const double* mean, const double* stddev, bool stdmtx ) 00469 { randnScale_(src, dst, len, cn, mean, stddev, stdmtx); } 00470 00471 typedef void (*RandnScaleFunc)(const float* src, uchar* dst, int len, int cn, 00472 const uchar*, const uchar*, bool); 00473 00474 static RandnScaleFunc randnScaleTab[] = 00475 { 00476 (RandnScaleFunc)randnScale_8u, (RandnScaleFunc)randnScale_8s, (RandnScaleFunc)randnScale_16u, 00477 (RandnScaleFunc)randnScale_16s, (RandnScaleFunc)randnScale_32s, (RandnScaleFunc)randnScale_32f, 00478 (RandnScaleFunc)randnScale_64f, 0 00479 }; 00480 00481 void RNG::fill( InputOutputArray _mat, int disttype, 00482 InputArray _param1arg, InputArray _param2arg, bool saturateRange ) 00483 { 
00484 Mat mat = _mat.getMat(), _param1 = _param1arg.getMat(), _param2 = _param2arg.getMat(); 00485 int depth = mat.depth(), cn = mat.channels(); 00486 AutoBuffer<double> _parambuf; 00487 int j, k, fast_int_mode = 0, smallFlag = 1; 00488 RandFunc func = 0; 00489 RandnScaleFunc scaleFunc = 0; 00490 00491 CV_Assert(_param1.channels() == 1 && (_param1.rows == 1 || _param1.cols == 1) && 00492 (_param1.rows + _param1.cols - 1 == cn || _param1.rows + _param1.cols - 1 == 1 || 00493 (_param1.size() == Size(1, 4) && _param1.type() == CV_64F && cn <= 4))); 00494 CV_Assert( _param2.channels() == 1 && 00495 (((_param2.rows == 1 || _param2.cols == 1) && 00496 (_param2.rows + _param2.cols - 1 == cn || _param2.rows + _param2.cols - 1 == 1 || 00497 (_param1.size() == Size(1, 4) && _param1.type() == CV_64F && cn <= 4))) || 00498 (_param2.rows == cn && _param2.cols == cn && disttype == NORMAL))); 00499 00500 Vec2i* ip = 0; 00501 Vec2d* dp = 0; 00502 Vec2f* fp = 0; 00503 DivStruct* ds = 0; 00504 uchar* mean = 0; 00505 uchar* stddev = 0; 00506 bool stdmtx = false; 00507 int n1 = (int)_param1.total(); 00508 int n2 = (int)_param2.total(); 00509 00510 if( disttype == UNIFORM ) 00511 { 00512 _parambuf.allocate(cn*8 + n1 + n2); 00513 double* parambuf = _parambuf; 00514 double* p1 = _param1.ptr<double>(); 00515 double* p2 = _param2.ptr<double>(); 00516 00517 if( !_param1.isContinuous() || _param1.type() != CV_64F || n1 != cn ) 00518 { 00519 Mat tmp(_param1.size(), CV_64F, parambuf); 00520 _param1.convertTo(tmp, CV_64F); 00521 p1 = parambuf; 00522 if( n1 < cn ) 00523 for( j = n1; j < cn; j++ ) 00524 p1[j] = p1[j-n1]; 00525 } 00526 00527 if( !_param2.isContinuous() || _param2.type() != CV_64F || n2 != cn ) 00528 { 00529 Mat tmp(_param2.size(), CV_64F, parambuf + cn); 00530 _param2.convertTo(tmp, CV_64F); 00531 p2 = parambuf + cn; 00532 if( n2 < cn ) 00533 for( j = n2; j < cn; j++ ) 00534 p2[j] = p2[j-n2]; 00535 } 00536 00537 if( depth <= CV_32S ) 00538 { 00539 ip = (Vec2i*)(parambuf + cn*2); 
00540 for( j = 0, fast_int_mode = 1; j < cn; j++ ) 00541 { 00542 double a = std::min(p1[j], p2[j]); 00543 double b = std::max(p1[j], p2[j]); 00544 if( saturateRange ) 00545 { 00546 a = std::max(a, depth == CV_8U || depth == CV_16U ? 0. : 00547 depth == CV_8S ? -128. : depth == CV_16S ? -32768. : (double)INT_MIN); 00548 b = std::min(b, depth == CV_8U ? 256. : depth == CV_16U ? 65536. : 00549 depth == CV_8S ? 128. : depth == CV_16S ? 32768. : (double)INT_MAX); 00550 } 00551 ip[j][1] = cvCeil(a); 00552 int idiff = ip[j][0] = cvFloor(b) - ip[j][1] - 1; 00553 double diff = b - a; 00554 00555 fast_int_mode &= diff <= 4294967296. && (idiff & (idiff+1)) == 0; 00556 if( fast_int_mode ) 00557 smallFlag &= idiff <= 255; 00558 else 00559 { 00560 if( diff > INT_MAX ) 00561 ip[j][0] = INT_MAX; 00562 if( a < INT_MIN/2 ) 00563 ip[j][1] = INT_MIN/2; 00564 } 00565 } 00566 00567 if( !fast_int_mode ) 00568 { 00569 ds = (DivStruct*)(ip + cn); 00570 for( j = 0; j < cn; j++ ) 00571 { 00572 ds[j].delta = ip[j][1]; 00573 unsigned d = ds[j].d = (unsigned)(ip[j][0]+1); 00574 int l = 0; 00575 while(((uint64)1 << l) < d) 00576 l++; 00577 ds[j].M = (unsigned)(((uint64)1 << 32)*(((uint64)1 << l) - d)/d) + 1; 00578 ds[j].sh1 = std::min(l, 1); 00579 ds[j].sh2 = std::max(l - 1, 0); 00580 } 00581 } 00582 00583 func = randTab[fast_int_mode][depth]; 00584 } 00585 else 00586 { 00587 double scale = depth == CV_64F ? 00588 5.4210108624275221700372640043497e-20 : // 2**-64 00589 2.3283064365386962890625e-10; // 2**-32 00590 double maxdiff = saturateRange ? 
(double)FLT_MAX : DBL_MAX; 00591 00592 // for each channel i compute such dparam[0][i] & dparam[1][i], 00593 // so that a signed 32/64-bit integer X is transformed to 00594 // the range [param1.val[i], param2.val[i]) using 00595 // dparam[1][i]*X + dparam[0][i] 00596 if( depth == CV_32F ) 00597 { 00598 fp = (Vec2f*)(parambuf + cn*2); 00599 for( j = 0; j < cn; j++ ) 00600 { 00601 fp[j][0] = (float)(std::min(maxdiff, p2[j] - p1[j])*scale); 00602 fp[j][1] = (float)((p2[j] + p1[j])*0.5); 00603 } 00604 } 00605 else 00606 { 00607 dp = (Vec2d*)(parambuf + cn*2); 00608 for( j = 0; j < cn; j++ ) 00609 { 00610 dp[j][0] = std::min(DBL_MAX, p2[j] - p1[j])*scale; 00611 dp[j][1] = ((p2[j] + p1[j])*0.5); 00612 } 00613 } 00614 00615 func = randTab[0][depth]; 00616 } 00617 CV_Assert( func != 0 ); 00618 } 00619 else if( disttype == CV_RAND_NORMAL ) 00620 { 00621 _parambuf.allocate(MAX(n1, cn) + MAX(n2, cn)); 00622 double* parambuf = _parambuf; 00623 00624 int ptype = depth == CV_64F ? CV_64F : CV_32F; 00625 int esz = (int)CV_ELEM_SIZE(ptype); 00626 00627 if( _param1.isContinuous() && _param1.type() == ptype ) 00628 mean = _param1.ptr(); 00629 else 00630 { 00631 Mat tmp(_param1.size(), ptype, parambuf); 00632 _param1.convertTo(tmp, ptype); 00633 mean = (uchar*)parambuf; 00634 } 00635 00636 if( n1 < cn ) 00637 for( j = n1*esz; j < cn*esz; j++ ) 00638 mean[j] = mean[j - n1*esz]; 00639 00640 if( _param2.isContinuous() && _param2.type() == ptype ) 00641 stddev = _param2.ptr(); 00642 else 00643 { 00644 Mat tmp(_param2.size(), ptype, parambuf + cn); 00645 _param2.convertTo(tmp, ptype); 00646 stddev = (uchar*)(parambuf + cn); 00647 } 00648 00649 if( n1 < cn ) 00650 for( j = n1*esz; j < cn*esz; j++ ) 00651 stddev[j] = stddev[j - n1*esz]; 00652 00653 stdmtx = _param2.rows == cn && _param2.cols == cn; 00654 scaleFunc = randnScaleTab[depth]; 00655 CV_Assert( scaleFunc != 0 ); 00656 } 00657 else 00658 CV_Error( CV_StsBadArg, "Unknown distribution type" ); 00659 00660 const Mat* arrays[] = {&mat, 
0}; 00661 uchar* ptr; 00662 NAryMatIterator it(arrays, &ptr); 00663 int total = (int)it.size, blockSize = std::min((BLOCK_SIZE + cn - 1)/cn, total); 00664 size_t esz = mat.elemSize(); 00665 AutoBuffer<double> buf; 00666 uchar* param = 0; 00667 float* nbuf = 0; 00668 00669 if( disttype == UNIFORM ) 00670 { 00671 buf.allocate(blockSize*cn*4); 00672 param = (uchar*)(double*)buf; 00673 00674 if( ip ) 00675 { 00676 if( ds ) 00677 { 00678 DivStruct* p = (DivStruct*)param; 00679 for( j = 0; j < blockSize*cn; j += cn ) 00680 for( k = 0; k < cn; k++ ) 00681 p[j + k] = ds[k]; 00682 } 00683 else 00684 { 00685 Vec2i* p = (Vec2i*)param; 00686 for( j = 0; j < blockSize*cn; j += cn ) 00687 for( k = 0; k < cn; k++ ) 00688 p[j + k] = ip[k]; 00689 } 00690 } 00691 else if( fp ) 00692 { 00693 Vec2f* p = (Vec2f*)param; 00694 for( j = 0; j < blockSize*cn; j += cn ) 00695 for( k = 0; k < cn; k++ ) 00696 p[j + k] = fp[k]; 00697 } 00698 else 00699 { 00700 Vec2d* p = (Vec2d*)param; 00701 for( j = 0; j < blockSize*cn; j += cn ) 00702 for( k = 0; k < cn; k++ ) 00703 p[j + k] = dp[k]; 00704 } 00705 } 00706 else 00707 { 00708 buf.allocate((blockSize*cn+1)/2); 00709 nbuf = (float*)(double*)buf; 00710 } 00711 00712 for( size_t i = 0; i < it.nplanes; i++, ++it ) 00713 { 00714 for( j = 0; j < total; j += blockSize ) 00715 { 00716 int len = std::min(total - j, blockSize); 00717 00718 if( disttype == CV_RAND_UNI ) 00719 func( ptr, len*cn, &state, param, smallFlag != 0 ); 00720 else 00721 { 00722 randn_0_1_32f(nbuf, len*cn, &state); 00723 scaleFunc(nbuf, ptr, len, cn, mean, stddev, stdmtx); 00724 } 00725 ptr += len*esz; 00726 } 00727 } 00728 } 00729 00730 } 00731 00732 cv::RNG& cv::theRNG() 00733 { 00734 return getCoreTlsData().get()->rng; 00735 } 00736 00737 void cv::randu(InputOutputArray dst, InputArray low, InputArray high) 00738 { 00739 theRNG().fill(dst, RNG::UNIFORM, low, high); 00740 } 00741 00742 void cv::randn(InputOutputArray dst, InputArray mean, InputArray stddev) 00743 { 00744 
    theRNG().fill(dst, RNG::NORMAL, mean, stddev);
}

namespace cv
{

// Shuffle all elements of _arr in place.  Each position i is swapped with a
// uniformly drawn position j.
// NOTE(review): `(unsigned)rng % sz` has modulo bias for sz not dividing
// 2^32, and swapping every i with a fresh uniform j is not the Fisher-Yates
// scheme — the resulting permutation is slightly non-uniform.  Kept as-is:
// the output stream is part of the reproducible RNG behavior.
template<typename T> static void
randShuffle_( Mat& _arr, RNG& rng, double )
{
    unsigned sz = (unsigned)_arr.total();
    if( _arr.isContinuous() )
    {
        T* arr = _arr.ptr<T>();
        for( unsigned i = 0; i < sz; i++ )
        {
            unsigned j = (unsigned)rng % sz;
            std::swap( arr[j], arr[i] );
        }
    }
    else
    {
        // non-continuous: only 2D supported; map the flat index k1 back to
        // (row, col) through the row step
        CV_Assert( _arr.dims <= 2 );
        uchar* data = _arr.ptr();
        size_t step = _arr.step;
        int rows = _arr.rows;
        int cols = _arr.cols;
        for( int i0 = 0; i0 < rows; i0++ )
        {
            T* p = _arr.ptr<T>(i0);
            for( int j0 = 0; j0 < cols; j0++ )
            {
                unsigned k1 = (unsigned)rng % sz;
                int i1 = (int)(k1 / cols);
                int j1 = (int)(k1 - (unsigned)i1*(unsigned)cols);
                std::swap( p[j0], ((T*)(data + step*i1))[j1] );
            }
        }
    }
}

typedef void (*RandShuffleFunc)( Mat& dst, RNG& rng, double iterFactor );

}

// Shuffle dst's elements; the worker is picked by element size (1..32 bytes).
// iterFactor is forwarded but unused by the current workers.
void cv::randShuffle( InputOutputArray _dst, double iterFactor, RNG* _rng )
{
    RandShuffleFunc tab[] =
    {
        0,
        randShuffle_<uchar>, // 1
        randShuffle_<ushort>, // 2
        randShuffle_<Vec<uchar,3> >, // 3
        randShuffle_<int>, // 4
        0,
        randShuffle_<Vec<ushort,3> >, // 6
        0,
        randShuffle_<Vec<int,2> >, // 8
        0, 0, 0,
        randShuffle_<Vec<int,3> >, // 12
        0, 0, 0,
        randShuffle_<Vec<int,4> >, // 16
        0, 0, 0, 0, 0, 0, 0,
        randShuffle_<Vec<int,6> >, // 24
        0, 0, 0, 0, 0, 0, 0,
        randShuffle_<Vec<int,8> > // 32
    };

    Mat dst = _dst.getMat();
    RNG& rng = _rng ? *_rng : theRNG();
    CV_Assert( dst.elemSize() <= 32 );
    RandShuffleFunc func = tab[dst.elemSize()];
    CV_Assert( func != 0 );
    func( dst, rng, iterFactor );
}

// Legacy C API wrapper around RNG::fill.
CV_IMPL void
cvRandArr( CvRNG* _rng, CvArr* arr, int disttype, CvScalar param1, CvScalar param2 )
{
    cv::Mat mat = cv::cvarrToMat(arr);
    // !!! this will only work for current 64-bit MWC RNG !!!
    cv::RNG& rng = _rng ? (cv::RNG&)*_rng : cv::theRNG();
    rng.fill(mat, disttype == CV_RAND_NORMAL ?
        cv::RNG::NORMAL : cv::RNG::UNIFORM, cv::Scalar (param1), cv::Scalar (param2) );
}

// Legacy C API wrapper around cv::randShuffle.
CV_IMPL void cvRandShuffle( CvArr* arr, CvRNG* _rng, double iter_factor )
{
    cv::Mat dst = cv::cvarrToMat(arr);
    cv::RNG& rng = _rng ? (cv::RNG&)*_rng : cv::theRNG();
    cv::randShuffle( dst, iter_factor, &rng );
}

// Mersenne Twister random number generator.
// Inspired by http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/mt19937ar.c

/*
   A C-program for MT19937, with initialization improved 2002/1/26.
   Coded by Takuji Nishimura and Makoto Matsumoto.

   Before using, initialize the state by using init_genrand(seed)
   or init_by_array(init_key, key_length).

   Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
   All rights reserved.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

     1. Redistributions of source code must retain the above copyright
        notice, this list of conditions and the following disclaimer.

     2. Redistributions in binary form must reproduce the above copyright
        notice, this list of conditions and the following disclaimer in the
        documentation and/or other materials provided with the distribution.

     3.
        The names of its contributors may not be used to endorse or promote
        products derived from this software without specific prior written
        permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
   CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


   Any feedback is very welcome.
   http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
   email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space)
*/

cv::RNG_MT19937::RNG_MT19937(unsigned s) { seed(s); }

// Default seed 5489 matches the MT19937 reference implementation.
cv::RNG_MT19937::RNG_MT19937() { seed(5489U); }

// (Re)initialize the N-word state array from a single 32-bit seed.
void cv::RNG_MT19937::seed(unsigned s)
{
    state[0]= s;
    for (mti = 1; mti < N; mti++)
    {
        /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
        state[mti] = (1812433253U * (state[mti - 1] ^ (state[mti - 1] >> 30)) + mti);
    }
}

// Produce the next 32-bit output word; regenerates the whole state block
// of N words whenever it is exhausted, then applies the tempering transform.
unsigned cv::RNG_MT19937::next()
{
    /* mag01[x] = x * MATRIX_A for x=0,1 */
    static unsigned mag01[2] = { 0x0U, /*MATRIX_A*/ 0x9908b0dfU};

    const unsigned UPPER_MASK = 0x80000000U;
    const unsigned LOWER_MASK = 0x7fffffffU;

    /* generate N words at one time */
    if (mti >= N)
    {
        int kk = 0;

        for (; kk < N - M; ++kk)
        {
            unsigned y = (state[kk] & UPPER_MASK) | (state[kk + 1] & LOWER_MASK);
            state[kk] = state[kk + M] ^ (y >> 1) ^ mag01[y & 0x1U];
        }

        for (; kk < N - 1; ++kk)
        {
            unsigned y = (state[kk] & UPPER_MASK) | (state[kk + 1] & LOWER_MASK);
            state[kk] = state[kk + (M - N)] ^ (y >> 1) ^ mag01[y & 0x1U];
        }

        unsigned y = (state[N - 1] & UPPER_MASK) | (state[0] & LOWER_MASK);
        state[N - 1] = state[M - 1] ^ (y >> 1) ^ mag01[y & 0x1U];

        mti = 0;
    }

    unsigned y = state[mti++];

    /* Tempering */
    y ^= (y >> 11);
    y ^= (y << 7) & 0x9d2c5680U;
    y ^= (y << 15) & 0xefc60000U;
    y ^= (y >> 18);

    return y;
}

cv::RNG_MT19937::operator unsigned() { return next(); }

cv::RNG_MT19937::operator int() { return (int)next();}

// Float in [0, 1) with 32-bit resolution.
cv::RNG_MT19937::operator float() { return next() * (1.f / 4294967296.f); }

// Double in [0, 1) with 53-bit resolution, built from two draws.
cv::RNG_MT19937::operator double()
{
    unsigned a = next() >> 5;
    unsigned b = next() >> 6;
    return (a * 67108864.0 + b) * (1.0 / 9007199254740992.0);
}

// NOTE(review): `next() % (b - a)` has modulo bias for non-power-of-two
// range lengths, and b == a divides by zero; callers must ensure b > a.
// Kept as-is for output compatibility.
int cv::RNG_MT19937::uniform(int a, int b) { return (int)(next() % (b - a) + a); }

float cv::RNG_MT19937::uniform(float a, float b) { return ((float)*this)*(b - a) + a; }

double cv::RNG_MT19937::uniform(double a, double b) { return ((double)*this)*(b - a) + a; }

// NOTE(review): modulo bias as above; b must be non-zero.
unsigned cv::RNG_MT19937::operator ()(unsigned b) { return next() % b; }

unsigned cv::RNG_MT19937::operator ()() { return next(); }

/* End of file. */
Generated on Tue Jul 12 2022 14:47:33 by
