Fork of gr-peach-opencv-project
undistort.cpp
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"
#include "opencv2/imgproc/detail/distortion_model.hpp"

cv::Mat cv::getDefaultNewCameraMatrix( InputArray _cameraMatrix, Size imgsize,
                                       bool centerPrincipalPoint )
{
    Mat cameraMatrix = _cameraMatrix.getMat();
    if( !centerPrincipalPoint && cameraMatrix.type() == CV_64F )
        return cameraMatrix;

    Mat newCameraMatrix;
    cameraMatrix.convertTo(newCameraMatrix, CV_64F);
    if( centerPrincipalPoint )
    {
        newCameraMatrix.ptr<double>()[2] = (imgsize.width-1)*0.5;
        newCameraMatrix.ptr<double>()[5] = (imgsize.height-1)*0.5;
    }
    return newCameraMatrix;
}

void cv::initUndistortRectifyMap( InputArray _cameraMatrix, InputArray _distCoeffs,
                                  InputArray _matR, InputArray _newCameraMatrix,
                                  Size size, int m1type, OutputArray _map1, OutputArray _map2 )
{
    Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
    Mat matR = _matR.getMat(), newCameraMatrix = _newCameraMatrix.getMat();

    if( m1type <= 0 )
        m1type = CV_16SC2;
    CV_Assert( m1type == CV_16SC2 || m1type == CV_32FC1 || m1type == CV_32FC2 );
    _map1.create( size, m1type );
    Mat map1 = _map1.getMat(), map2;
    if( m1type != CV_32FC2 )
    {
        _map2.create( size, m1type == CV_16SC2 ? CV_16UC1 : CV_32FC1 );
        map2 = _map2.getMat();
    }
    else
        _map2.release();

    Mat_<double> R = Mat_<double>::eye(3, 3);
    Mat_<double> A = Mat_<double>(cameraMatrix), Ar;

    if( !newCameraMatrix.empty() )
        Ar = Mat_<double>(newCameraMatrix);
    else
        Ar = getDefaultNewCameraMatrix( A, size, true );

    if( !matR.empty() )
        R = Mat_<double>(matR);

    if( !distCoeffs.empty() )
        distCoeffs = Mat_<double>(distCoeffs);
    else
    {
        distCoeffs.create(14, 1, CV_64F);
        distCoeffs = 0.;
    }

    CV_Assert( A.size() == Size(3,3) && A.size() == R.size() );
    CV_Assert( Ar.size() == Size(3,3) || Ar.size() == Size(4, 3));
    Mat_<double> iR = (Ar.colRange(0,3)*R).inv(DECOMP_LU);
    const double* ir = &iR(0,0);

    double u0 = A(0, 2),  v0 = A(1, 2);
    double fx = A(0, 0),  fy = A(1, 1);

    CV_Assert( distCoeffs.size() == Size(1, 4) || distCoeffs.size() == Size(4, 1) ||
               distCoeffs.size() == Size(1, 5) || distCoeffs.size() == Size(5, 1) ||
               distCoeffs.size() == Size(1, 8) || distCoeffs.size() == Size(8, 1) ||
               distCoeffs.size() == Size(1, 12) || distCoeffs.size() == Size(12, 1) ||
               distCoeffs.size() == Size(1, 14) || distCoeffs.size() == Size(14, 1));

    if( distCoeffs.rows != 1 && !distCoeffs.isContinuous() )
        distCoeffs = distCoeffs.t();

    const double* const distPtr = distCoeffs.ptr<double>();
    double k1 = distPtr[0];
    double k2 = distPtr[1];
    double p1 = distPtr[2];
    double p2 = distPtr[3];
    double k3 = distCoeffs.cols + distCoeffs.rows - 1 >= 5 ? distPtr[4] : 0.;
    double k4 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? distPtr[5] : 0.;
    double k5 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? distPtr[6] : 0.;
    double k6 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? distPtr[7] : 0.;
    double s1 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? distPtr[8] : 0.;
    double s2 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? distPtr[9] : 0.;
    double s3 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? distPtr[10] : 0.;
    double s4 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? distPtr[11] : 0.;
    double tauX = distCoeffs.cols + distCoeffs.rows - 1 >= 14 ? distPtr[12] : 0.;
    double tauY = distCoeffs.cols + distCoeffs.rows - 1 >= 14 ? distPtr[13] : 0.;

    // Matrix for trapezoidal distortion of tilted image sensor
    cv::Matx33d matTilt = cv::Matx33d::eye();
    cv::detail::computeTiltProjectionMatrix(tauX, tauY, &matTilt);

    for( int i = 0; i < size.height; i++ )
    {
        float* m1f = map1.ptr<float>(i);
        float* m2f = map2.empty() ? 0 : map2.ptr<float>(i);
        short* m1 = (short*)m1f;
        ushort* m2 = (ushort*)m2f;
        double _x = i*ir[1] + ir[2], _y = i*ir[4] + ir[5], _w = i*ir[7] + ir[8];

        for( int j = 0; j < size.width; j++, _x += ir[0], _y += ir[3], _w += ir[6] )
        {
            double w = 1./_w, x = _x*w, y = _y*w;
            double x2 = x*x, y2 = y*y;
            double r2 = x2 + y2, _2xy = 2*x*y;
            double kr = (1 + ((k3*r2 + k2)*r2 + k1)*r2)/(1 + ((k6*r2 + k5)*r2 + k4)*r2);
            double xd = (x*kr + p1*_2xy + p2*(r2 + 2*x2) + s1*r2 + s2*r2*r2);
            double yd = (y*kr + p1*(r2 + 2*y2) + p2*_2xy + s3*r2 + s4*r2*r2);
            cv::Vec3d vecTilt = matTilt*cv::Vec3d(xd, yd, 1);
            double invProj = vecTilt(2) ? 1./vecTilt(2) : 1;
            double u = fx*invProj*vecTilt(0) + u0;
            double v = fy*invProj*vecTilt(1) + v0;
            if( m1type == CV_16SC2 )
            {
                int iu = saturate_cast<int>(u*INTER_TAB_SIZE);
                int iv = saturate_cast<int>(v*INTER_TAB_SIZE);
                m1[j*2] = (short)(iu >> INTER_BITS);
                m1[j*2+1] = (short)(iv >> INTER_BITS);
                m2[j] = (ushort)((iv & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (iu & (INTER_TAB_SIZE-1)));
            }
            else if( m1type == CV_32FC1 )
            {
                m1f[j] = (float)u;
                m2f[j] = (float)v;
            }
            else
            {
                m1f[j*2] = (float)u;
                m1f[j*2+1] = (float)v;
            }
        }
    }
}
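/* Note on the inner loop above: it evaluates the standard OpenCV rational +
 * tangential + thin-prism distortion model. For a normalized, rectified point
 * (x, y) with r^2 = x^2 + y^2:
 *
 *     kr = (1 + k1*r^2 + k2*r^4 + k3*r^6) / (1 + k4*r^2 + k5*r^4 + k6*r^6)
 *     xd = x*kr + 2*p1*x*y + p2*(r^2 + 2*x^2) + s1*r^2 + s2*r^4
 *     yd = y*kr + p1*(r^2 + 2*y^2) + 2*p2*x*y + s3*r^2 + s4*r^4
 *
 * The sensor-tilt matrix built from (tauX, tauY) is then applied, and the
 * result is mapped back to pixel coordinates with fx, fy, u0, v0.
 */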
void cv::undistort( InputArray _src, OutputArray _dst, InputArray _cameraMatrix,
                    InputArray _distCoeffs, InputArray _newCameraMatrix )
{
    Mat src = _src.getMat(), cameraMatrix = _cameraMatrix.getMat();
    Mat distCoeffs = _distCoeffs.getMat(), newCameraMatrix = _newCameraMatrix.getMat();

    _dst.create( src.size(), src.type() );
    Mat dst = _dst.getMat();

    CV_Assert( dst.data != src.data );

    int stripe_size0 = std::min(std::max(1, (1 << 12) / std::max(src.cols, 1)), src.rows);
    Mat map1(stripe_size0, src.cols, CV_16SC2), map2(stripe_size0, src.cols, CV_16UC1);

    Mat_<double> A, Ar, I = Mat_<double>::eye(3,3);

    cameraMatrix.convertTo(A, CV_64F);
    if( !distCoeffs.empty() )
        distCoeffs = Mat_<double>(distCoeffs);
    else
    {
        distCoeffs.create(5, 1, CV_64F);
        distCoeffs = 0.;
    }

    if( !newCameraMatrix.empty() )
        newCameraMatrix.convertTo(Ar, CV_64F);
    else
        A.copyTo(Ar);

    double v0 = Ar(1, 2);
    for( int y = 0; y < src.rows; y += stripe_size0 )
    {
        int stripe_size = std::min( stripe_size0, src.rows - y );
        Ar(1, 2) = v0 - y;
        Mat map1_part = map1.rowRange(0, stripe_size),
            map2_part = map2.rowRange(0, stripe_size),
            dst_part = dst.rowRange(y, y + stripe_size);

        initUndistortRectifyMap( A, distCoeffs, I, Ar, Size(src.cols, stripe_size),
                                 map1_part.type(), map1_part, map2_part );
        remap( src, dst_part, map1_part, map2_part, INTER_LINEAR, BORDER_CONSTANT );
    }
}
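/* Usage sketch (illustrative only; "frame", "K" and "dist" are hypothetical
 * caller-side variables, not defined in this file). cv::undistort above is the
 * convenient one-call path for a single image; when many frames share the same
 * calibration it is usually cheaper to build the maps once and remap each frame:
 *
 *     cv::Mat map1, map2, out;
 *     cv::initUndistortRectifyMap(K, dist, cv::Mat(), K, frame.size(),
 *                                 CV_16SC2, map1, map2);
 *     cv::remap(frame, out, map1, map2, cv::INTER_LINEAR);
 */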
CV_IMPL void
cvUndistort2( const CvArr* srcarr, CvArr* dstarr, const CvMat* Aarr, const CvMat* dist_coeffs, const CvMat* newAarr )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), dst0 = dst;
    cv::Mat A = cv::cvarrToMat(Aarr), distCoeffs = cv::cvarrToMat(dist_coeffs), newA;
    if( newAarr )
        newA = cv::cvarrToMat(newAarr);

    CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
    cv::undistort( src, dst, A, distCoeffs, newA );
}


CV_IMPL void cvInitUndistortMap( const CvMat* Aarr, const CvMat* dist_coeffs,
                                 CvArr* mapxarr, CvArr* mapyarr )
{
    cv::Mat A = cv::cvarrToMat(Aarr), distCoeffs = cv::cvarrToMat(dist_coeffs);
    cv::Mat mapx = cv::cvarrToMat(mapxarr), mapy, mapx0 = mapx, mapy0;

    if( mapyarr )
        mapy0 = mapy = cv::cvarrToMat(mapyarr);

    cv::initUndistortRectifyMap( A, distCoeffs, cv::Mat(), A,
                                 mapx.size(), mapx.type(), mapx, mapy );
    CV_Assert( mapx0.data == mapx.data && mapy0.data == mapy.data );
}

void
cvInitUndistortRectifyMap( const CvMat* Aarr, const CvMat* dist_coeffs,
                           const CvMat* Rarr, const CvMat* ArArr, CvArr* mapxarr, CvArr* mapyarr )
{
    cv::Mat A = cv::cvarrToMat(Aarr), distCoeffs, R, Ar;
    cv::Mat mapx = cv::cvarrToMat(mapxarr), mapy, mapx0 = mapx, mapy0;

    if( mapyarr )
        mapy0 = mapy = cv::cvarrToMat(mapyarr);

    if( dist_coeffs )
        distCoeffs = cv::cvarrToMat(dist_coeffs);
    if( Rarr )
        R = cv::cvarrToMat(Rarr);
    if( ArArr )
        Ar = cv::cvarrToMat(ArArr);

    cv::initUndistortRectifyMap( A, distCoeffs, R, Ar, mapx.size(), mapx.type(), mapx, mapy );
    CV_Assert( mapx0.data == mapx.data && mapy0.data == mapy.data );
}
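/* The three functions above are the legacy C-API entry points. Each one simply
 * converts its CvArr / CvMat arguments with cv::cvarrToMat, forwards to the
 * corresponding cv:: implementation, and asserts that the caller-provided
 * output buffers were not reallocated in the process. */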
void cvUndistortPoints( const CvMat* _src, CvMat* _dst, const CvMat* _cameraMatrix,
                        const CvMat* _distCoeffs,
                        const CvMat* matR, const CvMat* matP )
{
    double A[3][3], RR[3][3], k[14]={0,0,0,0,0,0,0,0,0,0,0,0,0,0}, fx, fy, ifx, ify, cx, cy;
    CvMat matA=cvMat(3, 3, CV_64F, A), _Dk;
    CvMat _RR=cvMat(3, 3, CV_64F, RR);
    const CvPoint2D32f* srcf;
    const CvPoint2D64f* srcd;
    CvPoint2D32f* dstf;
    CvPoint2D64f* dstd;
    int stype, dtype;
    int sstep, dstep;
    int i, j, n, iters = 1;
    cv::Matx33d invMatTilt = cv::Matx33d::eye();

    CV_Assert( CV_IS_MAT(_src) && CV_IS_MAT(_dst) &&
        (_src->rows == 1 || _src->cols == 1) &&
        (_dst->rows == 1 || _dst->cols == 1) &&
        _src->cols + _src->rows - 1 == _dst->rows + _dst->cols - 1 &&
        (CV_MAT_TYPE(_src->type) == CV_32FC2 || CV_MAT_TYPE(_src->type) == CV_64FC2) &&
        (CV_MAT_TYPE(_dst->type) == CV_32FC2 || CV_MAT_TYPE(_dst->type) == CV_64FC2));

    CV_Assert( CV_IS_MAT(_cameraMatrix) &&
        _cameraMatrix->rows == 3 && _cameraMatrix->cols == 3 );

    cvConvert( _cameraMatrix, &matA );

    if( _distCoeffs )
    {
        CV_Assert( CV_IS_MAT(_distCoeffs) &&
            (_distCoeffs->rows == 1 || _distCoeffs->cols == 1) &&
            (_distCoeffs->rows*_distCoeffs->cols == 4 ||
             _distCoeffs->rows*_distCoeffs->cols == 5 ||
             _distCoeffs->rows*_distCoeffs->cols == 8 ||
             _distCoeffs->rows*_distCoeffs->cols == 12 ||
             _distCoeffs->rows*_distCoeffs->cols == 14));

        _Dk = cvMat( _distCoeffs->rows, _distCoeffs->cols,
            CV_MAKETYPE(CV_64F,CV_MAT_CN(_distCoeffs->type)), k);

        cvConvert( _distCoeffs, &_Dk );
        iters = 5;
        if (k[12] != 0 || k[13] != 0)
            cv::detail::computeTiltProjectionMatrix<double>(k[12], k[13], NULL, NULL, NULL, &invMatTilt);
    }

    if( matR )
    {
        CV_Assert( CV_IS_MAT(matR) && matR->rows == 3 && matR->cols == 3 );
        cvConvert( matR, &_RR );
    }
    else
        cvSetIdentity(&_RR);

    if( matP )
    {
        double PP[3][3];
        CvMat _P3x3, _PP=cvMat(3, 3, CV_64F, PP);
        CV_Assert( CV_IS_MAT(matP) && matP->rows == 3 && (matP->cols == 3 || matP->cols == 4));
        cvConvert( cvGetCols(matP, &_P3x3, 0, 3), &_PP );
        cvMatMul( &_PP, &_RR, &_RR );
    }

    srcf = (const CvPoint2D32f*)_src->data.ptr;
    srcd = (const CvPoint2D64f*)_src->data.ptr;
    dstf = (CvPoint2D32f*)_dst->data.ptr;
    dstd = (CvPoint2D64f*)_dst->data.ptr;
    stype = CV_MAT_TYPE(_src->type);
    dtype = CV_MAT_TYPE(_dst->type);
    sstep = _src->rows == 1 ? 1 : _src->step/CV_ELEM_SIZE(stype);
    dstep = _dst->rows == 1 ? 1 : _dst->step/CV_ELEM_SIZE(dtype);

    n = _src->rows + _src->cols - 1;

    fx = A[0][0];
    fy = A[1][1];
    ifx = 1./fx;
    ify = 1./fy;
    cx = A[0][2];
    cy = A[1][2];

    for( i = 0; i < n; i++ )
    {
        double x, y, x0, y0;
        if( stype == CV_32FC2 )
        {
            x = srcf[i*sstep].x;
            y = srcf[i*sstep].y;
        }
        else
        {
            x = srcd[i*sstep].x;
            y = srcd[i*sstep].y;
        }

        x = (x - cx)*ifx;
        y = (y - cy)*ify;

        // compensate tilt distortion
        cv::Vec3d vecUntilt = invMatTilt * cv::Vec3d(x, y, 1);
        double invProj = vecUntilt(2) ? 1./vecUntilt(2) : 1;
        x0 = x = invProj * vecUntilt(0);
        y0 = y = invProj * vecUntilt(1);

        // compensate distortion iteratively
        for( j = 0; j < iters; j++ )
        {
            double r2 = x*x + y*y;
            double icdist = (1 + ((k[7]*r2 + k[6])*r2 + k[5])*r2)/(1 + ((k[4]*r2 + k[1])*r2 + k[0])*r2);
            double deltaX = 2*k[2]*x*y + k[3]*(r2 + 2*x*x) + k[8]*r2 + k[9]*r2*r2;
            double deltaY = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y + k[10]*r2 + k[11]*r2*r2;
            x = (x0 - deltaX)*icdist;
            y = (y0 - deltaY)*icdist;
        }

        double xx = RR[0][0]*x + RR[0][1]*y + RR[0][2];
        double yy = RR[1][0]*x + RR[1][1]*y + RR[1][2];
        double ww = 1./(RR[2][0]*x + RR[2][1]*y + RR[2][2]);
        x = xx*ww;
        y = yy*ww;

        if( dtype == CV_32FC2 )
        {
            dstf[i*dstep].x = (float)x;
            dstf[i*dstep].y = (float)y;
        }
        else
        {
            dstd[i*dstep].x = x;
            dstd[i*dstep].y = y;
        }
    }
}


void cv::undistortPoints( InputArray _src, OutputArray _dst,
                          InputArray _cameraMatrix,
                          InputArray _distCoeffs,
                          InputArray _Rmat,
                          InputArray _Pmat )
{
    Mat src = _src.getMat(), cameraMatrix = _cameraMatrix.getMat();
    Mat distCoeffs = _distCoeffs.getMat(), R = _Rmat.getMat(), P = _Pmat.getMat();

    CV_Assert( src.isContinuous() && (src.depth() == CV_32F || src.depth() == CV_64F) &&
              ((src.rows == 1 && src.channels() == 2) || src.cols*src.channels() == 2));

    _dst.create(src.size(), src.type(), -1, true);
    Mat dst = _dst.getMat();

    CvMat _csrc = src, _cdst = dst, _ccameraMatrix = cameraMatrix;
    CvMat matR, matP, _cdistCoeffs, *pR=0, *pP=0, *pD=0;
    if( !R.empty() )
        pR = &(matR = R);
    if( !P.empty() )
        pP = &(matP = P);
    if( !distCoeffs.empty() )
        pD = &(_cdistCoeffs = distCoeffs);
    cvUndistortPoints(&_csrc, &_cdst, &_ccameraMatrix, pD, pR, pP);
}
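/* Usage sketch (illustrative only; "pts", "K" and "dist" are hypothetical
 * caller-side variables). With R and P left empty, cv::undistortPoints above
 * returns ideal, normalized coordinates; passing the camera matrix as P maps
 * the corrected points back into pixel coordinates:
 *
 *     std::vector<cv::Point2f> pts = { cv::Point2f(320.f, 240.f) }, ideal, pixel;
 *     cv::undistortPoints(pts, ideal, K, dist);                    // normalized
 *     cv::undistortPoints(pts, pixel, K, dist, cv::noArray(), K);  // pixel coords
 */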
namespace cv
{

static Point2f mapPointSpherical(const Point2f& p, float alpha, Vec4d* J, int projType)
{
    double x = p.x, y = p.y;
    double beta = 1 + 2*alpha;
    double v = x*x + y*y + 1, iv = 1/v;
    double u = std::sqrt(beta*v + alpha*alpha);

    double k = (u - alpha)*iv;
    double kv = (v*beta/u - (u - alpha)*2)*iv*iv;
    double kx = kv*x, ky = kv*y;

    if( projType == PROJ_SPHERICAL_ORTHO )
    {
        if(J)
            *J = Vec4d(kx*x + k, kx*y, ky*x, ky*y + k);
        return Point2f((float)(x*k), (float)(y*k));
    }
    if( projType == PROJ_SPHERICAL_EQRECT )
    {
        // equirectangular
        double iR = 1/(alpha + 1);
        double x1 = std::max(std::min(x*k*iR, 1.), -1.);
        double y1 = std::max(std::min(y*k*iR, 1.), -1.);

        if(J)
        {
            double fx1 = iR/std::sqrt(1 - x1*x1);
            double fy1 = iR/std::sqrt(1 - y1*y1);
            *J = Vec4d(fx1*(kx*x + k), fx1*ky*x, fy1*kx*y, fy1*(ky*y + k));
        }
        return Point2f((float)asin(x1), (float)asin(y1));
    }
    CV_Error(CV_StsBadArg, "Unknown projection type");
    return Point2f();
}


static Point2f invMapPointSpherical(Point2f _p, float alpha, int projType)
{
    static int avgiter = 0, avgn = 0;

    double eps = 1e-12;
    Vec2d p(_p.x, _p.y), q(_p.x, _p.y), err;
    Vec4d J;
    int i, maxiter = 5;

    for( i = 0; i < maxiter; i++ )
    {
        Point2f p1 = mapPointSpherical(Point2f((float)q[0], (float)q[1]), alpha, &J, projType);
        err = Vec2d(p1.x, p1.y) - p;
        if( err[0]*err[0] + err[1]*err[1] < eps )
            break;

        Vec4d JtJ(J[0]*J[0] + J[2]*J[2], J[0]*J[1] + J[2]*J[3],
                  J[0]*J[1] + J[2]*J[3], J[1]*J[1] + J[3]*J[3]);
        double d = JtJ[0]*JtJ[3] - JtJ[1]*JtJ[2];
        d = d ? 1./d : 0;
        Vec4d iJtJ(JtJ[3]*d, -JtJ[1]*d, -JtJ[2]*d, JtJ[0]*d);
        Vec2d JtErr(J[0]*err[0] + J[2]*err[1], J[1]*err[0] + J[3]*err[1]);

        q -= Vec2d(iJtJ[0]*JtErr[0] + iJtJ[1]*JtErr[1], iJtJ[2]*JtErr[0] + iJtJ[3]*JtErr[1]);
        //Matx22d J(kx*x + k, kx*y, ky*x, ky*y + k);
        //q -= Vec2d((J.t()*J).inv()*(J.t()*err));
    }

    if( i < maxiter )
    {
        avgiter += i;
        avgn++;
        if( avgn == 1500 )
            printf("avg iters = %g\n", (double)avgiter/avgn);
    }

    return i < maxiter ? Point2f((float)q[0], (float)q[1]) : Point2f(-FLT_MAX, -FLT_MAX);
}

}
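/* Note on the helpers above: mapPointSpherical projects a normalized image
 * point onto the selected spherical model (orthographic or equirectangular)
 * and optionally returns the 2x2 Jacobian packed into a Vec4d, while
 * invMapPointSpherical inverts that mapping with a few Gauss-Newton steps and
 * returns (-FLT_MAX, -FLT_MAX) when the iteration does not converge. */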
float cv::initWideAngleProjMap( InputArray _cameraMatrix0, InputArray _distCoeffs0,
                                Size imageSize, int destImageWidth, int m1type,
                                OutputArray _map1, OutputArray _map2, int projType, double _alpha )
{
    Mat cameraMatrix0 = _cameraMatrix0.getMat(), distCoeffs0 = _distCoeffs0.getMat();
    double k[14] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0}, M[9]={0,0,0,0,0,0,0,0,0};
    Mat distCoeffs(distCoeffs0.rows, distCoeffs0.cols, CV_MAKETYPE(CV_64F,distCoeffs0.channels()), k);
    Mat cameraMatrix(3,3,CV_64F,M);
    Point2f scenter((float)cameraMatrix.at<double>(0,2), (float)cameraMatrix.at<double>(1,2));
    Point2f dcenter((destImageWidth-1)*0.5f, 0.f);
    float xmin = FLT_MAX, xmax = -FLT_MAX, ymin = FLT_MAX, ymax = -FLT_MAX;
    int N = 9;
    std::vector<Point2f> uvec(1), vvec(1);
    Mat I = Mat::eye(3,3,CV_64F);
    float alpha = (float)_alpha;

    int ndcoeffs = distCoeffs0.cols*distCoeffs0.rows*distCoeffs0.channels();
    CV_Assert((distCoeffs0.cols == 1 || distCoeffs0.rows == 1) &&
              (ndcoeffs == 4 || ndcoeffs == 5 || ndcoeffs == 8 || ndcoeffs == 12 || ndcoeffs == 14));
    CV_Assert(cameraMatrix0.size() == Size(3,3));
    distCoeffs0.convertTo(distCoeffs,CV_64F);
    cameraMatrix0.convertTo(cameraMatrix,CV_64F);

    alpha = std::min(alpha, 0.999f);

    for( int i = 0; i < N; i++ )
        for( int j = 0; j < N; j++ )
        {
            Point2f p((float)j*imageSize.width/(N-1), (float)i*imageSize.height/(N-1));
            uvec[0] = p;
            undistortPoints(uvec, vvec, cameraMatrix, distCoeffs, I, I);
            Point2f q = mapPointSpherical(vvec[0], alpha, 0, projType);
            if( xmin > q.x ) xmin = q.x;
            if( xmax < q.x ) xmax = q.x;
            if( ymin > q.y ) ymin = q.y;
            if( ymax < q.y ) ymax = q.y;
        }

    float scale = (float)std::min(dcenter.x/fabs(xmax), dcenter.x/fabs(xmin));
    Size dsize(destImageWidth, cvCeil(std::max(scale*fabs(ymin)*2, scale*fabs(ymax)*2)));
    dcenter.y = (dsize.height - 1)*0.5f;

    Mat mapxy(dsize, CV_32FC2);
    double k1 = k[0], k2 = k[1], k3 = k[2], p1 = k[3], p2 = k[4], k4 = k[5], k5 = k[6], k6 = k[7],
           s1 = k[8], s2 = k[9], s3 = k[10], s4 = k[11];
    double fx = cameraMatrix.at<double>(0,0), fy = cameraMatrix.at<double>(1,1), cx = scenter.x, cy = scenter.y;
    cv::Matx33d matTilt;
    cv::detail::computeTiltProjectionMatrix(k[12], k[13], &matTilt);

    for( int y = 0; y < dsize.height; y++ )
    {
        Point2f* mxy = mapxy.ptr<Point2f>(y);
        for( int x = 0; x < dsize.width; x++ )
        {
            Point2f p = (Point2f((float)x, (float)y) - dcenter)*(1.f/scale);
            Point2f q = invMapPointSpherical(p, alpha, projType);
            if( q.x <= -FLT_MAX && q.y <= -FLT_MAX )
            {
                mxy[x] = Point2f(-1.f, -1.f);
                continue;
            }
            double x2 = q.x*q.x, y2 = q.y*q.y;
            double r2 = x2 + y2, _2xy = 2*q.x*q.y;
            double kr = 1 + ((k3*r2 + k2)*r2 + k1)*r2/(1 + ((k6*r2 + k5)*r2 + k4)*r2);
            double xd = (q.x*kr + p1*_2xy + p2*(r2 + 2*x2) + s1*r2 + s2*r2*r2);
            double yd = (q.y*kr + p1*(r2 + 2*y2) + p2*_2xy + s3*r2 + s4*r2*r2);
            cv::Vec3d vecTilt = matTilt*cv::Vec3d(xd, yd, 1);
            double invProj = vecTilt(2) ? 1./vecTilt(2) : 1;
            double u = fx*invProj*vecTilt(0) + cx;
            double v = fy*invProj*vecTilt(1) + cy;

            mxy[x] = Point2f((float)u, (float)v);
        }
    }

    if(m1type == CV_32FC2)
    {
        _map1.create(mapxy.size(), mapxy.type());
        Mat map1 = _map1.getMat();
        mapxy.copyTo(map1);
        _map2.release();
    }
    else
        convertMaps(mapxy, Mat(), _map1, _map2, m1type, false);

    return scale;
}
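/* Usage sketch (illustrative only; "K", "dist" and "img" are hypothetical
 * caller-side variables, and 600 is an arbitrary destination width). The maps
 * built by initWideAngleProjMap above are applied with cv::remap, and the
 * returned scale relates the spherical projection to destination pixels:
 *
 *     cv::Mat map1, map2, wide;
 *     float scale = cv::initWideAngleProjMap(K, dist, img.size(), 600,
 *                                            CV_16SC2, map1, map2);
 *     cv::remap(img, wide, map1, map2, cv::INTER_LINEAR);
 */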
/* End of file */