Renesas GR-PEACH OpenCV Development / gr-peach-opencv-project-sd-card_update

Fork of gr-peach-opencv-project-sd-card by the do

Embed: (wiki syntax)

« Back to documentation index

Show/hide line numbers matrix.cpp Source File

matrix.cpp

00001 /*M///////////////////////////////////////////////////////////////////////////////////////
00002 //
00003 //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
00004 //
00005 //  By downloading, copying, installing or using the software you agree to this license.
00006 //  If you do not agree to this license, do not download, install,
00007 //  copy or use the software.
00008 //
00009 //
00010 //                           License Agreement
00011 //                For Open Source Computer Vision Library
00012 //
00013 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
00014 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
00015 // Third party copyrights are property of their respective owners.
00016 //
00017 // Redistribution and use in source and binary forms, with or without modification,
00018 // are permitted provided that the following conditions are met:
00019 //
00020 //   * Redistribution's of source code must retain the above copyright notice,
00021 //     this list of conditions and the following disclaimer.
00022 //
00023 //   * Redistribution's in binary form must reproduce the above copyright notice,
00024 //     this list of conditions and the following disclaimer in the documentation
00025 //     and/or other materials provided with the distribution.
00026 //
00027 //   * The name of the copyright holders may not be used to endorse or promote products
00028 //     derived from this software without specific prior written permission.
00029 //
00030 // This software is provided by the copyright holders and contributors "as is" and
00031 // any express or implied warranties, including, but not limited to, the implied
00032 // warranties of merchantability and fitness for a particular purpose are disclaimed.
00033 // In no event shall the Intel Corporation or contributors be liable for any direct,
00034 // indirect, incidental, special, exemplary, or consequential damages
00035 // (including, but not limited to, procurement of substitute goods or services;
00036 // loss of use, data, or profits; or business interruption) however caused
00037 // and on any theory of liability, whether in contract, strict liability,
00038 // or tort (including negligence or otherwise) arising in any way out of
00039 // the use of this software, even if advised of the possibility of such damage.
00040 //
00041 //M*/
00042 
00043 #include "precomp.hpp"
00044 #include "opencl_kernels_core.hpp"
00045 
00046 #include "bufferpool.impl.hpp"
00047 
00048 /****************************************************************************************\
00049 *                           [scaled] Identity matrix initialization                      *
00050 \****************************************************************************************/
00051 
00052 namespace cv {
00053 
// No-op default: for the base (host-memory) allocator the data is already
// host-visible, so "mapping" requires no work. Device-backed allocators
// (e.g. OpenCL) override this to make the buffer accessible to the CPU.
void MatAllocator::map(UMatData*, int) const
{
}
00057 
00058 void MatAllocator::unmap(UMatData* u) const
00059 {
00060     if(u->urefcount == 0 && u->refcount == 0)
00061     {
00062         deallocate(u);
00063         u = NULL;
00064     }
00065 }
00066 
// Default "download": copy a dims-dimensional region of u's host buffer into
// dstptr, honoring per-dimension offsets and strides. The copy is modeled as
// CV_8U planes, so sz[]/offsets for the innermost dimension are in bytes.
//   sz[]      - extent of each dimension (must fit in int; 0 => nothing to do)
//   srcofs[]  - optional per-dimension source offset (may be NULL)
//   srcstep[]/dststep[] - per-dimension strides in bytes
void MatAllocator::download(UMatData* u, void* dstptr,
         int dims, const size_t sz[],
         const size_t srcofs[], const size_t srcstep[],
         const size_t dststep[]) const
{
    if(!u)
        return;
    int isz[CV_MAX_DIM];
    uchar* srcptr = u->data;
    for( int i = 0; i < dims; i++ )
    {
        CV_Assert( sz[i] <= (size_t)INT_MAX );
        if( sz[i] == 0 )
        return;
        if( srcofs )
        srcptr += srcofs[i]*(i <= dims-2 ? srcstep[i] : 1);  // last-dim offset is already in bytes
        isz[i] = (int)sz[i];
    }

    // Wrap both regions as byte matrices with identical geometry...
    Mat src(dims, isz, CV_8U, srcptr, srcstep);
    Mat dst(dims, isz, CV_8U, dstptr, dststep);

    const Mat* arrays[] = { &src, &dst };
    uchar* ptrs[2];
    NAryMatIterator it(arrays, ptrs, 2);
    size_t j, planesz = it.size;

    // ...then copy one contiguous plane at a time.
    for( j = 0; j < it.nplanes; j++, ++it )
        memcpy(ptrs[1], ptrs[0], planesz);
}
00097 
00098 
// Default "upload": mirror image of download() — copy a dims-dimensional
// region from srcptr into u's host buffer. Offsets/strides are in bytes for
// the innermost dimension (the transfer is modeled as CV_8U planes).
void MatAllocator::upload(UMatData* u, const void* srcptr, int dims, const size_t sz[],
                    const size_t dstofs[], const size_t dststep[],
                    const size_t srcstep[]) const
{
    if(!u)
        return;
    int isz[CV_MAX_DIM];
    uchar* dstptr = u->data;
    for( int i = 0; i < dims; i++ )
    {
        CV_Assert( sz[i] <= (size_t)INT_MAX );
        if( sz[i] == 0 )
        return;
        if( dstofs )
        dstptr += dstofs[i]*(i <= dims-2 ? dststep[i] : 1);  // last-dim offset is already in bytes
        isz[i] = (int)sz[i];
    }

    // const_cast-style wrap is safe: src is only read from.
    Mat src(dims, isz, CV_8U, (void*)srcptr, srcstep);
    Mat dst(dims, isz, CV_8U, dstptr, dststep);

    const Mat* arrays[] = { &src, &dst };
    uchar* ptrs[2];
    NAryMatIterator it(arrays, ptrs, 2);
    size_t j, planesz = it.size;

    // Copy plane by plane; each plane delivered by the iterator is contiguous.
    for( j = 0; j < it.nplanes; j++, ++it )
        memcpy(ptrs[1], ptrs[0], planesz);
}
00128 
00129 void MatAllocator::copy(UMatData* usrc, UMatData* udst, int dims, const size_t sz[],
00130                   const size_t srcofs[], const size_t srcstep[],
00131                   const size_t dstofs[], const size_t dststep[], bool /*sync*/) const
00132 {
00133     if(!usrc || !udst)
00134         return;
00135     int isz[CV_MAX_DIM];
00136     uchar* srcptr = usrc->data;
00137     uchar* dstptr = udst->data;
00138     for( int i = 0; i < dims; i++ )
00139     {
00140         CV_Assert( sz[i] <= (size_t)INT_MAX );
00141         if( sz[i] == 0 )
00142             return;
00143         if( srcofs )
00144             srcptr += srcofs[i]*(i <= dims-2 ? srcstep[i] : 1);
00145         if( dstofs )
00146             dstptr += dstofs[i]*(i <= dims-2 ? dststep[i] : 1);
00147         isz[i] = (int)sz[i];
00148     }
00149 
00150     Mat src(dims, isz, CV_8U, srcptr, srcstep);
00151     Mat dst(dims, isz, CV_8U, dstptr, dststep);
00152 
00153     const Mat* arrays[] = { &src, &dst };
00154     uchar* ptrs[2];
00155     NAryMatIterator it(arrays, ptrs, 2);
00156     size_t j, planesz = it.size;
00157 
00158     for( j = 0; j < it.nplanes; j++, ++it )
00159         memcpy(ptrs[1], ptrs[0], planesz);
00160 }
00161 
// Default buffer-pool hook: base allocators do no pooling, so return a shared
// do-nothing controller instead of NULL so callers need no null checks.
BufferPoolController* MatAllocator::getBufferPoolController(const char* id) const
{
    (void)id;  // id selects a pool in pooling allocators; unused here
    static DummyBufferPoolController dummy;
    return &dummy;
}
00168 
// Default CPU allocator: plain fastMalloc/fastFree-backed host buffers.
class StdMatAllocator : public MatAllocator
{
public:
    // Allocates (or, if data0 != NULL, wraps without owning) a buffer for a
    // dims-dimensional array of the given element type. 'step' is in/out:
    // user-supplied strides are validated when data0 is given; otherwise
    // dense strides are computed and written back.
    UMatData* allocate(int dims, const int* sizes, int type,
                       void* data0, size_t* step, int /*flags*/, UMatUsageFlags /*usageFlags*/) const
    {
        size_t total = CV_ELEM_SIZE(type);
        // Walk dimensions innermost-first, accumulating total byte size and
        // filling in dense steps where the caller did not provide them.
        for( int i = dims-1; i >= 0; i-- )
        {
            if( step )
            {
                if( data0 && step[i] != CV_AUTOSTEP )
                {
                    // A user-supplied stride must at least cover the data below it.
                    CV_Assert(total <= step[i]);
                    total = step[i];
                }
                else
                    step[i] = total;
            }
            total *= sizes[i];
        }
        uchar* data = data0 ? (uchar*)data0 : (uchar*)fastMalloc(total);
        UMatData* u = new UMatData(this);
        u->data = u->origdata = data;
        u->size = total;
        if(data0)
            u->flags |= UMatData::USER_ALLOCATED;  // external memory: never freed by us

        return u;
    }

    // Host data needs no preparation for access; just validate the descriptor.
    bool allocate(UMatData* u, int /*accessFlags*/, UMatUsageFlags /*usageFlags*/) const
    {
        if(!u) return false;
        return true;
    }

    // Frees the buffer (unless user-owned) and destroys the descriptor.
    // Precondition: both reference counts have dropped to zero.
    void deallocate(UMatData* u) const
    {
        if(!u)
            return;

        CV_Assert(u->urefcount == 0);
        CV_Assert(u->refcount == 0);
        if( !(u->flags & UMatData::USER_ALLOCATED) )
        {
            fastFree(u->origdata);
            u->origdata = 0;
        }
        delete u;
    }
};
namespace
{
    // Process-wide default allocator; lazily initialized by
    // Mat::getDefaultAllocator() and replaceable via setDefaultAllocator().
    MatAllocator* g_matAllocator = NULL;
}
00225 
00226 
// Returns the allocator used when a Mat has none set, lazily defaulting to
// the standard (fastMalloc-based) allocator.
// NOTE(review): the lazy init is not synchronized — looks safe only if the
// first call happens before any concurrent use; confirm threading assumptions.
MatAllocator* Mat::getDefaultAllocator()
{
    if (g_matAllocator == NULL)
    {
        g_matAllocator = getStdAllocator();
    }
    return g_matAllocator;
}
// Replaces the process-wide default allocator. The caller keeps ownership of
// 'allocator'; passing NULL makes the next getDefaultAllocator() re-default.
void Mat::setDefaultAllocator(MatAllocator* allocator)
{
    g_matAllocator = allocator;
}
// Returns the singleton standard allocator; the macro expands to a
// thread-safe lazily-initialized static instance.
MatAllocator* Mat::getStdAllocator()
{
    CV_SINGLETON_LAZY_INIT(MatAllocator, new StdMatAllocator())
}
00243 
// Swaps every header field of a and b in O(1) without touching pixel data.
// Subtlety: for dims <= 2, a Mat's step.p/size.p point into its OWN
// step.buf/rows members, so after the raw pointer swap each may point into
// the *other* object and has to be re-aimed at its new owner's storage.
void swap( Mat& a, Mat& b )
{
    std::swap(a.flags , b.flags );
    std::swap(a.dims, b.dims);
    std::swap(a.rows, b.rows);
    std::swap(a.cols, b.cols);
    std::swap(a.data, b.data);
    std::swap(a.datastart, b.datastart);
    std::swap(a.dataend, b.dataend);
    std::swap(a.datalimit, b.datalimit);
    std::swap(a.allocator, b.allocator);
    std::swap(a.u, b.u);

    std::swap(a.size.p, b.size.p);
    std::swap(a.step.p, b.step.p);
    std::swap(a.step.buf[0], b.step.buf[0]);
    std::swap(a.step.buf[1], b.step.buf[1]);

    // a's pointers landed in b's inline buffer => a took over a small-dims
    // header; repoint them at a's own inline storage.
    if( a.step.p == b.step.buf )
    {
        a.step.p = a.step.buf;
        a.size.p = &a.rows;
    }

    // ...and symmetrically for b.
    if( b.step.p == a.step.buf )
    {
        b.step.p = b.step.buf;
        b.size.p = &b.rows;
    }
}
00274 
00275 
// (Re)shapes m's header to _dims dimensions with sizes _sz.
//   _steps    - optional explicit strides (bytes); last dim always gets esz
//   autoSteps - when no _steps given, compute dense strides
// For dims > 2 the size/step arrays are heap-allocated in one fastMalloc
// block; for dims <= 2 they alias m.rows/m.cols and m.step.buf.
static inline void setSize( Mat& m, int _dims, const int* _sz,
                            const size_t* _steps, bool autoSteps=false )
{
    CV_Assert( 0 <= _dims && _dims <= CV_MAX_DIM );
    if( m.dims != _dims )
    {
        // Release any previous heap-allocated size/step arrays first.
        if( m.step.p != m.step.buf )
        {
            fastFree(m.step.p);
            m.step.p = m.step.buf;
            m.size.p = &m.rows;
        }
        if( _dims > 2 )
        {
            // One block holds _dims steps followed by (dim count + _dims sizes);
            // size.p[-1] stores the dimension count.
            m.step.p = (size_t*)fastMalloc(_dims*sizeof(m.step.p[0]) + (_dims+1)*sizeof(m.size.p[0]));
            m.size.p = (int*)(m.step.p + _dims) + 1;
            m.size.p[-1] = _dims;
            m.rows = m.cols = -1;  // rows/cols are meaningless for >2 dims
        }
    }

    m.dims = _dims;
    if( !_sz )
        return;  // caller only wanted the arrays (re)allocated

    size_t esz = CV_ELEM_SIZE(m.flags), esz1 = CV_ELEM_SIZE1(m.flags), total = esz;
    int i;
    for( i = _dims-1; i >= 0; i-- )
    {
        int s = _sz[i];
        CV_Assert( s >= 0 );
        m.size.p[i] = s;

        if( _steps )
        {
            if (_steps[i] % esz1 != 0)
            {
                CV_Error(Error::BadStep, "Step must be a multiple of esz1");
            }

            // The innermost step is forced to the element size.
            m.step.p[i] = i < _dims-1 ? _steps[i] : esz;
        }
        else if( autoSteps )
        {
            // Dense layout: each step is the accumulated size of inner dims;
            // guard the running product against size_t overflow.
            m.step.p[i] = total;
            int64 total1 = (int64)total*s;
            if( (uint64)total1 != (size_t)total1 )
                CV_Error( CV_StsOutOfRange, "The total matrix size does not fit to \"size_t\" type" );
            total = (size_t)total1;
        }
    }

    // 1-D arrays are represented as Nx1 2-D matrices.
    if( _dims == 1 )
    {
        m.dims = 2;
        m.cols = 1;
        m.step[1] = esz;
    }
}
00335 
// Recomputes Mat::CONTINUOUS_FLAG: the matrix is continuous iff, ignoring
// leading singleton dimensions, every step equals the packed size of the
// dimensions below it, and the total byte size fits in size_t.
static void updateContinuityFlag(Mat& m)
{
    int i, j;
    // Skip leading dimensions of extent <= 1 — they cannot introduce gaps.
    for( i = 0; i < m.dims; i++ )
    {
        if( m.size[i] > 1 )
            break;
    }

    // Scan from the innermost dimension outward; stop at the first gap
    // (a step smaller than the next-outer step would require for packing).
    for( j = m.dims-1; j > i; j-- )
    {
        if( m.step[j]*m.size[j] < m.step[j-1] )
            break;
    }

    // Continuous if no gap was found and the total size doesn't overflow size_t.
    uint64 t = (uint64)m.step[0]*m.size[0];
    if( j <= i && t == (size_t)t )
        m.flags |= Mat::CONTINUOUS_FLAG;
    else
        m.flags &= ~Mat::CONTINUOUS_FLAG;
}
00357 
// Finishes header construction after sizes/steps are set: refreshes the
// continuity flag, syncs data pointers with the UMatData block (if any),
// and computes dataend/datalimit from the geometry.
static void finalizeHdr(Mat& m)
{
    updateContinuityFlag(m);
    int d = m.dims;
    if( d > 2 )
        m.rows = m.cols = -1;  // rows/cols invalid for >2 dims
    if(m.u)
        m.datastart = m.data = m.u->data;
    if( m.data )
    {
        // datalimit: end of the buffer as implied by the outer stride.
        m.datalimit = m.datastart + m.size[0]*m.step[0];
        if( m.size[0] > 0 )
        {
            // dataend: one past the last addressable element (may be < datalimit
            // when the outer rows are padded).
            m.dataend = m.ptr() + m.size[d-1]*m.step[d-1];
            for( int i = 0; i < d-1; i++ )
                m.dataend += (m.size[i] - 1)*m.step[i];
        }
        else
            m.dataend = m.datalimit;
    }
    else
        m.dataend = m.datalimit = 0;
}
00381 
00382 
// Allocates (or reuses) storage for a d-dimensional array of the given type.
// If the current buffer already has the requested geometry and type, it is
// kept as-is; otherwise the old data is released and a new buffer allocated
// through this Mat's allocator (falling back to the default allocator).
void Mat::create(int d, const int* _sizes, int _type)
{
    int i;
    CV_Assert(0 <= d && d <= CV_MAX_DIM && _sizes);
    _type = CV_MAT_TYPE(_type);

    // Fast path: same type and same geometry => nothing to do.
    if( data && (d == dims || (d == 1 && dims <= 2)) && _type == type() )
    {
        if( d == 2 && rows == _sizes[0] && cols == _sizes[1] )
            return;
        for( i = 0; i < d; i++ )
            if( size[i] != _sizes[i] )
                break;
        // d == 1 also matches an existing Nx1 matrix (size[1] == 1).
        if( i == d && (d > 1 || size[1] == 1))
            return;
    }

    release();
    if( d == 0 )
        return;
    flags = (_type & CV_MAT_TYPE_MASK) | MAGIC_VAL;
    setSize(*this, d, _sizes, 0, true);  // dense strides

    if( total() > 0 )
    {
        MatAllocator *a = allocator, *a0 = getDefaultAllocator();
#ifdef HAVE_TGPU
        // NOTE(review): upstream OpenCV guards this with HAVE_TEGRA — confirm
        // the macro name is intentional in this fork.
        if( !a || a == tegra::getAllocator() )
            a = tegra::getAllocator(d, _sizes, _type);
#endif
        if(!a)
            a = a0;
        //try
//        {
            u = a->allocate(dims, size, _type, 0, step.p, 0, USAGE_DEFAULT);
            CV_Assert(u != 0);
        //}
//        catch(...)
//        {
//            if(a != a0)
//                u = a0->allocate(dims, size, _type, 0, step.p, 0, USAGE_DEFAULT);
//            CV_Assert(u != 0);
//        }
        // The allocator must have produced an innermost step equal to the
        // element size (no padding inside a row).
        CV_Assert( step[dims-1] == (size_t)CV_ELEM_SIZE(flags) );
    }

    addref();
    finalizeHdr(*this);
}
00432 
00433 void Mat::copySize(const Mat& m)
00434 {
00435     setSize(*this, m.dims, 0, 0);
00436     for( int i = 0; i < dims; i++ )
00437     {
00438         size[i] = m.size[i];
00439         step[i] = m.step[i];
00440     }
00441 }
00442 
// Detaches this Mat from its data block, letting the owning allocator decide
// whether to free it. Prefers the allocator recorded in the UMatData itself,
// then this Mat's allocator, then the global default.
void Mat::deallocate()
{
    if(u)
        (u->currAllocator ? u->currAllocator : allocator ? allocator : getDefaultAllocator())->unmap(u);
    u = NULL;
}
00449 
// Constructs a submatrix header over m restricted to the given row/column
// ranges. Shares m's data (no copy). For >2-dim matrices it delegates to
// the Range*-array constructor with Range::all() for the extra dimensions.
Mat::Mat(const Mat& m, const Range& _rowRange, const Range& _colRange)
    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
      datalimit(0), allocator(0), u(0), size(&rows)
{
    CV_Assert( m.dims >= 2 );
    if( m.dims > 2 )
    {
        AutoBuffer<Range> rs(m.dims);
        rs[0] = _rowRange;
        rs[1] = _colRange;
        for( int i = 2; i < m.dims; i++ )
            rs[i] = Range::all();
        *this = m(rs);
        return;
    }

    *this = m;
    if( _rowRange != Range::all() && _rowRange != Range(0,rows) )
    {
        CV_Assert( 0 <= _rowRange.start && _rowRange.start <= _rowRange.end && _rowRange.end <= m.rows );
        rows = _rowRange.size();
        data += step*_rowRange.start;  // advance to the first selected row
        flags  |= SUBMATRIX_FLAG;
    }

    if( _colRange != Range::all() && _colRange != Range(0,cols) )
    {
        CV_Assert( 0 <= _colRange.start && _colRange.start <= _colRange.end && _colRange.end <= m.cols );
        cols = _colRange.size();
        data += _colRange.start*elemSize();
        // Narrowing the columns breaks row-to-row continuity.
        flags  &= cols < m.cols ? ~CONTINUOUS_FLAG : -1;
        flags  |= SUBMATRIX_FLAG;
    }

    // A single row is trivially continuous.
    if( rows == 1 )
        flags  |= CONTINUOUS_FLAG;

    if( rows <= 0 || cols <= 0 )
    {
        release();
        rows = cols = 0;
    }
}
00493 
00494 
// Constructs a submatrix header over the rectangular region 'roi' of m,
// sharing m's data and bumping its reference count.
Mat::Mat(const Mat& m, const Rect& roi)
    : flags(m.flags), dims(2), rows(roi.height), cols(roi.width),
    data(m.data + roi.y*m.step[0]),
    datastart(m.datastart), dataend(m.dataend), datalimit(m.datalimit),
    allocator(m.allocator), u(m.u), size(&rows)
{
    CV_Assert( m.dims <= 2 );
    // Narrower than the parent => rows are no longer back-to-back.
    flags  &= roi.width < m.cols ? ~CONTINUOUS_FLAG : -1;
    flags  |= roi.height == 1 ? CONTINUOUS_FLAG : 0;

    size_t esz = CV_ELEM_SIZE(flags );
    // NOTE(review): data is advanced before the ROI bounds are validated;
    // an out-of-range roi still triggers the assert below, but the pointer
    // arithmetic itself happens first.
    data += roi.x*esz;
    CV_Assert( 0 <= roi.x && 0 <= roi.width && roi.x + roi.width <= m.cols &&
              0 <= roi.y && 0 <= roi.height && roi.y + roi.height <= m.rows );
    if( u )
        CV_XADD(&u->refcount, 1);  // share ownership of the data block
    if( roi.width < m.cols || roi.height < m.rows )
        flags  |= SUBMATRIX_FLAG;

    step[0] = m.step[0]; step[1] = esz;

    if( rows <= 0 || cols <= 0 )
    {
        release();
        rows = cols = 0;
    }
}
00522 
00523 
// Wraps user-provided memory (_data) as a _dims-dimensional matrix without
// copying or taking ownership. _steps may be NULL, in which case dense
// strides are computed.
Mat::Mat(int _dims, const int* _sizes, int _type, void* _data, const size_t* _steps)
    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
      datalimit(0), allocator(0), u(0), size(&rows)
{
    flags  |= CV_MAT_TYPE(_type);
    datastart = data = (uchar*)_data;
    setSize(*this, _dims, _sizes, _steps, true);
    finalizeHdr(*this);
}
00533 
00534 
// Constructs a submatrix header over m restricted by one Range per dimension
// (Range::all() keeps a dimension unchanged). Shares m's data.
Mat::Mat(const Mat& m, const Range* ranges)
    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
      datalimit(0), allocator(0), u(0), size(&rows)
{
    int i, d = m.dims;

    CV_Assert(ranges);
    // Validate all ranges before mutating anything.
    for( i = 0; i < d; i++ )
    {
        Range r = ranges[i];
        CV_Assert( r == Range::all() || (0 <= r.start && r.start < r.end && r.end <= m.size[i]) );
    }
    *this = m;
    for( i = 0; i < d; i++ )
    {
        Range r = ranges[i];
        if( r != Range::all() && r != Range(0, size.p[i]))
        {
            size.p[i] = r.end - r.start;
            data += r.start*step.p[i];  // advance to the range start in this dim
            flags  |= SUBMATRIX_FLAG;
        }
    }
    updateContinuityFlag(*this);  // cropping may have introduced gaps
}
00560 
00561 
// Converts a legacy CvMatND header to a cv::Mat. By default the result
// shares the CvMatND's data; with copyData=true it owns a deep copy.
static Mat cvMatNDToMat(const CvMatND * m, bool copyData)
{
    Mat thiz;

    if( !m )
        return thiz;  // NULL input => empty Mat
    thiz.datastart = thiz.data = m->data.ptr;
    thiz.flags  |= CV_MAT_TYPE(m->type);
    int _sizes[CV_MAX_DIM];
    size_t _steps[CV_MAX_DIM];

    // Transcribe per-dimension sizes/steps from the legacy header.
    int i, d = m->dims;
    for( i = 0; i < d; i++ )
    {
        _sizes[i] = m->dim[i].size;
        _steps[i] = m->dim[i].step;
    }

    setSize(thiz, d, _sizes, _steps);
    finalizeHdr(thiz);

    if( copyData )
    {
        // Deep-copy via a temporary header so the result owns its buffer.
        Mat temp(thiz);
        thiz.release();
        temp.copyTo(thiz);
    }

    return thiz;
}
00592 
// Converts a legacy 2-D CvMat header to a cv::Mat, either sharing the data
// (copyData=false) or taking a deep copy (copyData=true).
static Mat cvMatToMat(const CvMat* m, bool copyData)
{
    Mat thiz;

    if( !m )
        return thiz;  // NULL input => empty Mat

    if( !copyData )
    {
        // Build a non-owning header over the CvMat's buffer.
        thiz.flags  = Mat::MAGIC_VAL + (m->type & (CV_MAT_TYPE_MASK|CV_MAT_CONT_FLAG));
        thiz.dims = 2;
        thiz.rows = m->rows;
        thiz.cols = m->cols;
        thiz.datastart = thiz.data = m->data.ptr;
        size_t esz = CV_ELEM_SIZE(m->type), minstep = thiz.cols*esz, _step = m->step;
        if( _step == 0 )
            _step = minstep;  // legacy convention: 0 means tightly packed
        thiz.datalimit = thiz.datastart + _step*thiz.rows;
        // Last row only extends to its last element, not to the padded stride.
        thiz.dataend = thiz.datalimit - _step + minstep;
        thiz.step[0] = _step; thiz.step[1] = esz;
    }
    else
    {
        // Wrap temporarily, then deep-copy into thiz.
        thiz.datastart = thiz.dataend = thiz.data = 0;
        Mat(m->rows, m->cols, m->type, m->data.ptr, m->step).copyTo(thiz);
    }

    return thiz;
}
00622 
00623 
// Converts a legacy IplImage to a cv::Mat, honoring an optional ROI and
// channel-of-interest (COI). Shares the image data unless copyData is set.
static Mat iplImageToMat(const IplImage* img, bool copyData)
{
    Mat m;

    if( !img )
        return m;  // NULL input => empty Mat

    m.dims = 2;
    CV_DbgAssert(CV_IS_IMAGE(img) && img->imageData != 0);

    int imgdepth = IPL2CV_DEPTH(img->depth);
    size_t esz;
    m.step[0] = img->widthStep;

    if(!img->roi)
    {
        // Whole image, interleaved channels.
        CV_Assert(img->dataOrder == IPL_DATA_ORDER_PIXEL);
        m.flags = Mat::MAGIC_VAL + CV_MAKETYPE(imgdepth, img->nChannels);
        m.rows = img->height;
        m.cols = img->width;
        m.datastart = m.data = (uchar*)img->imageData;
        esz = CV_ELEM_SIZE(m.flags);
    }
    else
    {
        // ROI present; a planar image with a COI selects a single plane.
        CV_Assert(img->dataOrder == IPL_DATA_ORDER_PIXEL || img->roi->coi != 0);
        bool selectedPlane = img->roi->coi && img->dataOrder == IPL_DATA_ORDER_PLANE;
        m.flags = Mat::MAGIC_VAL + CV_MAKETYPE(imgdepth, selectedPlane ? 1 : img->nChannels);
        m.rows = img->roi->height;
        m.cols = img->roi->width;
        esz = CV_ELEM_SIZE(m.flags);
        // Offset to the selected plane (if any), then to the ROI origin.
        m.datastart = m.data = (uchar*)img->imageData +
            (selectedPlane ? (img->roi->coi - 1)*m.step*img->height : 0) +
            img->roi->yOffset*m.step[0] + img->roi->xOffset*esz;
    }
    m.datalimit = m.datastart + m.step.p[0]*m.rows;
    m.dataend = m.datastart + m.step.p[0]*(m.rows-1) + esz*m.cols;
    m.flags |= (m.cols*esz == m.step.p[0] || m.rows == 1 ? Mat::CONTINUOUS_FLAG : 0);
    m.step[1] = esz;

    if( copyData )
    {
        Mat m2 = m;
        m.release();
        if( !img->roi || !img->roi->coi ||
            img->dataOrder == IPL_DATA_ORDER_PLANE)
            m2.copyTo(m);
        else
        {
            // Interleaved image with COI: extract just that channel.
            int ch[] = {img->roi->coi - 1, 0};
            m.create(m2.rows, m2.cols, m2.type());
            mixChannels(&m2, 1, &m, 1, ch, 1);
        }
    }

    return m;
}
00681 
// Returns a single-column header over the d-th diagonal (d=0 main, d>0 above,
// d<0 below), sharing this matrix's data. The trick: step[0] is increased by
// one element size so that stepping "down a row" also moves one column right.
Mat Mat::diag(int d) const
{
    CV_Assert( dims <= 2 );
    Mat m = *this;
    size_t esz = elemSize();
    int len;

    if( d >= 0 )
    {
        // Diagonal starts at column d of row 0.
        len = std::min(cols - d, rows);
        m.data += esz*d;
    }
    else
    {
        // Diagonal starts at row -d of column 0 (d is negative).
        len = std::min(rows + d, cols);
        m.data -= step[0]*d;
    }
    CV_DbgAssert( len > 0 );

    m.size[0] = m.rows = len;
    m.size[1] = m.cols = 1;
    m.step[0] += (len > 1 ? esz : 0);  // diagonal stride = row stride + 1 element

    // Multi-element diagonals are never contiguous in memory.
    if( m.rows > 1 )
        m.flags  &= ~CONTINUOUS_FLAG;
    else
        m.flags  |= CONTINUOUS_FLAG;

    if( size() != Size(1,1) )
        m.flags  |= SUBMATRIX_FLAG;

    return m;
}
00715 
// Removes the last nelems "rows" (outer-dimension entries). For submatrices
// this re-slices; otherwise it just shrinks the header in place.
void Mat::pop_back(size_t nelems)
{
    CV_Assert( nelems <= (size_t)size.p[0] );

    if( isSubmatrix() )
        *this = rowRange(0, size.p[0] - (int)nelems);
    else
    {
        size.p[0] -= (int)nelems;
        dataend -= nelems*step.p[0];
        /*if( size.p[0] <= 1 )
        {
            if( dims <= 2 )
                flags |= CONTINUOUS_FLAG;
            else
                updateContinuityFlag(*this);
        }*/
    }
}
00735 
00736 
// Appends one raw element (elemSize() bytes at 'elem') as a new outer-dim
// entry, growing the buffer ~1.5x when capacity is exhausted.
void Mat::push_back_(const void* elem)
{
    int r = size.p[0];
    // Reallocate if this is a view or there is no room for one more row.
    if( isSubmatrix() || dataend + step.p[0] > datalimit )
        reserve( std::max(r + 1, (r*3+1)/2) );

    size_t esz = elemSize();
    memcpy(data + r*step.p[0], elem, esz);
    size.p[0] = r + 1;
    dataend += step.p[0];
    // A padded row stride means the matrix is no longer continuous.
    if( esz < step.p[0] )
        flags  &= ~CONTINUOUS_FLAG;
}
00750 
// Ensures capacity for at least nelems outer-dimension entries, reallocating
// and copying existing rows if needed. The logical row count is unchanged.
void Mat::reserve(size_t nelems)
{
    const size_t MIN_SIZE = 64;  // minimum allocation, in bytes

    CV_Assert( (int)nelems >= 0 );
    // Already enough room (and not a view that must be detached)?
    if( !isSubmatrix() && data + step.p[0]*nelems <= datalimit )
        return;

    int r = size.p[0];

    if( (size_t)r >= nelems )
        return;

    // Temporarily set the row count to the target to size the new buffer.
    size.p[0] = std::max((int)nelems, 1);
    size_t newsize = total()*elemSize();

    // Round tiny allocations up so repeated small appends don't realloc.
    if( newsize < MIN_SIZE )
        size.p[0] = (int)((MIN_SIZE + newsize - 1)*nelems/newsize);

    Mat m(dims, size.p, type());
    size.p[0] = r;
    if( r > 0 )
    {
        // Preserve the existing rows in the new buffer.
        Mat mpart = m.rowRange(0, r);
        copyTo(mpart);
    }

    *this = m;
    size.p[0] = r;  // restore the logical row count
    dataend = data + step.p[0]*r;
}
00782 
00783 
// Changes the number of outer-dimension entries to nelems, growing the
// buffer via reserve() if needed. Newly added rows are left uninitialized.
void Mat::resize(size_t nelems)
{
    int saveRows = size.p[0];
    if( saveRows == (int)nelems )
        return;  // already the requested size
    CV_Assert( (int)nelems >= 0 );

    if( isSubmatrix() || data + step.p[0]*nelems > datalimit )
        reserve(nelems);

    size.p[0] = (int)nelems;
    dataend += (size.p[0] - saveRows)*step.p[0];

    //updateContinuityFlag(*this);
}
00799 
00800 
00801 void Mat::resize(size_t nelems, const Scalar & s)
00802 {
00803     int saveRows = size.p[0];
00804     resize(nelems);
00805 
00806     if( size.p[0] > saveRows )
00807     {
00808         Mat part = rowRange(saveRows, size.p[0]);
00809         part = s;
00810     }
00811 }
00812 
// Appends all rows of 'elems' (which must match this matrix's type and
// non-outer dimensions) to this matrix, growing the buffer as needed.
void Mat::push_back(const Mat& elems)
{
    int r = size.p[0], delta = elems.size.p[0];
    if( delta == 0 )
        return;  // nothing to append
    if( this == &elems )
    {
        // Self-append: snapshot first, since reserve() may reallocate.
        Mat tmp = elems;
        push_back(tmp);
        return;
    }
    if( !data )
    {
        *this = elems.clone();
        return;
    }

    // Compare all dimensions except the outer one by temporarily equating it.
    size.p[0] = elems.size.p[0];
    bool eq = size == elems.size;
    size.p[0] = r;
    if( !eq )
        CV_Error(CV_StsUnmatchedSizes, "");
    if( type() != elems.type() )
        CV_Error(CV_StsUnmatchedFormats, "");

    if( isSubmatrix() || dataend + step.p[0]*delta > datalimit )
        reserve( std::max(r + delta, (r*3+1)/2) );

    size.p[0] += delta;
    dataend += step.p[0]*delta;

    //updateContinuityFlag(*this);

    if( isContinuous() && elems.isContinuous() )
        memcpy(data + r*step.p[0], elems.data, elems.total()*elems.elemSize());
    else
    {
        // Strided case: copy through a row-range view.
        Mat part = rowRange(r, r + delta);
        elems.copyTo(part);
    }
}
00854 
00855 
// Converts any legacy CvArr (CvMat, CvMatND, IplImage or CvSeq) to a cv::Mat.
//   copyData - deep-copy instead of sharing the legacy buffer
//   coiMode  - 0: reject images with an active COI; otherwise ignore the COI
//   abuf     - optional scratch buffer for the CvSeq case (avoids an owning Mat)
Mat cvarrToMat(const CvArr* arr, bool copyData,
               bool /*allowND*/, int coiMode, AutoBuffer<double>* abuf )
{
    if( !arr )
        return Mat();
    if( CV_IS_MAT_HDR_Z(arr) )
        return cvMatToMat((const CvMat*)arr, copyData);
    if( CV_IS_MATND(arr) )
        return cvMatNDToMat((const CvMatND *)arr, copyData );
    if( CV_IS_IMAGE(arr) )
    {
        const IplImage* iplimg = (const IplImage*)arr;
        if( coiMode == 0 && iplimg->roi && iplimg->roi->coi > 0 )
            CV_Error(CV_BadCOI, "COI is not supported by the function");
        return iplImageToMat(iplimg, copyData);
    }
    if( CV_IS_SEQ(arr) )
    {
        CvSeq* seq = (CvSeq*)arr;
        int total = seq->total, type = CV_MAT_TYPE(seq->flags), esz = seq->elem_size;
        if( total == 0 )
            return Mat();
        CV_Assert(total > 0 && CV_ELEM_SIZE(seq->flags) == esz);
        // Single-block sequence can be wrapped in place without copying.
        if(!copyData && seq->first->next == seq->first)
            return Mat(total, 1, type, seq->first->data);
        if( abuf )
        {
            // Flatten into the caller-provided buffer (rounded up to doubles).
            abuf->allocate(((size_t)total*esz + sizeof(double)-1)/sizeof(double));
            double* bufdata = *abuf;
            cvCvtSeqToArray(seq, bufdata, CV_WHOLE_SEQ);
            return Mat(total, 1, type, bufdata);
        }

        Mat buf(total, 1, type);
        cvCvtSeqToArray(seq, buf.ptr(), CV_WHOLE_SEQ);
        return buf;
    }
    CV_Error(CV_StsBadArg, "Unknown array type");
    return Mat();
}
00896 
// Recovers, from this (sub)matrix's data pointers, the size of the whole
// parent matrix and this view's offset within it.
//   wholeSize - out: dimensions of the original (whole) matrix
//   ofs       - out: (x, y) position of this view inside it
void Mat::locateROI( Size& wholeSize, Point & ofs ) const
{
    CV_Assert( dims <= 2 && step[0] > 0 );
    size_t esz = elemSize(), minstep;
    ptrdiff_t delta1 = data - datastart, delta2 = dataend - datastart;

    if( delta1 == 0 )
        ofs.x = ofs.y = 0;  // view starts at the buffer origin
    else
    {
        // Decompose the byte offset into whole rows + remaining columns.
        ofs.y = (int)(delta1/step[0]);
        ofs.x = (int)((delta1 - step[0]*ofs.y)/esz);
        CV_DbgAssert( data == datastart + ofs.y*step[0] + ofs.x*esz );
    }
    // Infer the parent's extents from the total data span; clamp so the
    // current view always fits inside the reported whole size.
    minstep = (ofs.x + cols)*esz;
    wholeSize.height = (int)((delta2 - minstep)/step[0] + 1);
    wholeSize.height = std::max(wholeSize.height, ofs.y + rows);
    wholeSize.width = (int)((delta2 - step*(wholeSize.height-1))/esz);
    wholeSize.width = std::max(wholeSize.width, ofs.x + cols);
}
00917 
// Grows/shrinks this submatrix in place by the given deltas per side
// (positive = grow), clamped to the parent matrix's bounds discovered via
// locateROI(). Returns *this for chaining.
Mat& Mat::adjustROI( int dtop, int dbottom, int dleft, int dright )
{
    CV_Assert( dims <= 2 && step[0] > 0 );
    Size wholeSize; Point  ofs;
    size_t esz = elemSize();
    locateROI( wholeSize, ofs );
    // New bounds, clamped to [0, wholeSize).
    int row1 = std::max(ofs.y - dtop, 0), row2 = std::min(ofs.y + rows + dbottom, wholeSize.height);
    int col1 = std::max(ofs.x - dleft, 0), col2 = std::min(ofs.x + cols + dright, wholeSize.width);
    data += (row1 - ofs.y)*step + (col1 - ofs.x)*esz;
    rows = row2 - row1; cols = col2 - col1;
    size.p[0] = rows; size.p[1] = cols;
    if( esz*cols == step[0] || rows == 1 )
        flags  |= CONTINUOUS_FLAG;
    else
        flags  &= ~CONTINUOUS_FLAG;
    return *this;
}
00935 
00936 }
00937 
// Extracts the channel-of-interest from a legacy array into _ch (a
// single-channel matrix of the same depth/size).
//   coi - 0-based channel index, or negative to take the COI stored in
//         the IplImage header.
void cv::extractImageCOI(const CvArr* arr, OutputArray _ch, int coi)
{
    Mat mat = cvarrToMat(arr, false, true, 1);
    _ch.create(mat.dims, mat.size, mat.depth());
    Mat ch = _ch.getMat();
    if(coi < 0)
    {
        // Fall back to the COI recorded in the image header (1-based there).
        CV_Assert( CV_IS_IMAGE(arr) );
        coi = cvGetImageCOI((const IplImage*)arr)-1;
    }
    CV_Assert(0 <= coi && coi < mat.channels());
    int _pairs[] = { coi, 0 };
    mixChannels( &mat, 1, &ch, 1, _pairs, 1 );
}
00952 
// Inserts the single-channel matrix _ch into channel 'coi' of the legacy
// array (inverse of extractImageCOI). coi < 0 takes the image header's COI.
void cv::insertImageCOI(InputArray _ch, CvArr* arr, int coi)
{
    Mat ch = _ch.getMat(), mat = cvarrToMat(arr, false, true, 1);
    if(coi < 0)
    {
        // Fall back to the COI recorded in the image header (1-based there).
        CV_Assert( CV_IS_IMAGE(arr) );
        coi = cvGetImageCOI((const IplImage*)arr)-1;
    }
    CV_Assert(ch.size == mat.size && ch.depth() == mat.depth() && 0 <= coi && coi < mat.channels());
    int _pairs[] = { 0, coi };
    mixChannels( &ch, 1, &mat, 1, _pairs, 1 );
}
00965 
00966 namespace cv
00967 {
00968 
Mat Mat::reshape(int new_cn, int new_rows) const
{
    // Returns a new header (no data copy) with a different channel count
    // and/or row count; the total number of elements must stay the same.
    // new_cn == 0 keeps the channel count; new_rows == 0 keeps the rows.
    int cn = channels();
    Mat hdr = *this;

    // n-dim fast path: only the channel count changes and the last
    // dimension absorbs the difference.
    if( dims > 2 && new_rows == 0 && new_cn != 0 && size[dims-1]*cn % new_cn == 0 )
    {
        hdr.flags  = (hdr.flags  & ~CV_MAT_CN_MASK) | ((new_cn-1) << CV_CN_SHIFT);
        hdr.step[dims-1] = CV_ELEM_SIZE(hdr.flags );
        hdr.size[dims-1] = hdr.size[dims-1]*cn / new_cn;
        return hdr;
    }

    CV_Assert( dims <= 2 );

    if( new_cn == 0 )
        new_cn = cn;

    // Width measured in single channels, so rows*total_width is invariant.
    int total_width = cols * cn;

    // If the current width cannot hold the new channel count, derive the
    // row count that makes the reshape possible.
    if( (new_cn > total_width || total_width % new_cn != 0) && new_rows == 0 )
        new_rows = rows * total_width / new_cn;

    if( new_rows != 0 && new_rows != rows )
    {
        int total_size = total_width * rows;
        // Changing the row count re-partitions the underlying buffer, which
        // only works when the data is one contiguous block.
        if( !isContinuous() )
            CV_Error( CV_BadStep,
            "The matrix is not continuous, thus its number of rows can not be changed" );

        // The unsigned compare also rejects negative new_rows.
        if( (unsigned)new_rows > (unsigned)total_size )
            CV_Error( CV_StsOutOfRange, "Bad new number of rows" );

        total_width = total_size / new_rows;

        if( total_width * new_rows != total_size )
            CV_Error( CV_StsBadArg, "The total number of matrix elements "
                                    "is not divisible by the new number of rows" );

        hdr.rows = new_rows;
        hdr.step[0] = total_width * elemSize1();
    }

    int new_width = total_width / new_cn;

    if( new_width * new_cn != total_width )
        CV_Error( CV_BadNumChannels,
        "The total width is not divisible by the new number of channels" );

    // Patch the header: new column count, channel bits, and element stride.
    hdr.cols = new_width;
    hdr.flags  = (hdr.flags  & ~CV_MAT_CN_MASK) | ((new_cn-1) << CV_CN_SHIFT);
    hdr.step[1] = CV_ELEM_SIZE(hdr.flags );
    return hdr;
}
01023 
01024 Mat Mat::diag(const Mat& d)
01025 {
01026     CV_Assert( d.cols == 1 || d.rows == 1 );
01027     int len = d.rows + d.cols - 1;
01028     Mat m(len, len, d.type(), Scalar (0));
01029     Mat md = m.diag();
01030     if( d.cols == 1 )
01031         d.copyTo(md);
01032     else
01033         transpose(d, md);
01034     return m;
01035 }
01036 
int Mat::checkVector(int _elemChannels, int _depth, bool _requireContinuous) const
{
    // Returns the number of _elemChannels-element "points" this matrix
    // holds, or -1 if it cannot be interpreted that way.
    // Accepted layouts:
    //   - 2D: a single row/column with channels() == _elemChannels, or an
    //     N x _elemChannels single-channel matrix;
    //   - 3D: single-channel 1 x N x _elemChannels (or N x 1 x ...) with the
    //     last dimension tightly packed.
    // _depth <= 0 disables the depth check; _requireContinuous additionally
    // demands a continuous layout.
    return (depth() == _depth || _depth <= 0) &&
        (isContinuous() || !_requireContinuous) &&
        ((dims == 2 && (((rows == 1 || cols == 1) && channels() == _elemChannels) ||
                        (cols == _elemChannels && channels() == 1))) ||
        (dims == 3 && channels() == 1 && size.p[2] == _elemChannels && (size.p[0] == 1 || size.p[1] == 1) &&
         (isContinuous() || step.p[1] == step.p[2]*size.p[2])))
    ? (int)(total()*channels()/_elemChannels) : -1;
}
01047 
01048 
01049 void scalarToRawData(const Scalar & s, void* _buf, int type, int unroll_to)
01050 {
01051     int i, depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
01052     CV_Assert(cn <= 4);
01053     switch(depth)
01054     {
01055     case CV_8U:
01056         {
01057         uchar* buf = (uchar*)_buf;
01058         for(i = 0; i < cn; i++)
01059             buf[i] = saturate_cast<uchar>(s.val[i]);
01060         for(; i < unroll_to; i++)
01061             buf[i] = buf[i-cn];
01062         }
01063         break;
01064     case CV_8S:
01065         {
01066         schar* buf = (schar*)_buf;
01067         for(i = 0; i < cn; i++)
01068             buf[i] = saturate_cast<schar>(s.val[i]);
01069         for(; i < unroll_to; i++)
01070             buf[i] = buf[i-cn];
01071         }
01072         break;
01073     case CV_16U:
01074         {
01075         ushort* buf = (ushort*)_buf;
01076         for(i = 0; i < cn; i++)
01077             buf[i] = saturate_cast<ushort>(s.val[i]);
01078         for(; i < unroll_to; i++)
01079             buf[i] = buf[i-cn];
01080         }
01081         break;
01082     case CV_16S:
01083         {
01084         short* buf = (short*)_buf;
01085         for(i = 0; i < cn; i++)
01086             buf[i] = saturate_cast<short>(s.val[i]);
01087         for(; i < unroll_to; i++)
01088             buf[i] = buf[i-cn];
01089         }
01090         break;
01091     case CV_32S:
01092         {
01093         int* buf = (int*)_buf;
01094         for(i = 0; i < cn; i++)
01095             buf[i] = saturate_cast<int>(s.val[i]);
01096         for(; i < unroll_to; i++)
01097             buf[i] = buf[i-cn];
01098         }
01099         break;
01100     case CV_32F:
01101         {
01102         float* buf = (float*)_buf;
01103         for(i = 0; i < cn; i++)
01104             buf[i] = saturate_cast<float>(s.val[i]);
01105         for(; i < unroll_to; i++)
01106             buf[i] = buf[i-cn];
01107         }
01108         break;
01109     case CV_64F:
01110         {
01111         double* buf = (double*)_buf;
01112         for(i = 0; i < cn; i++)
01113             buf[i] = saturate_cast<double>(s.val[i]);
01114         for(; i < unroll_to; i++)
01115             buf[i] = buf[i-cn];
01116         break;
01117         }
01118     default:
01119         CV_Error(CV_StsUnsupportedFormat,"");
01120     }
01121 }
01122 
01123 
01124 /*************************************************************************************************\
01125                                         Input/Output Array
01126 \*************************************************************************************************/
01127 
Mat _InputArray::getMat_(int i) const
{
    // Returns a Mat header for the wrapped object, avoiding a data copy
    // where possible.  i >= 0 selects a single row / element of a
    // collection; i < 0 means "the whole array".
    int k = kind();
    int accessFlags = flags & ACCESS_MASK;

    if( k == MAT )
    {
        const Mat* m = (const Mat*)obj;
        if( i < 0 )
            return *m;
        return m->row(i);
    }

    if( k == UMAT )
    {
        const UMat* m = (const UMat*)obj;
        if( i < 0 )
            return m->getMat(accessFlags);
        return m->getMat(accessFlags).row(i);
    }

    if( k == EXPR )
    {
        // Matrix expressions are evaluated into a temporary Mat.
        CV_Assert( i < 0 );
        return (Mat)*((const MatExpr*)obj);
    }

    if( k == MATX )
    {
        // Fixed-size matrix: wrap its storage directly.
        CV_Assert( i < 0 );
        return Mat(sz, flags, obj);
    }

    if( k == STD_VECTOR )
    {
        // std::vector<T>: wrap the contiguous buffer without copying.
        CV_Assert( i < 0 );
        int t = CV_MAT_TYPE(flags);
        const std::vector<uchar>& v = *(const std::vector<uchar>*)obj;

        return !v.empty() ? Mat(size(), t, (void*)&v[0]) : Mat();
    }

    if( k == STD_BOOL_VECTOR )
    {
        // std::vector<bool> is bit-packed, so its elements must be copied
        // out into a real CV_8U buffer element by element.
        CV_Assert( i < 0 );
        int t = CV_8U;
        const std::vector<bool>& v = *(const std::vector<bool>*)obj;
        int j, n = (int)v.size();
        if( n == 0 )
            return Mat();
        Mat m(1, n, t);
        uchar* dst = m.data;
        for( j = 0; j < n; j++ )
            dst[j] = (uchar)v[j];
        return m;
    }

    if( k == NONE )
        return Mat();

    if( k == STD_VECTOR_VECTOR )
    {
        // i selects one inner vector, which is wrapped without copying.
        int t = type(i);
        const std::vector<std::vector<uchar> >& vv = *(const std::vector<std::vector<uchar> >*)obj;
        CV_Assert( 0 <= i && i < (int)vv.size() );
        const std::vector<uchar>& v = vv[i];

        return !v.empty() ? Mat(size(i), t, (void*)&v[0]) : Mat();
    }

    if( k == STD_VECTOR_MAT )
    {
        const std::vector<Mat>& v = *(const std::vector<Mat>*)obj;
        CV_Assert( 0 <= i && i < (int)v.size() );

        return v[i];
    }

    if( k == STD_VECTOR_UMAT )
    {
        const std::vector<UMat>& v = *(const std::vector<UMat>*)obj;
        CV_Assert( 0 <= i && i < (int)v.size() );

        return v[i].getMat(accessFlags);
    }

    if( k == OPENGL_BUFFER )
    {
        // GPU-side objects cannot be mapped implicitly.
        CV_Assert( i < 0 );
        CV_Error(cv::Error::StsNotImplemented, "You should explicitly call mapHost/unmapHost methods for ogl::Buffer object");
        return Mat();
    }

    if( k == CUDA_GPU_MAT )
    {
        CV_Assert( i < 0 );
        CV_Error(cv::Error::StsNotImplemented, "You should explicitly call download method for cuda::GpuMat object");
        return Mat();
    }

    if( k == CUDA_HOST_MEM )
    {
        // Page-locked host memory can be viewed as a regular Mat.
        CV_Assert( i < 0 );

        const cuda::HostMem* cuda_mem = (const cuda::HostMem*)obj;

        return cuda_mem->createMatHeader();
    }

    CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
    return Mat();
}
01240 
01241 UMat _InputArray::getUMat(int i) const
01242 {
01243     int k = kind();
01244     int accessFlags = flags & ACCESS_MASK;
01245 
01246     if( k == UMAT )
01247     {
01248         const UMat* m = (const UMat*)obj;
01249         if( i < 0 )
01250             return *m;
01251         return m->row(i);
01252     }
01253 
01254     if( k == STD_VECTOR_UMAT )
01255     {
01256         const std::vector<UMat>& v = *(const std::vector<UMat>*)obj;
01257         CV_Assert( 0 <= i && i < (int)v.size() );
01258 
01259         return v[i];
01260     }
01261 
01262     if( k == MAT )
01263     {
01264         const Mat* m = (const Mat*)obj;
01265         if( i < 0 )
01266             return m->getUMat(accessFlags);
01267         return m->row(i).getUMat(accessFlags);
01268     }
01269 
01270     return getMat(i).getUMat(accessFlags);
01271 }
01272 
void _InputArray::getMatVector(std::vector<Mat>& mv) const
{
    // Expands the wrapped object into a vector of Mat headers, one per
    // row / plane / collection element (no data copy except for UMats,
    // which are mapped via getMat).
    int k = kind();
    int accessFlags = flags & ACCESS_MASK;

    if( k == MAT )
    {
        // Split along the first dimension: rows for 2D, (dims-1)-D planes
        // for n-dim matrices.
        const Mat& m = *(const Mat*)obj;
        int i, n = (int)m.size[0];
        mv.resize(n);

        for( i = 0; i < n; i++ )
            mv[i] = m.dims == 2 ? Mat(1, m.cols, m.type(), (void*)m.ptr(i)) :
                Mat(m.dims-1, &m.size[1], m.type(), (void*)m.ptr(i), &m.step[1]);
        return;
    }

    if( k == EXPR )
    {
        // Evaluate the expression once, then hand out row headers of the result.
        Mat m = *(const MatExpr*)obj;
        int i, n = m.size[0];
        mv.resize(n);

        for( i = 0; i < n; i++ )
            mv[i] = m.row(i);
        return;
    }

    if( k == MATX )
    {
        // Fixed-size matrix: one header per row over the raw storage.
        size_t i, n = sz.height, esz = CV_ELEM_SIZE(flags);
        mv.resize(n);

        for( i = 0; i < n; i++ )
            mv[i] = Mat(1, sz.width, CV_MAT_TYPE(flags), (uchar*)obj + esz*sz.width*i);
        return;
    }

    if( k == STD_VECTOR )
    {
        const std::vector<uchar>& v = *(const std::vector<uchar>*)obj;

        // Each vector element becomes a 1 x cn single-channel Mat of depth t;
        // &v[0] + esz*i advances in bytes since v is viewed as vector<uchar>.
        size_t i, n = v.size(), esz = CV_ELEM_SIZE(flags);
        int t = CV_MAT_DEPTH(flags), cn = CV_MAT_CN(flags);
        mv.resize(n);

        for( i = 0; i < n; i++ )
            mv[i] = Mat(1, cn, t, (void*)(&v[0] + esz*i));
        return;
    }

    if( k == NONE )
    {
        mv.clear();
        return;
    }

    if( k == STD_VECTOR_VECTOR )
    {
        // One header per inner vector, wrapping each buffer in place.
        const std::vector<std::vector<uchar> >& vv = *(const std::vector<std::vector<uchar> >*)obj;
        int i, n = (int)vv.size();
        int t = CV_MAT_TYPE(flags);
        mv.resize(n);

        for( i = 0; i < n; i++ )
        {
            const std::vector<uchar>& v = vv[i];
            mv[i] = Mat(size(i), t, (void*)&v[0]);
        }
        return;
    }

    if( k == STD_VECTOR_MAT )
    {
        const std::vector<Mat>& v = *(const std::vector<Mat>*)obj;
        size_t i, n = v.size();
        mv.resize(n);

        for( i = 0; i < n; i++ )
            mv[i] = v[i];
        return;
    }

    if( k == STD_VECTOR_UMAT )
    {
        // UMats must be mapped to host memory individually.
        const std::vector<UMat>& v = *(const std::vector<UMat>*)obj;
        size_t i, n = v.size();
        mv.resize(n);

        for( i = 0; i < n; i++ )
            mv[i] = v[i].getMat(accessFlags);
        return;
    }

    CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
}
01369 
01370 void _InputArray::getUMatVector(std::vector<UMat>& umv) const
01371 {
01372     int k = kind();
01373     int accessFlags = flags & ACCESS_MASK;
01374 
01375     if( k == NONE )
01376     {
01377         umv.clear();
01378         return;
01379     }
01380 
01381     if( k == STD_VECTOR_MAT )
01382     {
01383         const std::vector<Mat>& v = *(const std::vector<Mat>*)obj;
01384         size_t i, n = v.size();
01385         umv.resize(n);
01386 
01387         for( i = 0; i < n; i++ )
01388             umv[i] = v[i].getUMat(accessFlags);
01389         return;
01390     }
01391 
01392     if( k == STD_VECTOR_UMAT )
01393     {
01394         const std::vector<UMat>& v = *(const std::vector<UMat>*)obj;
01395         size_t i, n = v.size();
01396         umv.resize(n);
01397 
01398         for( i = 0; i < n; i++ )
01399             umv[i] = v[i];
01400         return;
01401     }
01402 
01403     if( k == UMAT )
01404     {
01405         UMat& v = *(UMat*)obj;
01406         umv.resize(1);
01407         umv[0] = v;
01408         return;
01409     }
01410     if( k == MAT )
01411     {
01412         Mat& v = *(Mat*)obj;
01413         umv.resize(1);
01414         umv[0] = v.getUMat(accessFlags);
01415         return;
01416     }
01417 
01418     CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
01419 }
01420 
01421 cuda::GpuMat _InputArray::getGpuMat() const
01422 {
01423     int k = kind();
01424 
01425     if (k == CUDA_GPU_MAT)
01426     {
01427         const cuda::GpuMat* d_mat = (const cuda::GpuMat*)obj;
01428         return *d_mat;
01429     }
01430 
01431     if (k == CUDA_HOST_MEM)
01432     {
01433         const cuda::HostMem* cuda_mem = (const cuda::HostMem*)obj;
01434         return cuda_mem->createGpuMatHeader();
01435     }
01436 
01437     if (k == OPENGL_BUFFER)
01438     {
01439         CV_Error(cv::Error::StsNotImplemented, "You should explicitly call mapDevice/unmapDevice methods for ogl::Buffer object");
01440         return cuda::GpuMat();
01441     }
01442 
01443     if (k == NONE)
01444         return cuda::GpuMat();
01445 
01446     CV_Error(cv::Error::StsNotImplemented, "getGpuMat is available only for cuda::GpuMat and cuda::HostMem");
01447     return cuda::GpuMat();
01448 }
void _InputArray::getGpuMatVector(std::vector<cuda::GpuMat>& gpumv) const
{
    // Copies out the wrapped vector of GpuMat headers.
    // NOTE(review): unlike the other accessors in this file, unsupported
    // kinds are silently ignored here (gpumv is left untouched) instead of
    // raising CV_Error — confirm this is intentional before relying on it.
    int k = kind();
    if (k == STD_VECTOR_CUDA_GPU_MAT)
    {
        gpumv = *(std::vector<cuda::GpuMat>*)obj;
    }
}
01457 ogl::Buffer _InputArray::getOGlBuffer() const
01458 {
01459     int k = kind();
01460 
01461     CV_Assert(k == OPENGL_BUFFER);
01462 
01463     const ogl::Buffer* gl_buf = (const ogl::Buffer*)obj;
01464     return *gl_buf;
01465 }
01466 
int _InputArray::kind() const
{
    // The storage kind (MAT, UMAT, STD_VECTOR, ...) is encoded in the
    // KIND_MASK bits of the flags word.
    return flags & KIND_MASK;
}
01471 
01472 int _InputArray::rows(int i) const
01473 {
01474     return size(i).height;
01475 }
01476 
01477 int _InputArray::cols(int i) const
01478 {
01479     return size(i).width;
01480 }
01481 
Size _InputArray::size(int i) const
{
    // Returns the 2D size of array #i (i < 0 means the wrapped object
    // itself).  For collections (STD_VECTOR_*), i < 0 yields the number of
    // elements as Size(count, 1).
    int k = kind();

    if( k == MAT )
    {
        CV_Assert( i < 0 );
        return ((const Mat*)obj)->size();
    }

    if( k == EXPR )
    {
        CV_Assert( i < 0 );
        return ((const MatExpr*)obj)->size();
    }

    if( k == UMAT )
    {
        CV_Assert( i < 0 );
        return ((const UMat*)obj)->size();
    }

    if( k == MATX )
    {
        CV_Assert( i < 0 );
        return sz;
    }

    if( k == STD_VECTOR )
    {
        CV_Assert( i < 0 );
        // The vector is viewed both as vector<uchar> and vector<int> to
        // deduce the element size: equal sizes mean 1-byte elements,
        // otherwise the byte length is divided by the element size.
        // NOTE(review): this relies on std::vector<T> having the same
        // layout for all T — confirm on new toolchains.
        const std::vector<uchar>& v = *(const std::vector<uchar>*)obj;
        const std::vector<int>& iv = *(const std::vector<int>*)obj;
        size_t szb = v.size(), szi = iv.size();
        return szb == szi ? Size((int)szb, 1) : Size((int)(szb/CV_ELEM_SIZE(flags)), 1);
    }

    if( k == STD_BOOL_VECTOR )
    {
        // vector<bool> reports its logical element count directly.
        CV_Assert( i < 0 );
        const std::vector<bool>& v = *(const std::vector<bool>*)obj;
        return Size((int)v.size(), 1);
    }

    if( k == NONE )
        return Size();

    if( k == STD_VECTOR_VECTOR )
    {
        const std::vector<std::vector<uchar> >& vv = *(const std::vector<std::vector<uchar> >*)obj;
        if( i < 0 )
            return vv.empty() ? Size() : Size((int)vv.size(), 1);
        CV_Assert( i < (int)vv.size() );
        // Same byte-vs-element aliasing trick as STD_VECTOR, applied to
        // the selected inner vector.
        const std::vector<std::vector<int> >& ivv = *(const std::vector<std::vector<int> >*)obj;

        size_t szb = vv[i].size(), szi = ivv[i].size();
        return szb == szi ? Size((int)szb, 1) : Size((int)(szb/CV_ELEM_SIZE(flags)), 1);
    }

    if( k == STD_VECTOR_MAT )
    {
        const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
        if( i < 0 )
            return vv.empty() ? Size() : Size((int)vv.size(), 1);
        CV_Assert( i < (int)vv.size() );

        return vv[i].size();
    }

    if (k == STD_VECTOR_CUDA_GPU_MAT)
    {
        const std::vector<cuda::GpuMat>& vv = *(const std::vector<cuda::GpuMat>*)obj;
        if (i < 0)
            return vv.empty() ? Size() : Size((int)vv.size(), 1);
        CV_Assert(i < (int)vv.size());
        return vv[i].size();
    }

    if( k == STD_VECTOR_UMAT )
    {
        const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
        if( i < 0 )
            return vv.empty() ? Size() : Size((int)vv.size(), 1);
        CV_Assert( i < (int)vv.size() );

        return vv[i].size();
    }

    if( k == OPENGL_BUFFER )
    {
        CV_Assert( i < 0 );
        const ogl::Buffer* buf = (const ogl::Buffer*)obj;
        return buf->size();
    }

    if( k == CUDA_GPU_MAT )
    {
        CV_Assert( i < 0 );
        const cuda::GpuMat* d_mat = (const cuda::GpuMat*)obj;
        return d_mat->size();
    }

    if( k == CUDA_HOST_MEM )
    {
        CV_Assert( i < 0 );
        const cuda::HostMem* cuda_mem = (const cuda::HostMem*)obj;
        return cuda_mem->size();
    }

    CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
    return Size();
}
01594 
int _InputArray::sizend(int* arrsz, int i) const
{
    // Returns the number of dimensions of array #i and, if arrsz is
    // non-NULL, writes the per-dimension sizes into it (the caller must
    // provide enough room).  Kinds without native n-dim support fall back
    // to the 2D size().
    int j, d=0, k = kind();

    if( k == NONE )
        ;
    else if( k == MAT )
    {
        CV_Assert( i < 0 );
        const Mat& m = *(const Mat*)obj;
        d = m.dims;
        if(arrsz)
            for(j = 0; j < d; j++)
                arrsz[j] = m.size.p[j];
    }
    else if( k == UMAT )
    {
        CV_Assert( i < 0 );
        const UMat& m = *(const UMat*)obj;
        d = m.dims;
        if(arrsz)
            for(j = 0; j < d; j++)
                arrsz[j] = m.size.p[j];
    }
    else if( k == STD_VECTOR_MAT && i >= 0 )
    {
        const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
        CV_Assert( i < (int)vv.size() );
        const Mat& m = vv[i];
        d = m.dims;
        if(arrsz)
            for(j = 0; j < d; j++)
                arrsz[j] = m.size.p[j];
    }
    else if( k == STD_VECTOR_UMAT && i >= 0 )
    {
        const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
        CV_Assert( i < (int)vv.size() );
        const UMat& m = vv[i];
        d = m.dims;
        if(arrsz)
            for(j = 0; j < d; j++)
                arrsz[j] = m.size.p[j];
    }
    else
    {
        // Fallback: treat the array as 2D (height x width).
        Size sz2d = size(i);
        d = 2;
        if(arrsz)
        {
            arrsz[0] = sz2d.height;
            arrsz[1] = sz2d.width;
        }
    }

    return d;
}
01652 
bool _InputArray::sameSize(const _InputArray& arr) const
{
    // True when this array and `arr` have identical sizes.  Mat/UMat pairs
    // are compared via MatSize (n-dim aware); every other combination falls
    // back to comparing 2D sizes, so >2D operands of other kinds compare
    // unequal.
    int k1 = kind(), k2 = arr.kind();
    Size sz1;

    if( k1 == MAT )
    {
        const Mat* m = ((const Mat*)obj);
        if( k2 == MAT )
            return m->size == ((const Mat*)arr.obj)->size;
        if( k2 == UMAT )
            return m->size == ((const UMat*)arr.obj)->size;
        if( m->dims > 2 )
            return false;
        sz1 = m->size();
    }
    else if( k1 == UMAT )
    {
        const UMat* m = ((const UMat*)obj);
        if( k2 == MAT )
            return m->size == ((const Mat*)arr.obj)->size;
        if( k2 == UMAT )
            return m->size == ((const UMat*)arr.obj)->size;
        if( m->dims > 2 )
            return false;
        sz1 = m->size();
    }
    else
        sz1 = size();
    // 2D comparison path.
    if( arr.dims() > 2 )
        return false;
    return sz1 == arr.size();
}
01686 
01687 int _InputArray::dims(int i) const
01688 {
01689     int k = kind();
01690 
01691     if( k == MAT )
01692     {
01693         CV_Assert( i < 0 );
01694         return ((const Mat*)obj)->dims;
01695     }
01696 
01697     if( k == EXPR )
01698     {
01699         CV_Assert( i < 0 );
01700         return ((const MatExpr*)obj)->a.dims;
01701     }
01702 
01703     if( k == UMAT )
01704     {
01705         CV_Assert( i < 0 );
01706         return ((const UMat*)obj)->dims;
01707     }
01708 
01709     if( k == MATX )
01710     {
01711         CV_Assert( i < 0 );
01712         return 2;
01713     }
01714 
01715     if( k == STD_VECTOR || k == STD_BOOL_VECTOR )
01716     {
01717         CV_Assert( i < 0 );
01718         return 2;
01719     }
01720 
01721     if( k == NONE )
01722         return 0;
01723 
01724     if( k == STD_VECTOR_VECTOR )
01725     {
01726         const std::vector<std::vector<uchar> >& vv = *(const std::vector<std::vector<uchar> >*)obj;
01727         if( i < 0 )
01728             return 1;
01729         CV_Assert( i < (int)vv.size() );
01730         return 2;
01731     }
01732 
01733     if( k == STD_VECTOR_MAT )
01734     {
01735         const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
01736         if( i < 0 )
01737             return 1;
01738         CV_Assert( i < (int)vv.size() );
01739 
01740         return vv[i].dims;
01741     }
01742 
01743     if( k == STD_VECTOR_UMAT )
01744     {
01745         const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
01746         if( i < 0 )
01747             return 1;
01748         CV_Assert( i < (int)vv.size() );
01749 
01750         return vv[i].dims;
01751     }
01752 
01753     if( k == OPENGL_BUFFER )
01754     {
01755         CV_Assert( i < 0 );
01756         return 2;
01757     }
01758 
01759     if( k == CUDA_GPU_MAT )
01760     {
01761         CV_Assert( i < 0 );
01762         return 2;
01763     }
01764 
01765     if( k == CUDA_HOST_MEM )
01766     {
01767         CV_Assert( i < 0 );
01768         return 2;
01769     }
01770 
01771     CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
01772     return 0;
01773 }
01774 
01775 size_t _InputArray::total(int i) const
01776 {
01777     int k = kind();
01778 
01779     if( k == MAT )
01780     {
01781         CV_Assert( i < 0 );
01782         return ((const Mat*)obj)->total();
01783     }
01784 
01785     if( k == UMAT )
01786     {
01787         CV_Assert( i < 0 );
01788         return ((const UMat*)obj)->total();
01789     }
01790 
01791     if( k == STD_VECTOR_MAT )
01792     {
01793         const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
01794         if( i < 0 )
01795             return vv.size();
01796 
01797         CV_Assert( i < (int)vv.size() );
01798         return vv[i].total();
01799     }
01800 
01801 
01802     if( k == STD_VECTOR_UMAT )
01803     {
01804         const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
01805         if( i < 0 )
01806             return vv.size();
01807 
01808         CV_Assert( i < (int)vv.size() );
01809         return vv[i].total();
01810     }
01811 
01812     return size(i).area();
01813 }
01814 
int _InputArray::type(int i) const
{
    // Returns the CV type (depth + channels) of array #i.  For empty
    // collections the declared type is reported, which requires the
    // FIXED_TYPE flag; for i < 0 the first element's type is used.
    int k = kind();

    if( k == MAT )
        return ((const Mat*)obj)->type();

    if( k == UMAT )
        return ((const UMat*)obj)->type();

    if( k == EXPR )
        return ((const MatExpr*)obj)->type();

    // For value wrappers the type is encoded directly in the flags word.
    if( k == MATX || k == STD_VECTOR || k == STD_VECTOR_VECTOR || k == STD_BOOL_VECTOR )
        return CV_MAT_TYPE(flags);

    if( k == NONE )
        return -1;

    if( k == STD_VECTOR_UMAT )
    {
        const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
        if( vv.empty() )
        {
            // An empty collection has no element to inspect; it must have
            // been declared with a fixed type.
            CV_Assert((flags & FIXED_TYPE) != 0);
            return CV_MAT_TYPE(flags);
        }
        CV_Assert( i < (int)vv.size() );
        return vv[i >= 0 ? i : 0].type();
    }

    if( k == STD_VECTOR_MAT )
    {
        const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
        if( vv.empty() )
        {
            CV_Assert((flags & FIXED_TYPE) != 0);
            return CV_MAT_TYPE(flags);
        }
        CV_Assert( i < (int)vv.size() );
        return vv[i >= 0 ? i : 0].type();
    }

    if (k == STD_VECTOR_CUDA_GPU_MAT)
    {
        const std::vector<cuda::GpuMat>& vv = *(const std::vector<cuda::GpuMat>*)obj;
        if (vv.empty())
        {
            CV_Assert((flags & FIXED_TYPE) != 0);
            return CV_MAT_TYPE(flags);
        }
        CV_Assert(i < (int)vv.size());
        return vv[i >= 0 ? i : 0].type();
    }

    if( k == OPENGL_BUFFER )
        return ((const ogl::Buffer*)obj)->type();

    if( k == CUDA_GPU_MAT )
        return ((const cuda::GpuMat*)obj)->type();

    if( k == CUDA_HOST_MEM )
        return ((const cuda::HostMem*)obj)->type();

    CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
    return 0;
}
01882 
01883 int _InputArray::depth(int i) const
01884 {
01885     return CV_MAT_DEPTH(type(i));
01886 }
01887 
01888 int _InputArray::channels(int i) const
01889 {
01890     return CV_MAT_CN(type(i));
01891 }
01892 
01893 bool _InputArray::empty() const
01894 {
01895     int k = kind();
01896 
01897     if( k == MAT )
01898         return ((const Mat*)obj)->empty();
01899 
01900     if( k == UMAT )
01901         return ((const UMat*)obj)->empty();
01902 
01903     if( k == EXPR )
01904         return false;
01905 
01906     if( k == MATX )
01907         return false;
01908 
01909     if( k == STD_VECTOR )
01910     {
01911         const std::vector<uchar>& v = *(const std::vector<uchar>*)obj;
01912         return v.empty();
01913     }
01914 
01915     if( k == STD_BOOL_VECTOR )
01916     {
01917         const std::vector<bool>& v = *(const std::vector<bool>*)obj;
01918         return v.empty();
01919     }
01920 
01921     if( k == NONE )
01922         return true;
01923 
01924     if( k == STD_VECTOR_VECTOR )
01925     {
01926         const std::vector<std::vector<uchar> >& vv = *(const std::vector<std::vector<uchar> >*)obj;
01927         return vv.empty();
01928     }
01929 
01930     if( k == STD_VECTOR_MAT )
01931     {
01932         const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
01933         return vv.empty();
01934     }
01935 
01936     if( k == STD_VECTOR_UMAT )
01937     {
01938         const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
01939         return vv.empty();
01940     }
01941 
01942     if( k == OPENGL_BUFFER )
01943         return ((const ogl::Buffer*)obj)->empty();
01944 
01945     if( k == CUDA_GPU_MAT )
01946         return ((const cuda::GpuMat*)obj)->empty();
01947 
01948     if (k == STD_VECTOR_CUDA_GPU_MAT)
01949     {
01950         const std::vector<cuda::GpuMat>& vv = *(const std::vector<cuda::GpuMat>*)obj;
01951         return vv.empty();
01952     }
01953 
01954     if( k == CUDA_HOST_MEM )
01955         return ((const cuda::HostMem*)obj)->empty();
01956 
01957     CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
01958     return true;
01959 }
01960 
01961 bool _InputArray::isContinuous(int i) const
01962 {
01963     int k = kind();
01964 
01965     if( k == MAT )
01966         return i < 0 ? ((const Mat*)obj)->isContinuous() : true;
01967 
01968     if( k == UMAT )
01969         return i < 0 ? ((const UMat*)obj)->isContinuous() : true;
01970 
01971     if( k == EXPR || k == MATX || k == STD_VECTOR ||
01972         k == NONE || k == STD_VECTOR_VECTOR || k == STD_BOOL_VECTOR )
01973         return true;
01974 
01975     if( k == STD_VECTOR_MAT )
01976     {
01977         const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
01978         CV_Assert((size_t)i < vv.size());
01979         return vv[i].isContinuous();
01980     }
01981 
01982     if( k == STD_VECTOR_UMAT )
01983     {
01984         const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
01985         CV_Assert((size_t)i < vv.size());
01986         return vv[i].isContinuous();
01987     }
01988 
01989     CV_Error(CV_StsNotImplemented, "Unknown/unsupported array type");
01990     return false;
01991 }
01992 
01993 bool _InputArray::isSubmatrix(int i) const
01994 {
01995     int k = kind();
01996 
01997     if( k == MAT )
01998         return i < 0 ? ((const Mat*)obj)->isSubmatrix() : false;
01999 
02000     if( k == UMAT )
02001         return i < 0 ? ((const UMat*)obj)->isSubmatrix() : false;
02002 
02003     if( k == EXPR || k == MATX || k == STD_VECTOR ||
02004         k == NONE || k == STD_VECTOR_VECTOR || k == STD_BOOL_VECTOR )
02005         return false;
02006 
02007     if( k == STD_VECTOR_MAT )
02008     {
02009         const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
02010         CV_Assert((size_t)i < vv.size());
02011         return vv[i].isSubmatrix();
02012     }
02013 
02014     if( k == STD_VECTOR_UMAT )
02015     {
02016         const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
02017         CV_Assert((size_t)i < vv.size());
02018         return vv[i].isSubmatrix();
02019     }
02020 
02021     CV_Error(CV_StsNotImplemented, "");
02022     return false;
02023 }
02024 
// Returns the byte offset of the array's data from the start of its underlying
// buffer (non-zero for ROI views).  i < 0 addresses the whole wrapper; i >= 0
// selects an element of a vector-of-matrices kind.
size_t _InputArray::offset(int i) const
{
    int k = kind();

    if( k == MAT )
    {
        CV_Assert( i < 0 );
        const Mat * const m = ((const Mat*)obj);
        // Distance from the allocation start to the visible top-left element.
        return (size_t)(m->ptr() - m->datastart);
    }

    if( k == UMAT )
    {
        CV_Assert( i < 0 );
        // UMat tracks its offset explicitly instead of via pointers.
        return ((const UMat*)obj)->offset;
    }

    // Value-like kinds own their storage directly, so the offset is zero.
    if( k == EXPR || k == MATX || k == STD_VECTOR ||
        k == NONE || k == STD_VECTOR_VECTOR || k == STD_BOOL_VECTOR )
        return 0;

    if( k == STD_VECTOR_MAT )
    {
        const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
        // NOTE(review): i < 0 returns the sentinel 1 rather than asserting —
        // callers appear to use a non-zero result as "not a flat buffer";
        // confirm before changing.
        if( i < 0 )
            return 1;
        CV_Assert( i < (int)vv.size() );

        return (size_t)(vv[i].ptr() - vv[i].datastart);
    }

    if( k == STD_VECTOR_UMAT )
    {
        const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
        CV_Assert((size_t)i < vv.size());
        return vv[i].offset;
    }

    if( k == CUDA_GPU_MAT )
    {
        CV_Assert( i < 0 );
        const cuda::GpuMat * const m = ((const cuda::GpuMat*)obj);
        return (size_t)(m->data - m->datastart);
    }

    if (k == STD_VECTOR_CUDA_GPU_MAT)
    {
        const std::vector<cuda::GpuMat>& vv = *(const std::vector<cuda::GpuMat>*)obj;
        CV_Assert((size_t)i < vv.size());
        return (size_t)(vv[i].data - vv[i].datastart);
    }

    CV_Error(Error::StsNotImplemented, "");
    return 0;
}
02080 
// Returns the row stride (in bytes) of the wrapped array.  i < 0 addresses the
// whole wrapper; i >= 0 selects an element of a vector-of-matrices kind.
size_t _InputArray::step(int i) const
{
    int k = kind();

    if( k == MAT )
    {
        CV_Assert( i < 0 );
        return ((const Mat*)obj)->step;
    }

    if( k == UMAT )
    {
        CV_Assert( i < 0 );
        return ((const UMat*)obj)->step;
    }

    // Value-like kinds report no meaningful stride.
    if( k == EXPR || k == MATX || k == STD_VECTOR ||
        k == NONE || k == STD_VECTOR_VECTOR || k == STD_BOOL_VECTOR )
        return 0;

    if( k == STD_VECTOR_MAT )
    {
        const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
        // NOTE(review): i < 0 returns the sentinel 1 (mirrors offset());
        // confirm callers rely on this before changing.
        if( i < 0 )
            return 1;
        CV_Assert( i < (int)vv.size() );
        return vv[i].step;
    }

    if( k == STD_VECTOR_UMAT )
    {
        const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
        CV_Assert((size_t)i < vv.size());
        return vv[i].step;
    }

    if( k == CUDA_GPU_MAT )
    {
        CV_Assert( i < 0 );
        return ((const cuda::GpuMat*)obj)->step;
    }
    if (k == STD_VECTOR_CUDA_GPU_MAT)
    {
        const std::vector<cuda::GpuMat>& vv = *(const std::vector<cuda::GpuMat>*)obj;
        CV_Assert((size_t)i < vv.size());
        return vv[i].step;
    }

    CV_Error(Error::StsNotImplemented, "");
    return 0;
}
02132 
02133 void _InputArray::copyTo(const _OutputArray& arr) const
02134 {
02135     int k = kind();
02136 
02137     if( k == NONE )
02138         arr.release();
02139     else if( k == MAT || k == MATX || k == STD_VECTOR || k == STD_BOOL_VECTOR )
02140     {
02141         Mat m = getMat();
02142         m.copyTo(arr);
02143     }
02144     else if( k == EXPR )
02145     {
02146         const MatExpr& e = *((MatExpr*)obj);
02147         if( arr.kind() == MAT )
02148             arr.getMatRef() = e;
02149         else
02150             Mat(e).copyTo(arr);
02151     }
02152     else if( k == UMAT )
02153         ((UMat*)obj)->copyTo(arr);
02154     else
02155         CV_Error(Error::StsNotImplemented, "");
02156 }
02157 
02158 void _InputArray::copyTo(const _OutputArray& arr, const _InputArray & mask) const
02159 {
02160     int k = kind();
02161 
02162     if( k == NONE )
02163         arr.release();
02164     else if( k == MAT || k == MATX || k == STD_VECTOR || k == STD_BOOL_VECTOR )
02165     {
02166         Mat m = getMat();
02167         m.copyTo(arr, mask);
02168     }
02169     else if( k == UMAT )
02170         ((UMat*)obj)->copyTo(arr, mask);
02171     else
02172         CV_Error(Error::StsNotImplemented, "");
02173 }
02174 
02175 bool _OutputArray::fixedSize() const
02176 {
02177     return (flags & FIXED_SIZE) == FIXED_SIZE;
02178 }
02179 
02180 bool _OutputArray::fixedType() const
02181 {
02182     return (flags & FIXED_TYPE) == FIXED_TYPE;
02183 }
02184 
02185 void _OutputArray::create(Size _sz, int mtype, int i, bool allowTransposed, int fixedDepthMask) const
02186 {
02187     int k = kind();
02188     if( k == MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
02189     {
02190         CV_Assert(!fixedSize() || ((Mat*)obj)->size.operator()() == _sz);
02191         CV_Assert(!fixedType() || ((Mat*)obj)->type() == mtype);
02192         ((Mat*)obj)->create(_sz, mtype);
02193         return;
02194     }
02195     if( k == UMAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
02196     {
02197         CV_Assert(!fixedSize() || ((UMat*)obj)->size.operator()() == _sz);
02198         CV_Assert(!fixedType() || ((UMat*)obj)->type() == mtype);
02199         ((UMat*)obj)->create(_sz, mtype);
02200         return;
02201     }
02202     if( k == CUDA_GPU_MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
02203     {
02204         CV_Assert(!fixedSize() || ((cuda::GpuMat*)obj)->size() == _sz);
02205         CV_Assert(!fixedType() || ((cuda::GpuMat*)obj)->type() == mtype);
02206         ((cuda::GpuMat*)obj)->create(_sz, mtype);
02207         return;
02208     }
02209     if( k == OPENGL_BUFFER && i < 0 && !allowTransposed && fixedDepthMask == 0 )
02210     {
02211         CV_Assert(!fixedSize() || ((ogl::Buffer*)obj)->size() == _sz);
02212         CV_Assert(!fixedType() || ((ogl::Buffer*)obj)->type() == mtype);
02213         ((ogl::Buffer*)obj)->create(_sz, mtype);
02214         return;
02215     }
02216     if( k == CUDA_HOST_MEM && i < 0 && !allowTransposed && fixedDepthMask == 0 )
02217     {
02218         CV_Assert(!fixedSize() || ((cuda::HostMem*)obj)->size() == _sz);
02219         CV_Assert(!fixedType() || ((cuda::HostMem*)obj)->type() == mtype);
02220         ((cuda::HostMem*)obj)->create(_sz, mtype);
02221         return;
02222     }
02223     int sizes[] = {_sz.height, _sz.width};
02224     create(2, sizes, mtype, i, allowTransposed, fixedDepthMask);
02225 }
02226 
02227 void _OutputArray::create(int _rows, int _cols, int mtype, int i, bool allowTransposed, int fixedDepthMask) const
02228 {
02229     int k = kind();
02230     if( k == MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
02231     {
02232         CV_Assert(!fixedSize() || ((Mat*)obj)->size.operator()() == Size(_cols, _rows));
02233         CV_Assert(!fixedType() || ((Mat*)obj)->type() == mtype);
02234         ((Mat*)obj)->create(_rows, _cols, mtype);
02235         return;
02236     }
02237     if( k == UMAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
02238     {
02239         CV_Assert(!fixedSize() || ((UMat*)obj)->size.operator()() == Size(_cols, _rows));
02240         CV_Assert(!fixedType() || ((UMat*)obj)->type() == mtype);
02241         ((UMat*)obj)->create(_rows, _cols, mtype);
02242         return;
02243     }
02244     if( k == CUDA_GPU_MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
02245     {
02246         CV_Assert(!fixedSize() || ((cuda::GpuMat*)obj)->size() == Size(_cols, _rows));
02247         CV_Assert(!fixedType() || ((cuda::GpuMat*)obj)->type() == mtype);
02248         ((cuda::GpuMat*)obj)->create(_rows, _cols, mtype);
02249         return;
02250     }
02251     if( k == OPENGL_BUFFER && i < 0 && !allowTransposed && fixedDepthMask == 0 )
02252     {
02253         CV_Assert(!fixedSize() || ((ogl::Buffer*)obj)->size() == Size(_cols, _rows));
02254         CV_Assert(!fixedType() || ((ogl::Buffer*)obj)->type() == mtype);
02255         ((ogl::Buffer*)obj)->create(_rows, _cols, mtype);
02256         return;
02257     }
02258     if( k == CUDA_HOST_MEM && i < 0 && !allowTransposed && fixedDepthMask == 0 )
02259     {
02260         CV_Assert(!fixedSize() || ((cuda::HostMem*)obj)->size() == Size(_cols, _rows));
02261         CV_Assert(!fixedType() || ((cuda::HostMem*)obj)->type() == mtype);
02262         ((cuda::HostMem*)obj)->create(_rows, _cols, mtype);
02263         return;
02264     }
02265     int sizes[] = {_rows, _cols};
02266     create(2, sizes, mtype, i, allowTransposed, fixedDepthMask);
02267 }
02268 
// Generic n-dimensional allocator for every supported destination kind.
// d/sizes describe the requested shape, mtype the element type; i >= 0
// addresses one element of a vector-of-matrices destination.  allowTransposed
// lets an existing d==2 buffer with swapped rows/cols be reused as-is.
// fixedDepthMask is a bitmask of depths a fixed-type destination may accept
// in place of its declared depth.
void _OutputArray::create(int d, const int* sizes, int mtype, int i,
                          bool allowTransposed, int fixedDepthMask) const
{
    int k = kind();
    mtype = CV_MAT_TYPE(mtype);

    if( k == MAT )
    {
        CV_Assert( i < 0 );
        Mat& m = *(Mat*)obj;
        if( allowTransposed )
        {
            // A non-continuous buffer cannot be reused transposed; it must be
            // reallocated, which is only legal for flexible destinations.
            if( !m.isContinuous() )
            {
                CV_Assert(!fixedType() && !fixedSize());
                m.release();
            }

            // Existing data with swapped dimensions is accepted as-is.
            if( d == 2 && m.dims == 2 && m.data &&
                m.type() == mtype && m.rows == sizes[1] && m.cols == sizes[0] )
                return;
        }

        if(fixedType())
        {
            // Same channel count and a depth allowed by the mask: keep the
            // destination's current type; otherwise types must match exactly.
            if(CV_MAT_CN(mtype) == m.channels() && ((1 << CV_MAT_TYPE(flags)) & fixedDepthMask) != 0 )
                mtype = m.type();
            else
                CV_Assert(CV_MAT_TYPE(mtype) == m.type());
        }
        if(fixedSize())
        {
            CV_Assert(m.dims == d);
            for(int j = 0; j < d; ++j)
                CV_Assert(m.size[j] == sizes[j]);
        }
        m.create(d, sizes, mtype);
        return;
    }

    if( k == UMAT )
    {
        // Same logic as the MAT branch, with UMat's emptiness check.
        CV_Assert( i < 0 );
        UMat& m = *(UMat*)obj;
        if( allowTransposed )
        {
            if( !m.isContinuous() )
            {
                CV_Assert(!fixedType() && !fixedSize());
                m.release();
            }

            if( d == 2 && m.dims == 2 && !m.empty() &&
                m.type() == mtype && m.rows == sizes[1] && m.cols == sizes[0] )
                return;
        }

        if(fixedType())
        {
            if(CV_MAT_CN(mtype) == m.channels() && ((1 << CV_MAT_TYPE(flags)) & fixedDepthMask) != 0 )
                mtype = m.type();
            else
                CV_Assert(CV_MAT_TYPE(mtype) == m.type());
        }
        if(fixedSize())
        {
            CV_Assert(m.dims == d);
            for(int j = 0; j < d; ++j)
                CV_Assert(m.size[j] == sizes[j]);
        }
        m.create(d, sizes, mtype);
        return;
    }

    if( k == MATX )
    {
        // Matx storage is fixed; only validate that the request matches the
        // declared type and (possibly transposed) size.
        CV_Assert( i < 0 );
        int type0 = CV_MAT_TYPE(flags);
        CV_Assert( mtype == type0 || (CV_MAT_CN(mtype) == 1 && ((1 << type0) & fixedDepthMask) != 0) );
        CV_Assert( d == 2 && ((sizes[0] == sz.height && sizes[1] == sz.width) ||
                                 (allowTransposed && sizes[0] == sz.width && sizes[1] == sz.height)));
        return;
    }

    if( k == STD_VECTOR || k == STD_VECTOR_VECTOR )
    {
        // A vector destination must be 1D: one of the two dims is 1 (or the
        // request is empty), so rows + cols - 1 is the element count.
        CV_Assert( d == 2 && (sizes[0] == 1 || sizes[1] == 1 || sizes[0]*sizes[1] == 0) );
        size_t len = sizes[0]*sizes[1] > 0 ? sizes[0] + sizes[1] - 1 : 0;
        std::vector<uchar>* v = (std::vector<uchar>*)obj;

        if( k == STD_VECTOR_VECTOR )
        {
            std::vector<std::vector<uchar> >& vv = *(std::vector<std::vector<uchar> >*)obj;
            if( i < 0 )
            {
                // i < 0 resizes the outer vector itself.
                CV_Assert(!fixedSize() || len == vv.size());
                vv.resize(len);
                return;
            }
            CV_Assert( i < (int)vv.size() );
            v = &vv[i];
        }
        else
            CV_Assert( i < 0 );

        int type0 = CV_MAT_TYPE(flags);
        CV_Assert( mtype == type0 || (CV_MAT_CN(mtype) == CV_MAT_CN(type0) && ((1 << type0) & fixedDepthMask) != 0) );

        // The vector's true element type is unknown here; dispatch the resize
        // on the element size so the right amount of storage is produced.
        int esz = CV_ELEM_SIZE(type0);
        CV_Assert(!fixedSize() || len == ((std::vector<uchar>*)v)->size() / esz);
        switch( esz )
        {
        case 1:
            ((std::vector<uchar>*)v)->resize(len);
            break;
        case 2:
            ((std::vector<Vec2b>*)v)->resize(len);
            break;
        case 3:
            ((std::vector<Vec3b>*)v)->resize(len);
            break;
        case 4:
            ((std::vector<int>*)v)->resize(len);
            break;
        case 6:
            ((std::vector<Vec3s>*)v)->resize(len);
            break;
        case 8:
            ((std::vector<Vec2i>*)v)->resize(len);
            break;
        case 12:
            ((std::vector<Vec3i>*)v)->resize(len);
            break;
        case 16:
            ((std::vector<Vec4i>*)v)->resize(len);
            break;
        case 24:
            ((std::vector<Vec6i>*)v)->resize(len);
            break;
        case 32:
            ((std::vector<Vec8i>*)v)->resize(len);
            break;
        case 36:
            ((std::vector<Vec<int, 9> >*)v)->resize(len);
            break;
        case 48:
            ((std::vector<Vec<int, 12> >*)v)->resize(len);
            break;
        case 64:
            ((std::vector<Vec<int, 16> >*)v)->resize(len);
            break;
        case 128:
            ((std::vector<Vec<int, 32> >*)v)->resize(len);
            break;
        case 256:
            ((std::vector<Vec<int, 64> >*)v)->resize(len);
            break;
        case 512:
            ((std::vector<Vec<int, 128> >*)v)->resize(len);
            break;
        default:
            CV_Error_(CV_StsBadArg, ("Vectors with element size %d are not supported. Please, modify OutputArray::create()\n", esz));
        }
        return;
    }

    if( k == NONE )
    {
        CV_Error(CV_StsNullPtr, "create() called for the missing output array" );
        return;
    }

    if( k == STD_VECTOR_MAT )
    {
        std::vector<Mat>& v = *(std::vector<Mat>*)obj;

        if( i < 0 )
        {
            // i < 0 resizes the vector of matrices itself (must be 1D).
            CV_Assert( d == 2 && (sizes[0] == 1 || sizes[1] == 1 || sizes[0]*sizes[1] == 0) );
            size_t len = sizes[0]*sizes[1] > 0 ? sizes[0] + sizes[1] - 1 : 0, len0 = v.size();

            CV_Assert(!fixedSize() || len == len0);
            v.resize(len);
            if( fixedType() )
            {
                // Stamp the declared type onto any newly added empty matrices.
                int _type = CV_MAT_TYPE(flags);
                for( size_t j = len0; j < len; j++ )
                {
                    if( v[j].type() == _type )
                        continue;
                    CV_Assert( v[j].empty() );
                    v[j].flags = (v[j].flags & ~CV_MAT_TYPE_MASK) | _type;
                }
            }
            return;
        }

        // i >= 0: allocate one element, same rules as the MAT branch above.
        CV_Assert( i < (int)v.size() );
        Mat& m = v[i];

        if( allowTransposed )
        {
            if( !m.isContinuous() )
            {
                CV_Assert(!fixedType() && !fixedSize());
                m.release();
            }

            if( d == 2 && m.dims == 2 && m.data &&
                m.type() == mtype && m.rows == sizes[1] && m.cols == sizes[0] )
                return;
        }

        if(fixedType())
        {
            if(CV_MAT_CN(mtype) == m.channels() && ((1 << CV_MAT_TYPE(flags)) & fixedDepthMask) != 0 )
                mtype = m.type();
            else
                CV_Assert(CV_MAT_TYPE(mtype) == m.type());
        }
        if(fixedSize())
        {
            CV_Assert(m.dims == d);
            for(int j = 0; j < d; ++j)
                CV_Assert(m.size[j] == sizes[j]);
        }

        m.create(d, sizes, mtype);
        return;
    }

    if( k == STD_VECTOR_UMAT )
    {
        // Mirrors the STD_VECTOR_MAT branch for UMat elements.
        std::vector<UMat>& v = *(std::vector<UMat>*)obj;

        if( i < 0 )
        {
            CV_Assert( d == 2 && (sizes[0] == 1 || sizes[1] == 1 || sizes[0]*sizes[1] == 0) );
            size_t len = sizes[0]*sizes[1] > 0 ? sizes[0] + sizes[1] - 1 : 0, len0 = v.size();

            CV_Assert(!fixedSize() || len == len0);
            v.resize(len);
            if( fixedType() )
            {
                int _type = CV_MAT_TYPE(flags);
                for( size_t j = len0; j < len; j++ )
                {
                    if( v[j].type() == _type )
                        continue;
                    CV_Assert( v[j].empty() );
                    v[j].flags = (v[j].flags & ~CV_MAT_TYPE_MASK) | _type;
                }
            }
            return;
        }

        CV_Assert( i < (int)v.size() );
        UMat& m = v[i];

        if( allowTransposed )
        {
            if( !m.isContinuous() )
            {
                CV_Assert(!fixedType() && !fixedSize());
                m.release();
            }

            if( d == 2 && m.dims == 2 && m.u &&
                m.type() == mtype && m.rows == sizes[1] && m.cols == sizes[0] )
                return;
        }

        if(fixedType())
        {
            if(CV_MAT_CN(mtype) == m.channels() && ((1 << CV_MAT_TYPE(flags)) & fixedDepthMask) != 0 )
                mtype = m.type();
            else
                CV_Assert(CV_MAT_TYPE(mtype) == m.type());
        }
        if(fixedSize())
        {
            CV_Assert(m.dims == d);
            for(int j = 0; j < d; ++j)
                CV_Assert(m.size[j] == sizes[j]);
        }

        m.create(d, sizes, mtype);
        return;
    }

    CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
}
02561 
02562 void _OutputArray::createSameSize(const _InputArray& arr, int mtype) const
02563 {
02564     int arrsz[CV_MAX_DIM], d = arr.sizend(arrsz);
02565     create(d, arrsz, mtype);
02566 }
02567 
02568 void _OutputArray::release() const
02569 {
02570     CV_Assert(!fixedSize());
02571 
02572     int k = kind();
02573 
02574     if( k == MAT )
02575     {
02576         ((Mat*)obj)->release();
02577         return;
02578     }
02579 
02580     if( k == UMAT )
02581     {
02582         ((UMat*)obj)->release();
02583         return;
02584     }
02585 
02586     if( k == CUDA_GPU_MAT )
02587     {
02588         ((cuda::GpuMat*)obj)->release();
02589         return;
02590     }
02591 
02592     if( k == CUDA_HOST_MEM )
02593     {
02594         ((cuda::HostMem*)obj)->release();
02595         return;
02596     }
02597 
02598     if( k == OPENGL_BUFFER )
02599     {
02600         ((ogl::Buffer*)obj)->release();
02601         return;
02602     }
02603 
02604     if( k == NONE )
02605         return;
02606 
02607     if( k == STD_VECTOR )
02608     {
02609         create(Size(), CV_MAT_TYPE(flags));
02610         return;
02611     }
02612 
02613     if( k == STD_VECTOR_VECTOR )
02614     {
02615         ((std::vector<std::vector<uchar> >*)obj)->clear();
02616         return;
02617     }
02618 
02619     if( k == STD_VECTOR_MAT )
02620     {
02621         ((std::vector<Mat>*)obj)->clear();
02622         return;
02623     }
02624 
02625     if( k == STD_VECTOR_UMAT )
02626     {
02627         ((std::vector<UMat>*)obj)->clear();
02628         return;
02629     }
02630     if (k == STD_VECTOR_CUDA_GPU_MAT)
02631     {
02632         ((std::vector<cuda::GpuMat>*)obj)->clear();
02633         return;
02634     }
02635     CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
02636 }
02637 
02638 void _OutputArray::clear() const
02639 {
02640     int k = kind();
02641 
02642     if( k == MAT )
02643     {
02644         CV_Assert(!fixedSize());
02645         ((Mat*)obj)->resize(0);
02646         return;
02647     }
02648 
02649     release();
02650 }
02651 
02652 bool _OutputArray::needed() const
02653 {
02654     return kind() != NONE;
02655 }
02656 
02657 Mat& _OutputArray::getMatRef(int i) const
02658 {
02659     int k = kind();
02660     if( i < 0 )
02661     {
02662         CV_Assert( k == MAT );
02663         return *(Mat*)obj;
02664     }
02665     else
02666     {
02667         CV_Assert( k == STD_VECTOR_MAT );
02668         std::vector<Mat>& v = *(std::vector<Mat>*)obj;
02669         CV_Assert( i < (int)v.size() );
02670         return v[i];
02671     }
02672 }
02673 
02674 UMat& _OutputArray::getUMatRef(int i) const
02675 {
02676     int k = kind();
02677     if( i < 0 )
02678     {
02679         CV_Assert( k == UMAT );
02680         return *(UMat*)obj;
02681     }
02682     else
02683     {
02684         CV_Assert( k == STD_VECTOR_UMAT );
02685         std::vector<UMat>& v = *(std::vector<UMat>*)obj;
02686         CV_Assert( i < (int)v.size() );
02687         return v[i];
02688     }
02689 }
02690 
02691 cuda::GpuMat& _OutputArray::getGpuMatRef() const
02692 {
02693     int k = kind();
02694     CV_Assert( k == CUDA_GPU_MAT );
02695     return *(cuda::GpuMat*)obj;
02696 }
02697 std::vector<cuda::GpuMat>& _OutputArray::getGpuMatVecRef() const
02698 {
02699     int k = kind();
02700     CV_Assert(k == STD_VECTOR_CUDA_GPU_MAT);
02701     return *(std::vector<cuda::GpuMat>*)obj;
02702 }
02703 
02704 ogl::Buffer& _OutputArray::getOGlBufferRef() const
02705 {
02706     int k = kind();
02707     CV_Assert( k == OPENGL_BUFFER );
02708     return *(ogl::Buffer*)obj;
02709 }
02710 
02711 cuda::HostMem& _OutputArray::getHostMemRef() const
02712 {
02713     int k = kind();
02714     CV_Assert( k == CUDA_HOST_MEM );
02715     return *(cuda::HostMem*)obj;
02716 }
02717 
// Fills the destination with the value in 'arr' (optionally only where 'mask'
// is non-zero).  A NONE wrapper is a deliberate no-op.
void _OutputArray::setTo(const _InputArray& arr, const _InputArray & mask) const
{
    int k = kind();

    if( k == NONE )
        ;  // noArray(): nothing to fill
    else if( k == MAT || k == MATX || k == STD_VECTOR )
    {
        Mat m = getMat();
        m.setTo(arr, mask);
    }
    else if( k == UMAT )
        ((UMat*)obj)->setTo(arr, mask);
    else if( k == CUDA_GPU_MAT )
    {
        // GpuMat::setTo wants a Scalar: validate that 'arr' is a scalar of a
        // compatible type, then read its 4 doubles into a Scalar.
        Mat value = arr.getMat();
        CV_Assert( checkScalar(value, type(), arr.kind(), _InputArray::CUDA_GPU_MAT) );
        ((cuda::GpuMat*)obj)->setTo(Scalar(Vec<double, 4>(value.ptr<double>())), mask);
    }
    else
        CV_Error(Error::StsNotImplemented, "");
}
02740 
02741 
02742 void _OutputArray::assign(const UMat& u) const
02743 {
02744     int k = kind();
02745     if (k == UMAT)
02746     {
02747         *(UMat*)obj = u;
02748     }
02749     else if (k == MAT)
02750     {
02751         u.copyTo(*(Mat*)obj); // TODO check u.getMat()
02752     }
02753     else if (k == MATX)
02754     {
02755         u.copyTo(getMat()); // TODO check u.getMat()
02756     }
02757     else
02758     {
02759         CV_Error(Error::StsNotImplemented, "");
02760     }
02761 }
02762 
02763 
02764 void _OutputArray::assign(const Mat& m) const
02765 {
02766     int k = kind();
02767     if (k == UMAT)
02768     {
02769         m.copyTo(*(UMat*)obj); // TODO check m.getUMat()
02770     }
02771     else if (k == MAT)
02772     {
02773         *(Mat*)obj = m;
02774     }
02775     else if (k == MATX)
02776     {
02777         m.copyTo(getMat());
02778     }
02779     else
02780     {
02781         CV_Error(Error::StsNotImplemented, "");
02782     }
02783 }
02784 
02785 
// Shared empty placeholder returned by noArray(); its kind() is NONE, which
// the _InputArray/_OutputArray methods above treat as "argument not supplied".
static _InputOutputArray _none;
InputOutputArray noArray() { return _none; }
02788 
02789 }
02790 
02791 /*************************************************************************************************\
02792                                         Matrix Operations
02793 \*************************************************************************************************/
02794 
02795 void cv::hconcat(const Mat* src, size_t nsrc, OutputArray _dst)
02796 {
02797     if( nsrc == 0 || !src )
02798     {
02799         _dst.release();
02800         return;
02801     }
02802 
02803     int totalCols = 0, cols = 0;
02804     size_t i;
02805     for( i = 0; i < nsrc; i++ )
02806     {
02807         CV_Assert( src[i].dims <= 2 &&
02808                    src[i].rows == src[0].rows &&
02809                    src[i].type() == src[0].type());
02810         totalCols += src[i].cols;
02811     }
02812     _dst.create( src[0].rows, totalCols, src[0].type());
02813     Mat dst = _dst.getMat();
02814     for( i = 0; i < nsrc; i++ )
02815     {
02816         Mat dpart = dst(Rect(cols, 0, src[i].cols, src[i].rows));
02817         src[i].copyTo(dpart);
02818         cols += src[i].cols;
02819     }
02820 }
02821 
02822 void cv::hconcat(InputArray src1, InputArray src2, OutputArray dst)
02823 {
02824     Mat src[] = {src1.getMat(), src2.getMat()};
02825     hconcat(src, 2, dst);
02826 }
02827 
02828 void cv::hconcat(InputArray _src, OutputArray dst)
02829 {
02830     std::vector<Mat> src;
02831     _src.getMatVector(src);
02832     hconcat(!src.empty() ? &src[0] : 0, src.size(), dst);
02833 }
02834 
02835 void cv::vconcat(const Mat* src, size_t nsrc, OutputArray _dst)
02836 {
02837     if( nsrc == 0 || !src )
02838     {
02839         _dst.release();
02840         return;
02841     }
02842 
02843     int totalRows = 0, rows = 0;
02844     size_t i;
02845     for( i = 0; i < nsrc; i++ )
02846     {
02847         CV_Assert(src[i].dims <= 2 &&
02848                   src[i].cols == src[0].cols &&
02849                   src[i].type() == src[0].type());
02850         totalRows += src[i].rows;
02851     }
02852     _dst.create( totalRows, src[0].cols, src[0].type());
02853     Mat dst = _dst.getMat();
02854     for( i = 0; i < nsrc; i++ )
02855     {
02856         Mat dpart(dst, Rect(0, rows, src[i].cols, src[i].rows));
02857         src[i].copyTo(dpart);
02858         rows += src[i].rows;
02859     }
02860 }
02861 
02862 void cv::vconcat(InputArray src1, InputArray src2, OutputArray dst)
02863 {
02864     Mat src[] = {src1.getMat(), src2.getMat()};
02865     vconcat(src, 2, dst);
02866 }
02867 
02868 void cv::vconcat(InputArray _src, OutputArray dst)
02869 {
02870     std::vector<Mat> src;
02871     _src.getMatVector(src);
02872     vconcat(!src.empty() ? &src[0] : 0, src.size(), dst);
02873 }
02874 
02875 //////////////////////////////////////// set identity ////////////////////////////////////////////
02876 
02877 #ifdef HAVE_OPENCL
02878 
02879 namespace cv {
02880 
// OpenCL path for setIdentity(): builds and runs the "setIdentity" kernel.
// Returns false when the kernel cannot be built so the caller falls back to
// the CPU implementation.
static bool ocl_setIdentity( InputOutputArray _m, const Scalar& s )
{
    int type = _m.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type), kercn = cn, rowsPerWI = 1;
    // 3-channel scalars are widened to 4 channels for the kernel argument.
    int sctype = CV_MAKE_TYPE(depth, cn == 3 ? 4 : cn);
    if (ocl::Device::getDefault().isIntel())
    {
        // Tuning for Intel devices: 4 rows per work item, and for 1-channel
        // data a vector width of 4 when the predictor allows it.
        rowsPerWI = 4;
        if (cn == 1)
        {
            kercn = std::min(ocl::predictOptimalVectorWidth(_m), 4);
            if (kercn != 4)
                kercn = 1;
        }
    }

    // Compile-time kernel parameters are passed via -D build options.
    ocl::Kernel k("setIdentity", ocl::core::set_identity_oclsrc,
                  format("-D T=%s -D T1=%s -D cn=%d -D ST=%s -D kercn=%d -D rowsPerWI=%d",
                         ocl::memopTypeToStr(CV_MAKE_TYPE(depth, kercn)),
                         ocl::memopTypeToStr(depth), cn,
                         ocl::memopTypeToStr(sctype),
                         kercn, rowsPerWI));
    if (k.empty())
        return false;

    UMat m = _m.getUMat();
    k.args(ocl::KernelArg::WriteOnly(m, cn, kercn),
           ocl::KernelArg::Constant(Mat(1, 1, sctype, s)));

    // Global size: columns scaled by the vector width, rows rounded up to a
    // multiple of rowsPerWI.
    size_t globalsize[2] = { (size_t)m.cols * cn / kercn, ((size_t)m.rows + rowsPerWI - 1) / rowsPerWI };
    return k.run(2, globalsize, NULL, false);
}
02912 
02913 }
02914 
02915 #endif
02916 
02917 void cv::setIdentity( InputOutputArray _m, const Scalar & s )
02918 {
02919     CV_Assert( _m.dims() <= 2 );
02920 
02921 #ifdef HAVE_OPENCL
02922     CV_OCL_RUN(_m.isUMat(),
02923                ocl_setIdentity(_m, s))
02924 #endif
02925 
02926     Mat m = _m.getMat();
02927     int i, j, rows = m.rows, cols = m.cols, type = m.type();
02928 
02929     if( type == CV_32FC1 )
02930     {
02931         float* data = m.ptr<float>();
02932         float val = (float)s[0];
02933         size_t step = m.step/sizeof(data[0]);
02934 
02935         for( i = 0; i < rows; i++, data += step )
02936         {
02937             for( j = 0; j < cols; j++ )
02938                 data[j] = 0;
02939             if( i < cols )
02940                 data[i] = val;
02941         }
02942     }
02943     else if( type == CV_64FC1 )
02944     {
02945         double* data = m.ptr<double>();
02946         double val = s[0];
02947         size_t step = m.step/sizeof(data[0]);
02948 
02949         for( i = 0; i < rows; i++, data += step )
02950         {
02951             for( j = 0; j < cols; j++ )
02952                 data[j] = j == i ? val : 0;
02953         }
02954     }
02955     else
02956     {
02957         m = Scalar (0);
02958         m.diag() = s;
02959     }
02960 }
02961 
02962 //////////////////////////////////////////// trace ///////////////////////////////////////////
02963 
02964 cv::Scalar  cv::trace( InputArray _m )
02965 {
02966     Mat m = _m.getMat();
02967     CV_Assert( m.dims <= 2 );
02968     int i, type = m.type();
02969     int nm = std::min(m.rows, m.cols);
02970 
02971     if( type == CV_32FC1 )
02972     {
02973         const float* ptr = m.ptr<float>();
02974         size_t step = m.step/sizeof(ptr[0]) + 1;
02975         double _s = 0;
02976         for( i = 0; i < nm; i++ )
02977             _s += ptr[i*step];
02978         return _s;
02979     }
02980 
02981     if( type == CV_64FC1 )
02982     {
02983         const double* ptr = m.ptr<double>();
02984         size_t step = m.step/sizeof(ptr[0]) + 1;
02985         double _s = 0;
02986         for( i = 0; i < nm; i++ )
02987             _s += ptr[i*step];
02988         return _s;
02989     }
02990 
02991     return cv::sum(m.diag());
02992 }
02993 
02994 ////////////////////////////////////// transpose /////////////////////////////////////////
02995 
02996 namespace cv
02997 {
02998 
// Out-of-place transpose kernel for element type T.
// 'sz' is the *source* size (sz.width columns x sz.height rows);
// sstep/dstep are row strides in bytes. dst therefore receives
// sz.width rows of sz.height elements each.
template<typename T> static void
transpose_( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size sz )
{
    int i=0, j, m = sz.width, n = sz.height;

    // Unrolled path: process 4x4 tiles so the strided, column-wise reads
    // from 'src' are amortized over four destination rows at a time.
    #if CV_ENABLE_UNROLLED
    for(; i <= m - 4; i += 4 )
    {
        // Four consecutive destination rows (= four source columns i..i+3).
        T* d0 = (T*)(dst + dstep*i);
        T* d1 = (T*)(dst + dstep*(i+1));
        T* d2 = (T*)(dst + dstep*(i+2));
        T* d3 = (T*)(dst + dstep*(i+3));

        for( j = 0; j <= n - 4; j += 4 )
        {
            // Four consecutive source rows, each read at columns i..i+3.
            const T* s0 = (const T*)(src + i*sizeof(T) + sstep*j);
            const T* s1 = (const T*)(src + i*sizeof(T) + sstep*(j+1));
            const T* s2 = (const T*)(src + i*sizeof(T) + sstep*(j+2));
            const T* s3 = (const T*)(src + i*sizeof(T) + sstep*(j+3));

            d0[j] = s0[0]; d0[j+1] = s1[0]; d0[j+2] = s2[0]; d0[j+3] = s3[0];
            d1[j] = s0[1]; d1[j+1] = s1[1]; d1[j+2] = s2[1]; d1[j+3] = s3[1];
            d2[j] = s0[2]; d2[j+1] = s1[2]; d2[j+2] = s2[2]; d2[j+3] = s3[2];
            d3[j] = s0[3]; d3[j+1] = s1[3]; d3[j+2] = s2[3]; d3[j+3] = s3[3];
        }

        // Remaining n % 4 source rows for this group of four columns.
        for( ; j < n; j++ )
        {
            const T* s0 = (const T*)(src + i*sizeof(T) + j*sstep);
            d0[j] = s0[0]; d1[j] = s0[1]; d2[j] = s0[2]; d3[j] = s0[3];
        }
    }
    #endif
    // Tail columns (or the whole matrix when unrolling is disabled):
    // one destination row, i.e. one source column, at a time.
    for( ; i < m; i++ )
    {
        T* d0 = (T*)(dst + dstep*i);
        j = 0;
        #if CV_ENABLE_UNROLLED
        for(; j <= n - 4; j += 4 )
        {
            const T* s0 = (const T*)(src + i*sizeof(T) + sstep*j);
            const T* s1 = (const T*)(src + i*sizeof(T) + sstep*(j+1));
            const T* s2 = (const T*)(src + i*sizeof(T) + sstep*(j+2));
            const T* s3 = (const T*)(src + i*sizeof(T) + sstep*(j+3));

            d0[j] = s0[0]; d0[j+1] = s1[0]; d0[j+2] = s2[0]; d0[j+3] = s3[0];
        }
        #endif
        for( ; j < n; j++ )
        {
            const T* s0 = (const T*)(src + i*sizeof(T) + j*sstep);
            d0[j] = s0[0];
        }
    }
}
03054 
03055 template<typename T> static void
03056 transposeI_( uchar* data, size_t step, int n )
03057 {
03058     int i, j;
03059     for( i = 0; i < n; i++ )
03060     {
03061         T* row = (T*)(data + step*i);
03062         uchar* data1 = data + i*sizeof(T);
03063         for( j = i+1; j < n; j++ )
03064             std::swap( row[j], *(T*)(data1 + step*j) );
03065     }
03066 }
03067 
// Function-pointer types for the out-of-place and in-place transpose kernels.
typedef void (*TransposeFunc)( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size sz );
typedef void (*TransposeInplaceFunc)( uchar* data, size_t step, int n );

// Instantiates a pair of non-template wrappers (transpose_<suffix> and
// transposeI_<suffix>) around the templates above so they can be stored
// in the dispatch tables below.
#define DEF_TRANSPOSE_FUNC(suffix, type) \
static void transpose_##suffix( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size sz ) \
{ transpose_<type>(src, sstep, dst, dstep, sz); } \
\
static void transposeI_##suffix( uchar* data, size_t step, int n ) \
{ transposeI_<type>(data, step, n); }

// One instantiation per distinct element size; kernels are selected by
// element size in bytes, not by CV type, so e.g. Vec3b serves every
// 3-byte type and Vec2i every 8-byte type.
DEF_TRANSPOSE_FUNC(8u, uchar)
DEF_TRANSPOSE_FUNC(16u, ushort)
DEF_TRANSPOSE_FUNC(8uC3, Vec3b)
DEF_TRANSPOSE_FUNC(32s, int)
DEF_TRANSPOSE_FUNC(16uC3, Vec3s)
DEF_TRANSPOSE_FUNC(32sC2, Vec2i)
DEF_TRANSPOSE_FUNC(32sC3, Vec3i)
DEF_TRANSPOSE_FUNC(32sC4, Vec4i)
DEF_TRANSPOSE_FUNC(32sC6, Vec6i)
DEF_TRANSPOSE_FUNC(32sC8, Vec8i)

// Dispatch tables indexed by element size in bytes (CV_ELEM_SIZE, up to 32);
// a zero entry means no kernel exists for that element size.
static TransposeFunc transposeTab[] =
{
    0, transpose_8u, transpose_16u, transpose_8uC3, transpose_32s, 0, transpose_16uC3, 0,
    transpose_32sC2, 0, 0, 0, transpose_32sC3, 0, 0, 0, transpose_32sC4,
    0, 0, 0, 0, 0, 0, 0, transpose_32sC6, 0, 0, 0, 0, 0, 0, 0, transpose_32sC8
};

static TransposeInplaceFunc transposeInplaceTab[] =
{
    0, transposeI_8u, transposeI_16u, transposeI_8uC3, transposeI_32s, 0, transposeI_16uC3, 0,
    transposeI_32sC2, 0, 0, 0, transposeI_32sC3, 0, 0, 0, transposeI_32sC4,
    0, 0, 0, 0, 0, 0, 0, transposeI_32sC6, 0, 0, 0, 0, 0, 0, 0, transposeI_32sC8
};
03102 
03103 #ifdef HAVE_OPENCL
03104 
// Integer ceiling division: smallest q such that q*b >= a (for positive b).
static inline int divUp(int a, int b)
{
    const int biased = a + b - 1;
    return biased / b;
}
03109 
// OpenCL path for cv::transpose(). Returns false so the caller falls back
// to the CPU implementation (kernel build failure, insufficient local
// memory, ...).
static bool ocl_transpose( InputArray _src, OutputArray _dst )
{
    const ocl::Device & dev = ocl::Device::getDefault();
    // Tile geometry handed to the OpenCL program via build defines;
    // must stay in sync with what transpose.cl expects.
    const int TILE_DIM = 32, BLOCK_ROWS = 8;
    int type = _src.type(), cn = CV_MAT_CN(type), depth = CV_MAT_DEPTH(type),
        rowsPerWI = dev.isIntel() ? 4 : 1;

    UMat src = _src.getUMat();
    _dst.create(src.cols, src.rows, type);
    UMat dst = _dst.getUMat();

    String kernelName("transpose");
    // Same underlying buffer => in-place transpose (square matrices only).
    bool inplace = dst.u == src.u;

    if (inplace)
    {
        CV_Assert(dst.cols == dst.rows);
        kernelName += "_inplace";
    }
    else
    {
        // check required local memory size
        size_t required_local_memory = (size_t) TILE_DIM*(TILE_DIM+1)*CV_ELEM_SIZE(type);
        if (required_local_memory > ocl::Device::getDefault().localMemSize())
            return false;
    }

    ocl::Kernel k(kernelName.c_str(), ocl::core::transpose_oclsrc,
                  format("-D T=%s -D T1=%s -D cn=%d -D TILE_DIM=%d -D BLOCK_ROWS=%d -D rowsPerWI=%d%s",
                         ocl::memopTypeToStr(type), ocl::memopTypeToStr(depth),
                         cn, TILE_DIM, BLOCK_ROWS, rowsPerWI, inplace ? " -D INPLACE" : ""));
    if (k.empty())
        return false;

    if (inplace)
        k.args(ocl::KernelArg::ReadWriteNoSize(dst), dst.rows);
    else
        k.args(ocl::KernelArg::ReadOnly(src),
               ocl::KernelArg::WriteOnlyNoSize(dst));

    // In-place: each work item handles rowsPerWI rows. Out-of-place: each
    // TILE_DIM-row band is covered by BLOCK_ROWS work items.
    size_t localsize[2]  = { TILE_DIM, BLOCK_ROWS };
    size_t globalsize[2] = { (size_t)src.cols, inplace ? ((size_t)src.rows + rowsPerWI - 1) / rowsPerWI : (divUp((size_t)src.rows, TILE_DIM) * BLOCK_ROWS) };

    // NOTE(review): Intel-specific work-group shape tweak — presumably a
    // measured optimization for those devices; rationale not visible here.
    if (inplace && dev.isIntel())
    {
        localsize[0] = 16;
        localsize[1] = dev.maxWorkGroupSize() / localsize[0];
    }

    return k.run(2, globalsize, localsize, false);
}
03161 
03162 #endif
03163 
03164 #ifdef HAVE_IPP
// IPP-accelerated transpose. Selects an in-place variant
// (ippiTranspose_*_IR) when src and dst share the same data pointer
// (square matrices only), otherwise an out-of-place variant. Returns
// false when no IPP function matches the type or the IPP call fails,
// so the caller can fall back to the generic implementation.
static bool ipp_transpose( Mat &src, Mat &dst )
{
    int type = src.type();
    typedef IppStatus (CV_STDCALL * ippiTranspose)(const void * pSrc, int srcStep, void * pDst, int dstStep, IppiSize roiSize);
    typedef IppStatus (CV_STDCALL * ippiTransposeI)(const void * pSrcDst, int srcDstStep, IppiSize roiSize);
    ippiTranspose ippFunc = 0;
    ippiTransposeI ippFuncI = 0;

    if (dst.data == src.data && dst.cols == dst.rows)
    {
        // In-place family; some of these entry points are deprecated in
        // newer IPP versions, hence the suppression guards.
        CV_SUPPRESS_DEPRECATED_START
        ippFuncI =
            type == CV_8UC1 ? (ippiTransposeI)ippiTranspose_8u_C1IR :
            type == CV_8UC3 ? (ippiTransposeI)ippiTranspose_8u_C3IR :
            type == CV_8UC4 ? (ippiTransposeI)ippiTranspose_8u_C4IR :
            type == CV_16UC1 ? (ippiTransposeI)ippiTranspose_16u_C1IR :
            type == CV_16UC3 ? (ippiTransposeI)ippiTranspose_16u_C3IR :
            type == CV_16UC4 ? (ippiTransposeI)ippiTranspose_16u_C4IR :
            type == CV_16SC1 ? (ippiTransposeI)ippiTranspose_16s_C1IR :
            type == CV_16SC3 ? (ippiTransposeI)ippiTranspose_16s_C3IR :
            type == CV_16SC4 ? (ippiTransposeI)ippiTranspose_16s_C4IR :
            type == CV_32SC1 ? (ippiTransposeI)ippiTranspose_32s_C1IR :
            type == CV_32SC3 ? (ippiTransposeI)ippiTranspose_32s_C3IR :
            type == CV_32SC4 ? (ippiTransposeI)ippiTranspose_32s_C4IR :
            type == CV_32FC1 ? (ippiTransposeI)ippiTranspose_32f_C1IR :
            type == CV_32FC3 ? (ippiTransposeI)ippiTranspose_32f_C3IR :
            type == CV_32FC4 ? (ippiTransposeI)ippiTranspose_32f_C4IR : 0;
        CV_SUPPRESS_DEPRECATED_END
    }
    else
    {
        // Out-of-place family.
        ippFunc =
            type == CV_8UC1 ? (ippiTranspose)ippiTranspose_8u_C1R :
            type == CV_8UC3 ? (ippiTranspose)ippiTranspose_8u_C3R :
            type == CV_8UC4 ? (ippiTranspose)ippiTranspose_8u_C4R :
            type == CV_16UC1 ? (ippiTranspose)ippiTranspose_16u_C1R :
            type == CV_16UC3 ? (ippiTranspose)ippiTranspose_16u_C3R :
            type == CV_16UC4 ? (ippiTranspose)ippiTranspose_16u_C4R :
            type == CV_16SC1 ? (ippiTranspose)ippiTranspose_16s_C1R :
            type == CV_16SC3 ? (ippiTranspose)ippiTranspose_16s_C3R :
            type == CV_16SC4 ? (ippiTranspose)ippiTranspose_16s_C4R :
            type == CV_32SC1 ? (ippiTranspose)ippiTranspose_32s_C1R :
            type == CV_32SC3 ? (ippiTranspose)ippiTranspose_32s_C3R :
            type == CV_32SC4 ? (ippiTranspose)ippiTranspose_32s_C4R :
            type == CV_32FC1 ? (ippiTranspose)ippiTranspose_32f_C1R :
            type == CV_32FC3 ? (ippiTranspose)ippiTranspose_32f_C3R :
            type == CV_32FC4 ? (ippiTranspose)ippiTranspose_32f_C4R : 0;
    }

    IppiSize roiSize = { src.cols, src.rows };
    if (ippFunc != 0)
    {
        if (ippFunc(src.ptr(), (int)src.step, dst.ptr(), (int)dst.step, roiSize) >= 0)
            return true;
    }
    else if (ippFuncI != 0)
    {
        if (ippFuncI(dst.ptr(), (int)dst.step, roiSize) >= 0)
            return true;
    }
    return false;
}
03227 #endif
03228 
03229 }
03230 
03231 
void cv::transpose( InputArray _src, OutputArray _dst )
{
    int type = _src.type(), esz = CV_ELEM_SIZE(type);
    // Kernels are dispatched by element size; the tables cover up to 32 bytes.
    CV_Assert( _src.dims() <= 2 && esz <= 32 );

#ifdef HAVE_OPENCL
    // Try the OpenCL path when the destination is a UMat
    // (CV_OCL_RUN returns from this function on success).
    CV_OCL_RUN(_dst.isUMat(),
               ocl_transpose(_src, _dst))
#endif

    Mat src = _src.getMat();
    if( src.empty() )
    {
        _dst.release();
        return;
    }

    // Destination has swapped dimensions.
    _dst.create(src.cols, src.rows, src.type());
    Mat dst = _dst.getMat();

    // handle the case of single-column/single-row matrices, stored in STL vectors.
    if( src.rows != dst.cols || src.cols != dst.rows )
    {
        CV_Assert( src.size() == dst.size() && (src.cols == 1 || src.rows == 1) );
        src.copyTo(dst);
        return;
    }

    // IPP fast path (returns from this function on success).
    CV_IPP_RUN(true, ipp_transpose(src, dst))

    if( dst.data == src.data )
    {
        // In-place transpose is only defined for square matrices.
        TransposeInplaceFunc func = transposeInplaceTab[esz];
        CV_Assert( func != 0 );
        CV_Assert( dst.cols == dst.rows );
        func( dst.ptr(), dst.step, dst.rows );
    }
    else
    {
        TransposeFunc func = transposeTab[esz];
        CV_Assert( func != 0 );
        func( src.ptr(), src.step, dst.ptr(), dst.step, src.size() );
    }
}
03276 
03277 
03278 ////////////////////////////////////// completeSymm /////////////////////////////////////////
03279 
03280 void cv::completeSymm( InputOutputArray _m, bool LtoR )
03281 {
03282     Mat m = _m.getMat();
03283     size_t step = m.step, esz = m.elemSize();
03284     CV_Assert( m.dims <= 2 && m.rows == m.cols );
03285 
03286     int rows = m.rows;
03287     int j0 = 0, j1 = rows;
03288 
03289     uchar* data = m.ptr();
03290     for( int i = 0; i < rows; i++ )
03291     {
03292         if( !LtoR ) j1 = i; else j0 = i+1;
03293         for( int j = j0; j < j1; j++ )
03294             memcpy(data + (i*step + j*esz), data + (j*step + i*esz), esz);
03295     }
03296 }
03297 
03298 
03299 cv::Mat cv::Mat::cross(InputArray _m) const
03300 {
03301     Mat m = _m.getMat();
03302     int tp = type(), d = CV_MAT_DEPTH(tp);
03303     CV_Assert( dims <= 2 && m.dims <= 2 && size() == m.size() && tp == m.type() &&
03304         ((rows == 3 && cols == 1) || (cols*channels() == 3 && rows == 1)));
03305     Mat result(rows, cols, tp);
03306 
03307     if( d == CV_32F )
03308     {
03309         const float *a = (const float*)data, *b = (const float*)m.data;
03310         float* c = (float*)result.data;
03311         size_t lda = rows > 1 ? step/sizeof(a[0]) : 1;
03312         size_t ldb = rows > 1 ? m.step/sizeof(b[0]) : 1;
03313 
03314         c[0] = a[lda] * b[ldb*2] - a[lda*2] * b[ldb];
03315         c[1] = a[lda*2] * b[0] - a[0] * b[ldb*2];
03316         c[2] = a[0] * b[ldb] - a[lda] * b[0];
03317     }
03318     else if( d == CV_64F )
03319     {
03320         const double *a = (const double*)data, *b = (const double*)m.data;
03321         double* c = (double*)result.data;
03322         size_t lda = rows > 1 ? step/sizeof(a[0]) : 1;
03323         size_t ldb = rows > 1 ? m.step/sizeof(b[0]) : 1;
03324 
03325         c[0] = a[lda] * b[ldb*2] - a[lda*2] * b[ldb];
03326         c[1] = a[lda*2] * b[0] - a[0] * b[ldb*2];
03327         c[2] = a[0] * b[ldb] - a[lda] * b[0];
03328     }
03329 
03330     return result;
03331 }
03332 
03333 
03334 ////////////////////////////////////////// reduce ////////////////////////////////////////////
03335 
03336 namespace cv
03337 {
03338 
// Row reduction (dim = 0): fold all rows of srcmat into the single row of
// dstmat using the binary operation Op. T = source element type, ST =
// destination element type, WT (Op::rtype) = accumulator type. Channels
// are handled implicitly by widening the logical row by the channel count.
template<typename T, typename ST, class Op> static void
reduceR_( const Mat& srcmat, Mat& dstmat )
{
    typedef typename Op::rtype WT;
    Size size = srcmat.size();
    size.width *= srcmat.channels();
    AutoBuffer<WT> buffer(size.width);
    WT* buf = buffer;
    ST* dst = dstmat.ptr<ST>();
    const T* src = srcmat.ptr<T>();
    size_t srcstep = srcmat.step/sizeof(src[0]);
    int i;
    Op op;

    // Seed the accumulator row with the first source row.
    for( i = 0; i < size.width; i++ )
        buf[i] = src[i];

    // Fold the remaining size.height - 1 rows into the accumulator.
    for( ; --size.height; )
    {
        src += srcstep;
        i = 0;
        #if CV_ENABLE_UNROLLED
        for(; i <= size.width - 4; i += 4 )
        {
            WT s0, s1;
            s0 = op(buf[i], (WT)src[i]);
            s1 = op(buf[i+1], (WT)src[i+1]);
            buf[i] = s0; buf[i+1] = s1;

            s0 = op(buf[i+2], (WT)src[i+2]);
            s1 = op(buf[i+3], (WT)src[i+3]);
            buf[i+2] = s0; buf[i+3] = s1;
        }
        #endif
        for( ; i < size.width; i++ )
            buf[i] = op(buf[i], (WT)src[i]);
    }

    // Convert the accumulated row to the destination type.
    for( i = 0; i < size.width; i++ )
        dst[i] = (ST)buf[i];
}
03380 
03381 
// Column reduction (dim = 1): fold each row of srcmat into one element per
// channel of the corresponding dstmat row, using the binary operation Op.
// T = source element type, ST = destination element type, WT (Op::rtype) =
// accumulator type.
template<typename T, typename ST, class Op> static void
reduceC_( const Mat& srcmat, Mat& dstmat )
{
    typedef typename Op::rtype WT;
    Size size = srcmat.size();
    int i, k, cn = srcmat.channels();
    size.width *= cn;
    Op op;

    for( int y = 0; y < size.height; y++ )
    {
        const T* src = srcmat.ptr<T>(y);
        ST* dst = dstmat.ptr<ST>(y);
        // Single-column source: nothing to reduce, just convert.
        if( size.width == cn )
            for( k = 0; k < cn; k++ )
                dst[k] = src[k];
        else
        {
            for( k = 0; k < cn; k++ )
            {
                // Two interleaved accumulators (a0, a1), seeded with the
                // first two elements of this channel, then combined at the
                // end. Note this changes the association order vs. a plain
                // left fold, which only matters for float rounding.
                WT a0 = src[k], a1 = src[k+cn];
                for( i = 2*cn; i <= size.width - 4*cn; i += 4*cn )
                {
                    a0 = op(a0, (WT)src[i+k]);
                    a1 = op(a1, (WT)src[i+k+cn]);
                    a0 = op(a0, (WT)src[i+k+cn*2]);
                    a1 = op(a1, (WT)src[i+k+cn*3]);
                }

                // Tail elements not covered by the 4x unrolled loop.
                for( ; i < size.width; i += cn )
                {
                    a0 = op(a0, (WT)src[i+k]);
                }
                a0 = op(a0, a1);
              dst[k] = (ST)a0;
            }
        }
    }
}
03421 
// Common signature for all CPU reduce kernels dispatched from cv::reduce().
typedef void (*ReduceFunc)( const Mat& src, Mat& dst );
03423 
03424 }
03425 
// Row-reduction (dim = 0) kernel aliases: reduce<Op>R<srcDepth><dstDepth>.
// The 8u sums accumulate in int even when the destination is float/double.
#define reduceSumR8u32s  reduceR_<uchar, int,   OpAdd<int> >
#define reduceSumR8u32f  reduceR_<uchar, float, OpAdd<int> >
#define reduceSumR8u64f  reduceR_<uchar, double,OpAdd<int> >
#define reduceSumR16u32f reduceR_<ushort,float, OpAdd<float> >
#define reduceSumR16u64f reduceR_<ushort,double,OpAdd<double> >
#define reduceSumR16s32f reduceR_<short, float, OpAdd<float> >
#define reduceSumR16s64f reduceR_<short, double,OpAdd<double> >
#define reduceSumR32f32f reduceR_<float, float, OpAdd<float> >
#define reduceSumR32f64f reduceR_<float, double,OpAdd<double> >
#define reduceSumR64f64f reduceR_<double,double,OpAdd<double> >

// Min/max row reductions keep source and destination depth identical.
#define reduceMaxR8u  reduceR_<uchar, uchar, OpMax<uchar> >
#define reduceMaxR16u reduceR_<ushort,ushort,OpMax<ushort> >
#define reduceMaxR16s reduceR_<short, short, OpMax<short> >
#define reduceMaxR32f reduceR_<float, float, OpMax<float> >
#define reduceMaxR64f reduceR_<double,double,OpMax<double> >

#define reduceMinR8u  reduceR_<uchar, uchar, OpMin<uchar> >
#define reduceMinR16u reduceR_<ushort,ushort,OpMin<ushort> >
#define reduceMinR16s reduceR_<short, short, OpMin<short> >
#define reduceMinR32f reduceR_<float, float, OpMin<float> >
#define reduceMinR64f reduceR_<double,double,OpMin<double> >
03448 
03449 #ifdef HAVE_IPP
// IPP-backed column sum (dim = 1) into a CV_64F destination: each source
// row is summed with ippiSum over a 1-row ROI. Returns false when the
// type combination is unsupported or any IPP call fails, so the caller
// can fall back to the generic implementation.
static inline bool ipp_reduceSumC_8u16u16s32f_64f(const cv::Mat& srcmat, cv::Mat& dstmat)
{
    int sstep = (int)srcmat.step, stype = srcmat.type(),
            ddepth = dstmat.depth();

    // Each IPP call processes exactly one source row.
    IppiSize roisize = { srcmat.size().width, 1 };

    typedef IppStatus (CV_STDCALL * ippiSum)(const void * pSrc, int srcStep, IppiSize roiSize, Ipp64f* pSum);
    typedef IppStatus (CV_STDCALL * ippiSumHint)(const void * pSrc, int srcStep, IppiSize roiSize, Ipp64f* pSum, IppHintAlgorithm hint);
    ippiSum ippFunc = 0;
    ippiSumHint ippFuncHint = 0;

    if(ddepth == CV_64F)
    {
        // Integer sources use the plain entry points; float sources use
        // the hinted variants (accuracy/performance hint parameter).
        ippFunc =
            stype == CV_8UC1 ? (ippiSum)ippiSum_8u_C1R :
            stype == CV_8UC3 ? (ippiSum)ippiSum_8u_C3R :
            stype == CV_8UC4 ? (ippiSum)ippiSum_8u_C4R :
            stype == CV_16UC1 ? (ippiSum)ippiSum_16u_C1R :
            stype == CV_16UC3 ? (ippiSum)ippiSum_16u_C3R :
            stype == CV_16UC4 ? (ippiSum)ippiSum_16u_C4R :
            stype == CV_16SC1 ? (ippiSum)ippiSum_16s_C1R :
            stype == CV_16SC3 ? (ippiSum)ippiSum_16s_C3R :
            stype == CV_16SC4 ? (ippiSum)ippiSum_16s_C4R : 0;
        ippFuncHint =
            stype == CV_32FC1 ? (ippiSumHint)ippiSum_32f_C1R :
            stype == CV_32FC3 ? (ippiSumHint)ippiSum_32f_C3R :
            stype == CV_32FC4 ? (ippiSumHint)ippiSum_32f_C4R : 0;
    }

    if(ippFunc)
    {
        for(int y = 0; y < srcmat.size().height; y++)
        {
            if(ippFunc(srcmat.ptr(y), sstep, roisize, dstmat.ptr<Ipp64f>(y)) < 0)
                return false;
        }
        return true;
    }
    else if(ippFuncHint)
    {
        for(int y = 0; y < srcmat.size().height; y++)
        {
            if(ippFuncHint(srcmat.ptr(y), sstep, roisize, dstmat.ptr<Ipp64f>(y), ippAlgHintAccurate) < 0)
                return false;
        }
        return true;
    }

    return false;
}
03501 
// Column sum into CV_64F: try IPP first (CV_IPP_RUN returns on success),
// otherwise fall back to the generic reduceC_ kernel with a double
// accumulator, dispatched on the source depth.
static inline void reduceSumC_8u16u16s32f_64f(const cv::Mat& srcmat, cv::Mat& dstmat)
{
    CV_IPP_RUN(true, ipp_reduceSumC_8u16u16s32f_64f(srcmat, dstmat));

    cv::ReduceFunc func = 0;

    if(dstmat.depth() == CV_64F)
    {
        int sdepth = CV_MAT_DEPTH(srcmat.type());
        func =
            sdepth == CV_8U ? (cv::ReduceFunc)cv::reduceC_<uchar, double,   cv::OpAdd<double> > :
            sdepth == CV_16U ? (cv::ReduceFunc)cv::reduceC_<ushort, double,   cv::OpAdd<double> > :
            sdepth == CV_16S ? (cv::ReduceFunc)cv::reduceC_<short, double,   cv::OpAdd<double> > :
            sdepth == CV_32F ? (cv::ReduceFunc)cv::reduceC_<float, double,   cv::OpAdd<double> > : 0;
    }
    // Any other destination depth is a caller error for this helper.
    CV_Assert(func);

    func(srcmat, dstmat);
}
03521 
03522 #endif
03523 
// Column-reduction (dim = 1) sum aliases: reduce<Op>C<srcDepth><dstDepth>.
#define reduceSumC8u32s  reduceC_<uchar, int,   OpAdd<int> >
#define reduceSumC8u32f  reduceC_<uchar, float, OpAdd<int> >
#define reduceSumC16u32f reduceC_<ushort,float, OpAdd<float> >
#define reduceSumC16s32f reduceC_<short, float, OpAdd<float> >
#define reduceSumC32f32f reduceC_<float, float, OpAdd<float> >
#define reduceSumC64f64f reduceC_<double,double,OpAdd<double> >

// The CV_64F-destination sums go through the IPP-accelerated helper when
// IPP is available, and through the generic kernels otherwise.
#ifdef HAVE_IPP
#define reduceSumC8u64f  reduceSumC_8u16u16s32f_64f
#define reduceSumC16u64f reduceSumC_8u16u16s32f_64f
#define reduceSumC16s64f reduceSumC_8u16u16s32f_64f
#define reduceSumC32f64f reduceSumC_8u16u16s32f_64f
#else
#define reduceSumC8u64f  reduceC_<uchar, double,OpAdd<int> >
#define reduceSumC16u64f reduceC_<ushort,double,OpAdd<double> >
#define reduceSumC16s64f reduceC_<short, double,OpAdd<double> >
#define reduceSumC32f64f reduceC_<float, double,OpAdd<double> >
#endif
03542 
#ifdef HAVE_IPP
// Generates a pair of functions per (IPP type suffix, operation):
//   ipp_reduce<Op>C<favor> - per-row single-channel IPP min/max reduction,
//     returning false when IPP cannot handle the input;
//   reduce<Op>C<favor>     - public alias that tries the IPP path first
//     (CV_IPP_RUN returns on success) and falls back to reduceC_.
#define REDUCE_OP(favor, optype, type1, type2) \
static inline bool ipp_reduce##optype##C##favor(const cv::Mat& srcmat, cv::Mat& dstmat) \
{ \
    if((srcmat.channels() == 1)) \
    { \
        int sstep = (int)srcmat.step; \
        typedef Ipp##favor IppType; \
        IppiSize roisize = ippiSize(srcmat.size().width, 1);\
        for(int y = 0; y < srcmat.size().height; y++)\
        {\
            if(ippi##optype##_##favor##_C1R(srcmat.ptr<IppType>(y), sstep, roisize, dstmat.ptr<IppType>(y)) < 0)\
                return false;\
        }\
        return true;\
    }\
    return false; \
} \
static inline void reduce##optype##C##favor(const cv::Mat& srcmat, cv::Mat& dstmat) \
{ \
    CV_IPP_RUN(true, ipp_reduce##optype##C##favor(srcmat, dstmat)); \
    cv::reduceC_ < type1, type2, cv::Op##optype < type2 > >(srcmat, dstmat); \
}
#endif

// Column-wise max reduction: IPP-backed wrappers when available,
// plain reduceC_ aliases otherwise.
#ifdef HAVE_IPP
REDUCE_OP(8u, Max, uchar, uchar)
REDUCE_OP(16u, Max, ushort, ushort)
REDUCE_OP(16s, Max, short, short)
REDUCE_OP(32f, Max, float, float)
#else
#define reduceMaxC8u  reduceC_<uchar, uchar, OpMax<uchar> >
#define reduceMaxC16u reduceC_<ushort,ushort,OpMax<ushort> >
#define reduceMaxC16s reduceC_<short, short, OpMax<short> >
#define reduceMaxC32f reduceC_<float, float, OpMax<float> >
#endif
// No IPP double variant; always use the generic kernel.
#define reduceMaxC64f reduceC_<double,double,OpMax<double> >

// Column-wise min reduction, same structure as max above.
#ifdef HAVE_IPP
REDUCE_OP(8u, Min, uchar, uchar)
REDUCE_OP(16u, Min, ushort, ushort)
REDUCE_OP(16s, Min, short, short)
REDUCE_OP(32f, Min, float, float)
#else
#define reduceMinC8u  reduceC_<uchar, uchar, OpMin<uchar> >
#define reduceMinC16u reduceC_<ushort,ushort,OpMin<ushort> >
#define reduceMinC16s reduceC_<short, short, OpMin<short> >
#define reduceMinC32f reduceC_<float, float, OpMin<float> >
#endif
#define reduceMinC64f reduceC_<double,double,OpMin<double> >
03593 
03594 #ifdef HAVE_OPENCL
03595 
03596 namespace cv {
03597 
03598 static bool ocl_reduce(InputArray _src, OutputArray _dst,
03599                        int dim, int op, int op0, int stype, int dtype)
03600 {
03601     const int min_opt_cols = 128, buf_cols = 32;
03602     int sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype),
03603             ddepth = CV_MAT_DEPTH(dtype), ddepth0 = ddepth;
03604     const ocl::Device &defDev = ocl::Device::getDefault();
03605     bool doubleSupport = defDev.doubleFPConfig() > 0;
03606 
03607     size_t wgs = defDev.maxWorkGroupSize();
03608     bool useOptimized = 1 == dim && _src.cols() > min_opt_cols && (wgs >= buf_cols);
03609 
03610     if (!doubleSupport && (sdepth == CV_64F || ddepth == CV_64F))
03611         return false;
03612 
03613     if (op == CV_REDUCE_AVG)
03614     {
03615         if (sdepth < CV_32S && ddepth < CV_32S)
03616             ddepth = CV_32S;
03617     }
03618 
03619     const char * const ops[4] = { "OCL_CV_REDUCE_SUM", "OCL_CV_REDUCE_AVG",
03620                                   "OCL_CV_REDUCE_MAX", "OCL_CV_REDUCE_MIN" };
03621     int wdepth = std::max(ddepth, CV_32F);
03622     if (useOptimized)
03623     {
03624         size_t tileHeight = (size_t)(wgs / buf_cols);
03625         if (defDev.isIntel())
03626         {
03627             static const size_t maxItemInGroupCount = 16;
03628             tileHeight = min(tileHeight, defDev.localMemSize() / buf_cols / CV_ELEM_SIZE(CV_MAKETYPE(wdepth, cn)) / maxItemInGroupCount);
03629         }
03630         char cvt[3][40];
03631         cv::String build_opt = format("-D OP_REDUCE_PRE -D BUF_COLS=%d -D TILE_HEIGHT=%d -D %s -D dim=1"
03632                                             " -D cn=%d -D ddepth=%d"
03633                                             " -D srcT=%s -D bufT=%s -D dstT=%s"
03634                                             " -D convertToWT=%s -D convertToBufT=%s -D convertToDT=%s%s",
03635                                             buf_cols, tileHeight, ops[op], cn, ddepth,
03636                                             ocl::typeToStr(sdepth),
03637                                             ocl::typeToStr(ddepth),
03638                                             ocl::typeToStr(ddepth0),
03639                                             ocl::convertTypeStr(ddepth, wdepth, 1, cvt[0]),
03640                                             ocl::convertTypeStr(sdepth, ddepth, 1, cvt[1]),
03641                                             ocl::convertTypeStr(wdepth, ddepth0, 1, cvt[2]),
03642                                             doubleSupport ? " -D DOUBLE_SUPPORT" : "");
03643         ocl::Kernel k("reduce_horz_opt", ocl::core::reduce2_oclsrc, build_opt);
03644         if (k.empty())
03645             return false;
03646         UMat src = _src.getUMat();
03647         Size dsize(1, src.rows);
03648         _dst.create(dsize, dtype);
03649         UMat dst = _dst.getUMat();
03650 
03651         if (op0 == CV_REDUCE_AVG)
03652             k.args(ocl::KernelArg::ReadOnly(src),
03653                       ocl::KernelArg::WriteOnlyNoSize(dst), 1.0f / src.cols);
03654         else
03655             k.args(ocl::KernelArg::ReadOnly(src),
03656                       ocl::KernelArg::WriteOnlyNoSize(dst));
03657 
03658         size_t localSize[2] = { (size_t)buf_cols, (size_t)tileHeight};
03659         size_t globalSize[2] = { (size_t)buf_cols, (size_t)src.rows };
03660         return k.run(2, globalSize, localSize, false);
03661     }
03662     else
03663     {
03664         char cvt[2][40];
03665         cv::String build_opt = format("-D %s -D dim=%d -D cn=%d -D ddepth=%d"
03666                                       " -D srcT=%s -D dstT=%s -D dstT0=%s -D convertToWT=%s"
03667                                       " -D convertToDT=%s -D convertToDT0=%s%s",
03668                                       ops[op], dim, cn, ddepth, ocl::typeToStr(useOptimized ? ddepth : sdepth),
03669                                       ocl::typeToStr(ddepth), ocl::typeToStr(ddepth0),
03670                                       ocl::convertTypeStr(ddepth, wdepth, 1, cvt[0]),
03671                                       ocl::convertTypeStr(sdepth, ddepth, 1, cvt[0]),
03672                                       ocl::convertTypeStr(wdepth, ddepth0, 1, cvt[1]),
03673                                       doubleSupport ? " -D DOUBLE_SUPPORT" : "");
03674 
03675         ocl::Kernel k("reduce", ocl::core::reduce2_oclsrc, build_opt);
03676         if (k.empty())
03677             return false;
03678 
03679         UMat src = _src.getUMat();
03680         Size dsize(dim == 0 ? src.cols : 1, dim == 0 ? 1 : src.rows);
03681         _dst.create(dsize, dtype);
03682         UMat dst = _dst.getUMat();
03683 
03684         ocl::KernelArg srcarg = ocl::KernelArg::ReadOnly(src),
03685                 temparg = ocl::KernelArg::WriteOnlyNoSize(dst);
03686 
03687         if (op0 == CV_REDUCE_AVG)
03688             k.args(srcarg, temparg, 1.0f / (dim == 0 ? src.rows : src.cols));
03689         else
03690             k.args(srcarg, temparg);
03691 
03692         size_t globalsize = std::max(dsize.width, dsize.height);
03693         return k.run(1, &globalsize, NULL, false);
03694     }
03695 }
03696 
03697 }
03698 
03699 #endif
03700 
03701 void cv::reduce(InputArray _src, OutputArray _dst, int dim, int op, int dtype)
03702 {
03703     CV_Assert( _src.dims() <= 2 );
03704     int op0 = op;
03705     int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
03706     if( dtype < 0 )
03707         dtype = _dst.fixedType() ? _dst.type() : stype;
03708     dtype = CV_MAKETYPE(dtype >= 0 ? dtype : stype, cn);
03709     int ddepth = CV_MAT_DEPTH(dtype);
03710 
03711     CV_Assert( cn == CV_MAT_CN(dtype) );
03712     CV_Assert( op == CV_REDUCE_SUM || op == CV_REDUCE_MAX ||
03713                op == CV_REDUCE_MIN || op == CV_REDUCE_AVG );
03714 
03715 #ifdef HAVE_OPENCL
03716     CV_OCL_RUN(_dst.isUMat(),
03717                ocl_reduce(_src, _dst, dim, op, op0, stype, dtype))
03718 #endif
03719 
03720     Mat src = _src.getMat();
03721     _dst.create(dim == 0 ? 1 : src.rows, dim == 0 ? src.cols : 1, dtype);
03722     Mat dst = _dst.getMat(), temp = dst;
03723 
03724     if( op == CV_REDUCE_AVG )
03725     {
03726         op = CV_REDUCE_SUM;
03727         if( sdepth < CV_32S && ddepth < CV_32S )
03728         {
03729             temp.create(dst.rows, dst.cols, CV_32SC(cn));
03730             ddepth = CV_32S;
03731         }
03732     }
03733 
03734     ReduceFunc func = 0;
03735     if( dim == 0 )
03736     {
03737         if( op == CV_REDUCE_SUM )
03738         {
03739             if(sdepth == CV_8U && ddepth == CV_32S)
03740                 func = GET_OPTIMIZED(reduceSumR8u32s);
03741             else if(sdepth == CV_8U && ddepth == CV_32F)
03742                 func = reduceSumR8u32f;
03743             else if(sdepth == CV_8U && ddepth == CV_64F)
03744                 func = reduceSumR8u64f;
03745             else if(sdepth == CV_16U && ddepth == CV_32F)
03746                 func = reduceSumR16u32f;
03747             else if(sdepth == CV_16U && ddepth == CV_64F)
03748                 func = reduceSumR16u64f;
03749             else if(sdepth == CV_16S && ddepth == CV_32F)
03750                 func = reduceSumR16s32f;
03751             else if(sdepth == CV_16S && ddepth == CV_64F)
03752                 func = reduceSumR16s64f;
03753             else if(sdepth == CV_32F && ddepth == CV_32F)
03754                 func = GET_OPTIMIZED(reduceSumR32f32f);
03755             else if(sdepth == CV_32F && ddepth == CV_64F)
03756                 func = reduceSumR32f64f;
03757             else if(sdepth == CV_64F && ddepth == CV_64F)
03758                 func = reduceSumR64f64f;
03759         }
03760         else if(op == CV_REDUCE_MAX)
03761         {
03762             if(sdepth == CV_8U && ddepth == CV_8U)
03763                 func = GET_OPTIMIZED(reduceMaxR8u);
03764             else if(sdepth == CV_16U && ddepth == CV_16U)
03765                 func = reduceMaxR16u;
03766             else if(sdepth == CV_16S && ddepth == CV_16S)
03767                 func = reduceMaxR16s;
03768             else if(sdepth == CV_32F && ddepth == CV_32F)
03769                 func = GET_OPTIMIZED(reduceMaxR32f);
03770             else if(sdepth == CV_64F && ddepth == CV_64F)
03771                 func = reduceMaxR64f;
03772         }
03773         else if(op == CV_REDUCE_MIN)
03774         {
03775             if(sdepth == CV_8U && ddepth == CV_8U)
03776                 func = GET_OPTIMIZED(reduceMinR8u);
03777             else if(sdepth == CV_16U && ddepth == CV_16U)
03778                 func = reduceMinR16u;
03779             else if(sdepth == CV_16S && ddepth == CV_16S)
03780                 func = reduceMinR16s;
03781             else if(sdepth == CV_32F && ddepth == CV_32F)
03782                 func = GET_OPTIMIZED(reduceMinR32f);
03783             else if(sdepth == CV_64F && ddepth == CV_64F)
03784                 func = reduceMinR64f;
03785         }
03786     }
03787     else
03788     {
03789         if(op == CV_REDUCE_SUM)
03790         {
03791             if(sdepth == CV_8U && ddepth == CV_32S)
03792                 func = GET_OPTIMIZED(reduceSumC8u32s);
03793             else if(sdepth == CV_8U && ddepth == CV_32F)
03794                 func = reduceSumC8u32f;
03795             else if(sdepth == CV_8U && ddepth == CV_64F)
03796                 func = reduceSumC8u64f;
03797             else if(sdepth == CV_16U && ddepth == CV_32F)
03798                 func = reduceSumC16u32f;
03799             else if(sdepth == CV_16U && ddepth == CV_64F)
03800                 func = reduceSumC16u64f;
03801             else if(sdepth == CV_16S && ddepth == CV_32F)
03802                 func = reduceSumC16s32f;
03803             else if(sdepth == CV_16S && ddepth == CV_64F)
03804                 func = reduceSumC16s64f;
03805             else if(sdepth == CV_32F && ddepth == CV_32F)
03806                 func = GET_OPTIMIZED(reduceSumC32f32f);
03807             else if(sdepth == CV_32F && ddepth == CV_64F)
03808                 func = reduceSumC32f64f;
03809             else if(sdepth == CV_64F && ddepth == CV_64F)
03810                 func = reduceSumC64f64f;
03811         }
03812         else if(op == CV_REDUCE_MAX)
03813         {
03814             if(sdepth == CV_8U && ddepth == CV_8U)
03815                 func = GET_OPTIMIZED(reduceMaxC8u);
03816             else if(sdepth == CV_16U && ddepth == CV_16U)
03817                 func = reduceMaxC16u;
03818             else if(sdepth == CV_16S && ddepth == CV_16S)
03819                 func = reduceMaxC16s;
03820             else if(sdepth == CV_32F && ddepth == CV_32F)
03821                 func = GET_OPTIMIZED(reduceMaxC32f);
03822             else if(sdepth == CV_64F && ddepth == CV_64F)
03823                 func = reduceMaxC64f;
03824         }
03825         else if(op == CV_REDUCE_MIN)
03826         {
03827             if(sdepth == CV_8U && ddepth == CV_8U)
03828                 func = GET_OPTIMIZED(reduceMinC8u);
03829             else if(sdepth == CV_16U && ddepth == CV_16U)
03830                 func = reduceMinC16u;
03831             else if(sdepth == CV_16S && ddepth == CV_16S)
03832                 func = reduceMinC16s;
03833             else if(sdepth == CV_32F && ddepth == CV_32F)
03834                 func = GET_OPTIMIZED(reduceMinC32f);
03835             else if(sdepth == CV_64F && ddepth == CV_64F)
03836                 func = reduceMinC64f;
03837         }
03838     }
03839 
03840     if( !func )
03841         CV_Error( CV_StsUnsupportedFormat,
03842                   "Unsupported combination of input and output array formats" );
03843 
03844     func( src, temp );
03845 
03846     if( op0 == CV_REDUCE_AVG )
03847         temp.convertTo(dst, dst.type(), 1./(dim == 0 ? src.rows : src.cols));
03848 }
03849 
03850 
03851 //////////////////////////////////////// sort ///////////////////////////////////////////
03852 
03853 namespace cv
03854 {
03855 
03856 #ifdef HAVE_IPP
03857 #define USE_IPP_SORT
03858 
03859 typedef IppStatus (CV_STDCALL * IppSortFunc)(void *, int);
03860 typedef IppSortFunc IppFlipFunc;
03861 
// Select the in-place IPP sort primitive for the given element depth and
// direction, or return 0 when no usable IPP routine exists for that depth.
// NOTE(review): all non-8U entries are compiled out by IPP_DISABLE_BLOCK,
// so only CV_8U is currently accelerated — presumably the other routines
// were disabled deliberately; confirm before re-enabling them.
static IppSortFunc getSortFunc(int depth, bool sortDescending)
{
    if (!sortDescending)
        return depth == CV_8U ? (IppSortFunc)ippsSortAscend_8u_I :
#if IPP_DISABLE_BLOCK
            depth == CV_16U ? (IppSortFunc)ippsSortAscend_16u_I :
            depth == CV_16S ? (IppSortFunc)ippsSortAscend_16s_I :
            depth == CV_32S ? (IppSortFunc)ippsSortAscend_32s_I :
            depth == CV_32F ? (IppSortFunc)ippsSortAscend_32f_I :
            depth == CV_64F ? (IppSortFunc)ippsSortAscend_64f_I :
#endif
            0;
    else
        return depth == CV_8U ? (IppSortFunc)ippsSortDescend_8u_I :
#if IPP_DISABLE_BLOCK
            depth == CV_16U ? (IppSortFunc)ippsSortDescend_16u_I :
            depth == CV_16S ? (IppSortFunc)ippsSortDescend_16s_I :
            depth == CV_32S ? (IppSortFunc)ippsSortDescend_32s_I :
            depth == CV_32F ? (IppSortFunc)ippsSortDescend_32f_I :
            depth == CV_64F ? (IppSortFunc)ippsSortDescend_64f_I :
#endif
            0;
}
03885 
// Select the in-place IPP reverse ("flip") primitive by element depth, or
// 0 for unsupported depths. Reversal only moves elements, so only the
// element size matters: same-sized depths deliberately share one routine
// (8S uses the 8u flip, 16S the 16u flip, 32S the 32f flip).
static IppFlipFunc getFlipFunc(int depth)
{
    CV_SUPPRESS_DEPRECATED_START
    return
            depth == CV_8U || depth == CV_8S ? (IppFlipFunc)ippsFlip_8u_I :
            depth == CV_16U || depth == CV_16S ? (IppFlipFunc)ippsFlip_16u_I :
            depth == CV_32S || depth == CV_32F ? (IppFlipFunc)ippsFlip_32f_I :
            depth == CV_64F ? (IppFlipFunc)ippsFlip_64f_I : 0;
    CV_SUPPRESS_DEPRECATED_END
}
03896 
03897 
03898 #endif
03899 
// Sort each row (CV_SORT_EVERY_ROW) or each column of src into dst.
// dst must already have the same size/type as src; T is the element type.
// Uses the in-place IPP sort/flip routines when available, falling back to
// std::sort (plus a manual reversal for descending order) otherwise.
template<typename T> static void sort_( const Mat& src, Mat& dst, int flags )
{
    AutoBuffer<T> buf;
    T* bptr;
    int i, j, n, len;
    bool sortRows = (flags & 1) == CV_SORT_EVERY_ROW;
    bool inplace = src.data == dst.data;    // rows may be sorted in place
    bool sortDescending = (flags & CV_SORT_DESCENDING) != 0;

    if( sortRows )
        n = src.rows, len = src.cols;
    else
    {
        // column mode: each column is gathered into a contiguous scratch buffer
        n = src.cols, len = src.rows;
        buf.allocate(len);
    }
    bptr = (T*)buf;

#ifdef USE_IPP_SORT
    int depth = src.depth();
    IppSortFunc ippSortFunc = 0;
    IppFlipFunc ippFlipFunc = 0;
    CV_IPP_CHECK()
    {
        ippSortFunc = getSortFunc(depth, sortDescending);
        ippFlipFunc = getFlipFunc(depth);
    }
#endif

    for( i = 0; i < n; i++ )
    {
        T* ptr = bptr;
        if( sortRows )
        {
            // operate directly on the destination row; copy the source row
            // over first unless src and dst share storage
            T* dptr = dst.ptr<T>(i);
            if( !inplace )
            {
                const T* sptr = src.ptr<T>(i);
                memcpy(dptr, sptr, sizeof(T) * len);
            }
            ptr = dptr;
        }
        else
        {
            // gather column i into the scratch buffer
            for( j = 0; j < len; j++ )
                ptr[j] = src.ptr<T>(j)[i];
        }

#ifdef USE_IPP_SORT
        if (!ippSortFunc || ippSortFunc(ptr, len) < 0)
#endif
        {
#ifdef USE_IPP_SORT
            if (depth == CV_8U)
                setIppErrorStatus();
#endif
            std::sort( ptr, ptr + len );
            if( sortDescending )
            {
                // std::sort produced ascending order; reverse the range,
                // preferring the IPP flip routine when it is available
#ifdef USE_IPP_SORT
                if (!ippFlipFunc || ippFlipFunc(ptr, len) < 0)
#endif
                {
#ifdef USE_IPP_SORT
                    setIppErrorStatus();
#endif
                    for( j = 0; j < len/2; j++ )
                        std::swap(ptr[j], ptr[len-1-j]);
                }
#ifdef USE_IPP_SORT
                else
                {
                    CV_IMPL_ADD(CV_IMPL_IPP);
                }
#endif
            }
        }
#ifdef USE_IPP_SORT
        else
        {
            CV_IMPL_ADD(CV_IMPL_IPP);
        }
#endif

        // scatter the sorted column back into the destination
        if( !sortRows )
            for( j = 0; j < len; j++ )
                dst.ptr<T>(j)[i] = ptr[j];
    }
}
03989 
// Comparator that orders integer indices by the values they reference in
// an external array: index a precedes index b iff arr[a] < arr[b].
// Used to sort an index permutation without moving the values themselves.
template<typename _Tp> class LessThanIdx
{
public:
    LessThanIdx( const _Tp* _arr ) : arr(_arr) {}
    // strict weak ordering over indices, delegating to the element values
    bool operator()(int a, int b) const
    {
        const _Tp& va = arr[a];
        const _Tp& vb = arr[b];
        return va < vb;
    }
    const _Tp* arr;
};
03997 
03998 #if defined USE_IPP_SORT && IPP_DISABLE_BLOCK
03999 
04000 typedef IppStatus (CV_STDCALL *IppSortIndexFunc)(void *, int *, int);
04001 
// Select the IPP indexed-sort primitive (sorts an index array in place,
// keyed by the values array) for the given depth and direction, or 0 if
// unsupported. The enclosing IPP_DISABLE_BLOCK guard currently compiles
// this helper out entirely.
static IppSortIndexFunc getSortIndexFunc(int depth, bool sortDescending)
{
    if (!sortDescending)
        return depth == CV_8U ? (IppSortIndexFunc)ippsSortIndexAscend_8u_I :
            depth == CV_16U ? (IppSortIndexFunc)ippsSortIndexAscend_16u_I :
            depth == CV_16S ? (IppSortIndexFunc)ippsSortIndexAscend_16s_I :
            depth == CV_32S ? (IppSortIndexFunc)ippsSortIndexAscend_32s_I :
            depth == CV_32F ? (IppSortIndexFunc)ippsSortIndexAscend_32f_I :
            depth == CV_64F ? (IppSortIndexFunc)ippsSortIndexAscend_64f_I : 0;
    else
        return depth == CV_8U ? (IppSortIndexFunc)ippsSortIndexDescend_8u_I :
            depth == CV_16U ? (IppSortIndexFunc)ippsSortIndexDescend_16u_I :
            depth == CV_16S ? (IppSortIndexFunc)ippsSortIndexDescend_16s_I :
            depth == CV_32S ? (IppSortIndexFunc)ippsSortIndexDescend_32s_I :
            depth == CV_32F ? (IppSortIndexFunc)ippsSortIndexDescend_32f_I :
            depth == CV_64F ? (IppSortIndexFunc)ippsSortIndexDescend_64f_I : 0;
}
04019 
04020 #endif
04021 
// Compute, for each row (CV_SORT_EVERY_ROW) or column of src, the
// permutation of indices that sorts it, writing the int indices into dst.
// Unlike sort_, in-place operation is not allowed, since dst holds CV_32S
// indices rather than values. T is the source element type.
template<typename T> static void sortIdx_( const Mat& src, Mat& dst, int flags )
{
    AutoBuffer<T> buf;
    AutoBuffer<int> ibuf;
    T* bptr;
    int* _iptr;
    int i, j, n, len;
    bool sortRows = (flags & 1) == CV_SORT_EVERY_ROW;
    bool sortDescending = (flags & CV_SORT_DESCENDING) != 0;

    CV_Assert( src.data != dst.data );

    if( sortRows )
        n = src.rows, len = src.cols;
    else
    {
        // column mode: scratch buffers hold one gathered column of values
        // and its index permutation
        n = src.cols, len = src.rows;
        buf.allocate(len);
        ibuf.allocate(len);
    }
    bptr = (T*)buf;
    _iptr = (int*)ibuf;

#if defined USE_IPP_SORT && IPP_DISABLE_BLOCK
    int depth = src.depth();
    IppSortIndexFunc ippFunc = 0;
    IppFlipFunc ippFlipFunc = 0;
    CV_IPP_CHECK()
    {
        ippFunc = getSortIndexFunc(depth, sortDescending);
        ippFlipFunc = getFlipFunc(depth);
    }
#endif

    for( i = 0; i < n; i++ )
    {
        T* ptr = bptr;
        int* iptr = _iptr;

        if( sortRows )
        {
            // rows are contiguous: read values in place, write the index
            // permutation directly into the destination row
            ptr = (T*)(src.data + src.step*i);
            iptr = dst.ptr<int>(i);
        }
        else
        {
            // gather column i into the value buffer
            for( j = 0; j < len; j++ )
                ptr[j] = src.ptr<T>(j)[i];
        }
        // start from the identity permutation
        for( j = 0; j < len; j++ )
            iptr[j] = j;

#if defined USE_IPP_SORT && IPP_DISABLE_BLOCK
        if (sortRows || !ippFunc || ippFunc(ptr, iptr, len) < 0)
#endif
        {
#if defined USE_IPP_SORT && IPP_DISABLE_BLOCK
            setIppErrorStatus();
#endif
            // sort the indices by the values they refer to
            std::sort( iptr, iptr + len, LessThanIdx<T>(ptr) );
            if( sortDescending )
            {
#if defined USE_IPP_SORT && IPP_DISABLE_BLOCK
                if (!ippFlipFunc || ippFlipFunc(iptr, len) < 0)
#endif
                {
#if defined USE_IPP_SORT && IPP_DISABLE_BLOCK
                    setIppErrorStatus();
#endif
                    // reverse the ascending permutation
                    for( j = 0; j < len/2; j++ )
                        std::swap(iptr[j], iptr[len-1-j]);
                }
#if defined USE_IPP_SORT && IPP_DISABLE_BLOCK
                else
                {
                    CV_IMPL_ADD(CV_IMPL_IPP);
                }
#endif
            }
        }
#if defined USE_IPP_SORT && IPP_DISABLE_BLOCK
        else
        {
            CV_IMPL_ADD(CV_IMPL_IPP);
        }
#endif

        // scatter the index column back into the destination
        if( !sortRows )
            for( j = 0; j < len; j++ )
                dst.ptr<int>(j)[i] = iptr[j];
    }
}
04114 
04115 typedef void (*SortFunc)(const Mat& src, Mat& dst, int flags);
04116 
04117 }
04118 
04119 void cv::sort( InputArray _src, OutputArray _dst, int flags )
04120 {
04121     static SortFunc tab[] =
04122     {
04123         sort_<uchar>, sort_<schar>, sort_<ushort>, sort_<short>,
04124         sort_<int>, sort_<float>, sort_<double>, 0
04125     };
04126     Mat src = _src.getMat();
04127     SortFunc func = tab[src.depth()];
04128     CV_Assert( src.dims <= 2 && src.channels() == 1 && func != 0 );
04129     _dst.create( src.size(), src.type() );
04130     Mat dst = _dst.getMat();
04131     func( src, dst, flags );
04132 }
04133 
04134 void cv::sortIdx( InputArray _src, OutputArray _dst, int flags )
04135 {
04136     static SortFunc tab[] =
04137     {
04138         sortIdx_<uchar>, sortIdx_<schar>, sortIdx_<ushort>, sortIdx_<short>,
04139         sortIdx_<int>, sortIdx_<float>, sortIdx_<double>, 0
04140     };
04141     Mat src = _src.getMat();
04142     SortFunc func = tab[src.depth()];
04143     CV_Assert( src.dims <= 2 && src.channels() == 1 && func != 0 );
04144 
04145     Mat dst = _dst.getMat();
04146     if( dst.data == src.data )
04147         _dst.release();
04148     _dst.create( src.size(), CV_32S );
04149     dst = _dst.getMat();
04150     func( src, dst, flags );
04151 }
04152 
04153 
04154 CV_IMPL void cvSetIdentity( CvArr* arr, CvScalar  value )
04155 {
04156     cv::Mat m = cv::cvarrToMat(arr);
04157     cv::setIdentity(m, value);
04158 }
04159 
04160 
04161 CV_IMPL CvScalar  cvTrace( const CvArr* arr )
04162 {
04163     return cv::trace(cv::cvarrToMat(arr));
04164 }
04165 
04166 
04167 CV_IMPL void cvTranspose( const CvArr* srcarr, CvArr* dstarr )
04168 {
04169     cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
04170 
04171     CV_Assert( src.rows == dst.cols && src.cols == dst.rows && src.type() == dst.type() );
04172     transpose( src, dst );
04173 }
04174 
04175 
04176 CV_IMPL void cvCompleteSymm( CvMat* matrix, int LtoR )
04177 {
04178     cv::Mat m = cv::cvarrToMat(matrix);
04179     cv::completeSymm( m, LtoR != 0 );
04180 }
04181 
04182 
04183 CV_IMPL void cvCrossProduct( const CvArr* srcAarr, const CvArr* srcBarr, CvArr* dstarr )
04184 {
04185     cv::Mat srcA = cv::cvarrToMat(srcAarr), dst = cv::cvarrToMat(dstarr);
04186 
04187     CV_Assert( srcA.size() == dst.size() && srcA.type() == dst.type() );
04188     srcA.cross(cv::cvarrToMat(srcBarr)).copyTo(dst);
04189 }
04190 
04191 
04192 CV_IMPL void
04193 cvReduce( const CvArr* srcarr, CvArr* dstarr, int dim, int op )
04194 {
04195     cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
04196 
04197     if( dim < 0 )
04198         dim = src.rows > dst.rows ? 0 : src.cols > dst.cols ? 1 : dst.cols == 1;
04199 
04200     if( dim > 1 )
04201         CV_Error( CV_StsOutOfRange, "The reduced dimensionality index is out of range" );
04202 
04203     if( (dim == 0 && (dst.cols != src.cols || dst.rows != 1)) ||
04204         (dim == 1 && (dst.rows != src.rows || dst.cols != 1)) )
04205         CV_Error( CV_StsBadSize, "The output array size is incorrect" );
04206 
04207     if( src.channels() != dst.channels() )
04208         CV_Error( CV_StsUnmatchedFormats, "Input and output arrays must have the same number of channels" );
04209 
04210     cv::reduce(src, dst, dim, op, dst.type());
04211 }
04212 
04213 
04214 CV_IMPL CvArr*
04215 cvRange( CvArr* arr, double start, double end )
04216 {
04217     int ok = 0;
04218 
04219     CvMat stub, *mat = (CvMat*)arr;
04220     double delta;
04221     int type, step;
04222     double val = start;
04223     int i, j;
04224     int rows, cols;
04225 
04226     if( !CV_IS_MAT(mat) )
04227         mat = cvGetMat( mat, &stub);
04228 
04229     rows = mat->rows;
04230     cols = mat->cols;
04231     type = CV_MAT_TYPE(mat->type);
04232     delta = (end-start)/(rows*cols);
04233 
04234     if( CV_IS_MAT_CONT(mat->type) )
04235     {
04236         cols *= rows;
04237         rows = 1;
04238         step = 1;
04239     }
04240     else
04241         step = mat->step / CV_ELEM_SIZE(type);
04242 
04243     if( type == CV_32SC1 )
04244     {
04245         int* idata = mat->data.i;
04246         int ival = cvRound(val), idelta = cvRound(delta);
04247 
04248         if( fabs(val - ival) < DBL_EPSILON &&
04249             fabs(delta - idelta) < DBL_EPSILON )
04250         {
04251             for( i = 0; i < rows; i++, idata += step )
04252                 for( j = 0; j < cols; j++, ival += idelta )
04253                     idata[j] = ival;
04254         }
04255         else
04256         {
04257             for( i = 0; i < rows; i++, idata += step )
04258                 for( j = 0; j < cols; j++, val += delta )
04259                     idata[j] = cvRound(val);
04260         }
04261     }
04262     else if( type == CV_32FC1 )
04263     {
04264         float* fdata = mat->data.fl;
04265         for( i = 0; i < rows; i++, fdata += step )
04266             for( j = 0; j < cols; j++, val += delta )
04267                 fdata[j] = (float)val;
04268     }
04269     else
04270         CV_Error( CV_StsUnsupportedFormat, "The function only supports 32sC1 and 32fC1 datatypes" );
04271 
04272     ok = 1;
04273     return ok ? arr : 0;
04274 }
04275 
04276 
// cvSort: C API wrapper that sorts values (into _dst) and/or produces the
// sorting index permutation (into _idx) for each row/column of _src, per
// `flags`. Both outputs are optional; each must match _src in size and be
// backed by its own pre-allocated storage.
CV_IMPL void
cvSort( const CvArr* _src, CvArr* _dst, CvArr* _idx, int flags )
{
    cv::Mat src = cv::cvarrToMat(_src);

    if( _idx )
    {
        cv::Mat idx0 = cv::cvarrToMat(_idx), idx = idx0;
        CV_Assert( src.size() == idx.size() && idx.type() == CV_32S && src.data != idx.data );
        cv::sortIdx( src, idx, flags );
        // the C++ call must not have reallocated the caller's buffer
        CV_Assert( idx0.data == idx.data );
    }

    if( _dst )
    {
        cv::Mat dst0 = cv::cvarrToMat(_dst), dst = dst0;
        CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
        cv::sort( src, dst, flags );
        // same no-reallocation guarantee for the values output
        CV_Assert( dst0.data == dst.data );
    }
}
04298 
04299 
// cvKMeans2: C API wrapper around cv::kmeans.
// _samples: input data, one sample per row; _labels: pre-allocated,
// continuous CV_32S vector with one entry per sample; _centers (optional):
// pre-allocated cluster_count x dims output for the cluster centers.
// The CvRNG parameter is accepted for backward compatibility but unused.
// Returns 1; the achieved compactness is stored via _compactness if given.
CV_IMPL int
cvKMeans2( const CvArr* _samples, int cluster_count, CvArr* _labels,
           CvTermCriteria  termcrit, int attempts, CvRNG*,
           int flags, CvArr* _centers, double* _compactness )
{
    cv::Mat data = cv::cvarrToMat(_samples), labels = cv::cvarrToMat(_labels), centers;
    if( _centers )
    {
        centers = cv::cvarrToMat(_centers);

        // fold multi-channel layouts into extra single-channel columns so
        // the geometry checks below compare like with like
        centers = centers.reshape(1);
        data = data.reshape(1);

        CV_Assert( !centers.empty() );
        CV_Assert( centers.rows == cluster_count );
        CV_Assert( centers.cols == data.cols );
        CV_Assert( centers.depth() == data.depth() );
    }
    CV_Assert( labels.isContinuous() && labels.type() == CV_32S &&
        (labels.cols == 1 || labels.rows == 1) &&
        labels.cols + labels.rows - 1 == data.rows );

    double compactness = cv::kmeans(data, cluster_count, labels, termcrit, attempts,
                                    flags, _centers ? cv::_OutputArray(centers) : cv::_OutputArray() );
    if( _compactness )
        *_compactness = compactness;
    return 1;
}
04328 
04329 ///////////////////////////// n-dimensional matrices ////////////////////////////
04330 
04331 namespace cv
04332 {
04333 
// N-dimensional reshape: reinterpret the matrix as having _newndims
// dimensions of sizes _newsz (a 0 entry keeps the corresponding source
// dimension) and _cn channels (0 keeps the current channel count).
// Only continuous matrices are supported; the total number of
// single-channel elements must be preserved. Returns a new header
// sharing the same data buffer.
Mat Mat::reshape(int _cn, int _newndims, const int* _newsz) const
{
    if(_newndims == dims)
    {
        if(_newsz == 0)
            return reshape(_cn);    // channel-count-only change
        if(_newndims == 2)
            return reshape(_cn, _newsz[0]);
    }

    if (isContinuous())
    {
        CV_Assert(_cn >= 0 && _newndims > 0 && _newndims <= CV_MAX_DIM && _newsz);

        if (_cn == 0)
            _cn = this->channels();
        else
            CV_Assert(_cn <= CV_CN_MAX);

        // element counts are compared in single-channel units
        size_t total_elem1_ref = this->total() * this->channels();
        size_t total_elem1 = _cn;

        AutoBuffer<int, 4> newsz_buf( (size_t)_newndims );

        for (int i = 0; i < _newndims; i++)
        {
            CV_Assert(_newsz[i] >= 0);

            if (_newsz[i] > 0)
                newsz_buf[i] = _newsz[i];
            else if (i < dims)
                newsz_buf[i] = this->size[i];   // 0 means "copy from source"
            else
                CV_Error(CV_StsOutOfRange, "Copy dimension (which has zero size) is not present in source matrix");

            total_elem1 *= (size_t)newsz_buf[i];
        }

        if (total_elem1 != total_elem1_ref)
            CV_Error(CV_StsUnmatchedSizes, "Requested and source matrices have different count of elements");

        // build a header over the same data with the new channel count
        // and geometry
        Mat hdr = *this;
        hdr.flags  = (hdr.flags  & ~CV_MAT_CN_MASK) | ((_cn-1) << CV_CN_SHIFT);
        setSize(hdr, _newndims, (int*)newsz_buf, NULL, true);

        return hdr;
    }

    CV_Error(CV_StsNotImplemented, "Reshaping of n-dimensional non-continuous matrices is not supported yet");
    // TBD
    return Mat();
}
04386 
// Default constructor: creates an unbound iterator; init() must be called
// before use.
NAryMatIterator::NAryMatIterator()
    : arrays(0), planes(0), ptrs(0), narrays(0), nplanes(0), size(0), iterdepth(0), idx(0)
{
}
04391 
// Construct and immediately bind to _narrays matrices, exposing each
// current plane as a Mat header in _planes (no raw-pointer output).
NAryMatIterator::NAryMatIterator(const Mat** _arrays, Mat* _planes, int _narrays)
: arrays(0), planes(0), ptrs(0), narrays(0), nplanes(0), size(0), iterdepth(0), idx(0)
{
    init(_arrays, _planes, 0, _narrays);
}
04397 
// Construct and immediately bind to _narrays matrices, exposing each
// current plane as a raw data pointer in _ptrs (no Mat-header output).
NAryMatIterator::NAryMatIterator(const Mat** _arrays, uchar** _ptrs, int _narrays)
    : arrays(0), planes(0), ptrs(0), narrays(0), nplanes(0), size(0), iterdepth(0), idx(0)
{
    init(_arrays, 0, _ptrs, _narrays);
}
04403 
// Bind the iterator to a set of same-geometry nd matrices. Computes the
// largest contiguous "plane" (a run of trailing dimensions that is
// continuous in every non-empty array) plus the number of such planes, and
// primes _ptrs and/or _planes with the first plane of each array.
// A negative _narrays means the _arrays list is 0-terminated.
void NAryMatIterator::init(const Mat** _arrays, Mat* _planes, uchar** _ptrs, int _narrays)
{
    CV_Assert( _arrays && (_ptrs || _planes) );
    int i, j, d1=0, i0 = -1, d = -1;

    arrays = _arrays;
    ptrs = _ptrs;
    planes = _planes;
    narrays = _narrays;
    nplanes = 0;
    size = 0;

    if( narrays < 0 )
    {
        // count entries of the 0-terminated array list
        for( i = 0; _arrays[i] != 0; i++ )
            ;
        narrays = i;
        CV_Assert(narrays <= 1000);
    }

    iterdepth = 0;

    for( i = 0; i < narrays; i++ )
    {
        CV_Assert(arrays[i] != 0);
        const Mat& A = *arrays[i];
        if( ptrs )
            ptrs[i] = A.data;

        if( !A.data )
            continue;

        if( i0 < 0 )
        {
            // remember the first non-empty array as the reference geometry
            i0 = i;
            d = A.dims;

            // find the first dimensionality which is different from 1;
            // in any of the arrays the first "d1" step do not affect the continuity
            for( d1 = 0; d1 < d; d1++ )
                if( A.size[d1] > 1 )
                    break;
        }
        else
            CV_Assert( A.size == arrays[i0]->size );

        if( !A.isContinuous() )
        {
            CV_Assert( A.step[d-1] == A.elemSize() );
            // locate the innermost dimension where a gap appears; iteration
            // must happen at or above that depth
            for( j = d-1; j > d1; j-- )
                if( A.step[j]*A.size[j] < A.step[j-1] )
                    break;
            iterdepth = std::max(iterdepth, j);
        }
    }

    if( i0 >= 0 )
    {
        // collapse as many trailing (continuous) dimensions as possible
        // into one plane of `size` elements, guarding against int overflow
        size = arrays[i0]->size[d-1];
        for( j = d-1; j > iterdepth; j-- )
        {
            int64 total1 = (int64)size*arrays[i0]->size[j-1];
            if( total1 != (int)total1 )
                break;
            size = (int)total1;
        }

        iterdepth = j;
        if( iterdepth == d1 )
            iterdepth = 0;      // fully continuous: one plane covers everything

        // number of planes = product of the remaining (outer) dimensions
        nplanes = 1;
        for( j = iterdepth-1; j >= 0; j-- )
            nplanes *= arrays[i0]->size[j];
    }
    else
        iterdepth = 0;

    idx = 0;

    if( !planes )
        return;

    // build a 1 x size Mat header over the first plane of every array
    for( i = 0; i < narrays; i++ )
    {
        CV_Assert(arrays[i] != 0);
        const Mat& A = *arrays[i];

        if( !A.data )
        {
            planes[i] = Mat();
            continue;
        }

        planes[i] = Mat(1, (int)size, A.type(), A.data);
    }
}
04501 
04502 
// Advance all bound arrays to their next plane, updating ptrs and/or
// planes to reference the new plane's data. Saturates at the last plane
// (incrementing past the end is a no-op).
NAryMatIterator& NAryMatIterator::operator ++()
{
    if( idx >= nplanes-1 )
        return *this;
    ++idx;

    if( iterdepth == 1 )
    {
        // fast path: planes are consecutive "rows", one step[0] apart
        if( ptrs )
        {
            for( int i = 0; i < narrays; i++ )
            {
                if( !ptrs[i] )
                    continue;
                ptrs[i] = arrays[i]->data + arrays[i]->step[0]*idx;
            }
        }
        if( planes )
        {
            for( int i = 0; i < narrays; i++ )
            {
                if( !planes[i].data )
                    continue;
                planes[i].data = arrays[i]->data + arrays[i]->step[0]*idx;
            }
        }
    }
    else
    {
        // general case: decompose the plane index into per-dimension
        // coordinates (innermost iterated dimension first) and accumulate
        // the byte offset for each array
        for( int i = 0; i < narrays; i++ )
        {
            const Mat& A = *arrays[i];
            if( !A.data )
                continue;
            int _idx = (int)idx;
            uchar* data = A.data;
            for( int j = iterdepth-1; j >= 0 && _idx > 0; j-- )
            {
                int szi = A.size[j], t = _idx/szi;
                data += (_idx - t * szi)*A.step[j];
                _idx = t;
            }
            if( ptrs )
                ptrs[i] = data;
            if( planes )
                planes[i].data = data;
        }
    }

    return *this;
}
04554 
04555 NAryMatIterator NAryMatIterator::operator ++(int)
04556 {
04557     NAryMatIterator it = *this;
04558     ++*this;
04559     return it;
04560 }
04561 
04562 ///////////////////////////////////////////////////////////////////////////
04563 //                              MatConstIterator                         //
04564 ///////////////////////////////////////////////////////////////////////////
04565 
// Return the (x, y) position of the iterator within its (<=2D) matrix;
// an unbound iterator reports (0, 0).
Point MatConstIterator::pos() const
{
    if( !m )
        return Point();
    CV_DbgAssert(m->dims <= 2);

    ptrdiff_t ofs = ptr - m->ptr();
    int y = (int)(ofs/m->step[0]);      // row = byte offset / row stride
    return Point((int)((ofs - y*m->step[0])/elemSize), y);
}
04576 
// Store the nd position of the iterator into _idx (one int per dimension)
// by successively dividing the byte offset by each dimension's step,
// outermost dimension first.
void MatConstIterator::pos(int* _idx) const
{
    CV_Assert(m != 0 && _idx);
    ptrdiff_t ofs = ptr - m->ptr();
    for( int i = 0; i < m->dims; i++ )
    {
        size_t s = m->step[i], v = ofs/s;
        ofs -= v*s;
        _idx[i] = (int)v;
    }
}
04588 
// Return the linear (element-wise, row-major) position of the iterator
// within the matrix; 0 for an unbound iterator.
ptrdiff_t MatConstIterator::lpos() const
{
    if(!m)
        return 0;
    if( m->isContinuous() )
        return (ptr - sliceStart)/elemSize;     // one flat slice: direct division
    ptrdiff_t ofs = ptr - m->ptr();
    int i, d = m->dims;
    if( d == 2 )
    {
        // 2D: row index from the row stride, then column within the row
        ptrdiff_t y = ofs/m->step[0];
        return y*m->cols + (ofs - y*m->step[0])/elemSize;
    }
    // general nd case: peel one coordinate per dimension off the byte
    // offset and re-linearize in element units
    ptrdiff_t result = 0;
    for( i = 0; i < d; i++ )
    {
        size_t s = m->step[i], v = ofs/s;
        ofs -= v*s;
        result = result*m->size[i] + v;
    }
    return result;
}
04611 
// Move the iterator to (or, when relative == true, by) the linear element
// offset `ofs`. Out-of-range targets are clamped to the valid range. For
// non-continuous matrices the current slice (the innermost contiguous run)
// is recomputed so that sliceStart/sliceEnd bracket the new position.
void MatConstIterator::seek(ptrdiff_t ofs, bool relative)
{
    if( m->isContinuous() )
    {
        // single flat slice: plain pointer arithmetic with clamping
        ptr = (relative ? ptr : sliceStart) + ofs*elemSize;
        if( ptr < sliceStart )
            ptr = sliceStart;
        else if( ptr > sliceEnd )
            ptr = sliceEnd;
        return;
    }

    int d = m->dims;
    if( d == 2 )
    {
        ptrdiff_t ofs0, y;
        if( relative )
        {
            // convert the current position to a linear offset first
            ofs0 = ptr - m->ptr();
            y = ofs0/m->step[0];
            ofs += y*m->cols + (ofs0 - y*m->step[0])/elemSize;
        }
        y = ofs/m->cols;
        int y1 = std::min(std::max((int)y, 0), m->rows-1);
        sliceStart = m->ptr(y1);
        sliceEnd = sliceStart + m->cols*elemSize;
        // out-of-range rows clamp to the first/last slice boundary
        ptr = y < 0 ? sliceStart : y >= m->rows ? sliceEnd :
            sliceStart + (ofs - y*m->cols)*elemSize;
        return;
    }

    if( relative )
        ofs += lpos();

    if( ofs < 0 )
        ofs = 0;

    // decompose the linear offset, innermost dimension first
    int szi = m->size[d-1];
    ptrdiff_t t = ofs/szi;
    int v = (int)(ofs - t*szi);
    ofs = t;
    ptr = m->ptr() + v*elemSize;
    sliceStart = m->ptr();

    for( int i = d-2; i >= 0; i-- )
    {
        szi = m->size[i];
        t = ofs/szi;
        v = (int)(ofs - t*szi);
        ofs = t;
        sliceStart += v*m->step[i];
    }

    sliceEnd = sliceStart + m->size[d-1]*elemSize;
    if( ofs > 0 )
        // a leftover quotient means the offset was past the end:
        // park at the end of the last slice
        ptr = sliceEnd;
    else
        ptr = sliceStart + (ptr - m->ptr());
}
04671 
04672 void MatConstIterator::seek(const int* _idx, bool relative)
04673 {
04674     int i, d = m->dims;
04675     ptrdiff_t ofs = 0;
04676     if( !_idx )
04677         ;
04678     else if( d == 2 )
04679         ofs = _idx[0]*m->size[1] + _idx[1];
04680     else
04681     {
04682         for( i = 0; i < d; i++ )
04683             ofs = ofs*m->size[i] + _idx[i];
04684     }
04685     seek(ofs, relative);
04686 }
04687 
04688 //////////////////////////////// SparseMat ////////////////////////////////
04689 
04690 template<typename T1, typename T2> void
04691 convertData_(const void* _from, void* _to, int cn)
04692 {
04693     const T1* from = (const T1*)_from;
04694     T2* to = (T2*)_to;
04695     if( cn == 1 )
04696         *to = saturate_cast<T2>(*from);
04697     else
04698         for( int i = 0; i < cn; i++ )
04699             to[i] = saturate_cast<T2>(from[i]);
04700 }
04701 
04702 template<typename T1, typename T2> void
04703 convertScaleData_(const void* _from, void* _to, int cn, double alpha, double beta)
04704 {
04705     const T1* from = (const T1*)_from;
04706     T2* to = (T2*)_to;
04707     if( cn == 1 )
04708         *to = saturate_cast<T2>(*from*alpha + beta);
04709     else
04710         for( int i = 0; i < cn; i++ )
04711             to[i] = saturate_cast<T2>(from[i]*alpha + beta);
04712 }
04713 
04714 typedef void (*ConvertData)(const void* from, void* to, int cn);
04715 typedef void (*ConvertScaleData)(const void* from, void* to, int cn, double alpha, double beta);
04716 
// Returns the unscaled element-conversion function for the given pair of
// depths. The static 8x8 table is indexed as [source depth][dest depth];
// entries for depth index 7 are null, so the assertion below rejects
// unsupported depths.
static ConvertData getConvertElem(int fromType, int toType)
{
    static ConvertData tab[][8] =
    {{ convertData_<uchar, uchar>, convertData_<uchar, schar>,
      convertData_<uchar, ushort>, convertData_<uchar, short>,
      convertData_<uchar, int>, convertData_<uchar, float>,
      convertData_<uchar, double>, 0 },

    { convertData_<schar, uchar>, convertData_<schar, schar>,
      convertData_<schar, ushort>, convertData_<schar, short>,
      convertData_<schar, int>, convertData_<schar, float>,
      convertData_<schar, double>, 0 },

    { convertData_<ushort, uchar>, convertData_<ushort, schar>,
      convertData_<ushort, ushort>, convertData_<ushort, short>,
      convertData_<ushort, int>, convertData_<ushort, float>,
      convertData_<ushort, double>, 0 },

    { convertData_<short, uchar>, convertData_<short, schar>,
      convertData_<short, ushort>, convertData_<short, short>,
      convertData_<short, int>, convertData_<short, float>,
      convertData_<short, double>, 0 },

    { convertData_<int, uchar>, convertData_<int, schar>,
      convertData_<int, ushort>, convertData_<int, short>,
      convertData_<int, int>, convertData_<int, float>,
      convertData_<int, double>, 0 },

    { convertData_<float, uchar>, convertData_<float, schar>,
      convertData_<float, ushort>, convertData_<float, short>,
      convertData_<float, int>, convertData_<float, float>,
      convertData_<float, double>, 0 },

    { convertData_<double, uchar>, convertData_<double, schar>,
      convertData_<double, ushort>, convertData_<double, short>,
      convertData_<double, int>, convertData_<double, float>,
      convertData_<double, double>, 0 },

    { 0, 0, 0, 0, 0, 0, 0, 0 }};

    // Only the depth component of the type codes selects the converter.
    ConvertData func = tab[CV_MAT_DEPTH(fromType)][CV_MAT_DEPTH(toType)];
    CV_Assert( func != 0 );
    return func;
}
04761 
// Returns the scaling element-conversion function (dst = sat(src*alpha+beta))
// for the given pair of depths. Same table layout as getConvertElem():
// [source depth][dest depth], with null entries for depth index 7.
static ConvertScaleData getConvertScaleElem(int fromType, int toType)
{
    static ConvertScaleData tab[][8] =
    {{ convertScaleData_<uchar, uchar>, convertScaleData_<uchar, schar>,
      convertScaleData_<uchar, ushort>, convertScaleData_<uchar, short>,
      convertScaleData_<uchar, int>, convertScaleData_<uchar, float>,
      convertScaleData_<uchar, double>, 0 },

    { convertScaleData_<schar, uchar>, convertScaleData_<schar, schar>,
      convertScaleData_<schar, ushort>, convertScaleData_<schar, short>,
      convertScaleData_<schar, int>, convertScaleData_<schar, float>,
      convertScaleData_<schar, double>, 0 },

    { convertScaleData_<ushort, uchar>, convertScaleData_<ushort, schar>,
      convertScaleData_<ushort, ushort>, convertScaleData_<ushort, short>,
      convertScaleData_<ushort, int>, convertScaleData_<ushort, float>,
      convertScaleData_<ushort, double>, 0 },

    { convertScaleData_<short, uchar>, convertScaleData_<short, schar>,
      convertScaleData_<short, ushort>, convertScaleData_<short, short>,
      convertScaleData_<short, int>, convertScaleData_<short, float>,
      convertScaleData_<short, double>, 0 },

    { convertScaleData_<int, uchar>, convertScaleData_<int, schar>,
      convertScaleData_<int, ushort>, convertScaleData_<int, short>,
      convertScaleData_<int, int>, convertScaleData_<int, float>,
      convertScaleData_<int, double>, 0 },

    { convertScaleData_<float, uchar>, convertScaleData_<float, schar>,
      convertScaleData_<float, ushort>, convertScaleData_<float, short>,
      convertScaleData_<float, int>, convertScaleData_<float, float>,
      convertScaleData_<float, double>, 0 },

    { convertScaleData_<double, uchar>, convertScaleData_<double, schar>,
      convertScaleData_<double, ushort>, convertScaleData_<double, short>,
      convertScaleData_<double, int>, convertScaleData_<double, float>,
      convertScaleData_<double, double>, 0 },

    { 0, 0, 0, 0, 0, 0, 0, 0 }};

    // Only the depth component of the type codes selects the converter.
    ConvertScaleData func = tab[CV_MAT_DEPTH(fromType)][CV_MAT_DEPTH(toType)];
    CV_Assert( func != 0 );
    return func;
}
04806 
04807 enum { HASH_SIZE0 = 8 };
04808 
04809 static inline void copyElem(const uchar* from, uchar* to, size_t elemSize)
04810 {
04811     size_t i;
04812     for( i = 0; i + sizeof(int) <= elemSize; i += sizeof(int) )
04813         *(int*)(to + i) = *(const int*)(from + i);
04814     for( ; i < elemSize; i++ )
04815         to[i] = from[i];
04816 }
04817 
04818 static inline bool isZeroElem(const uchar* data, size_t elemSize)
04819 {
04820     size_t i;
04821     for( i = 0; i + sizeof(int) <= elemSize; i += sizeof(int) )
04822         if( *(int*)(data + i) != 0 )
04823             return false;
04824     for( ; i < elemSize; i++ )
04825         if( data[i] != 0 )
04826             return false;
04827     return true;
04828 }
04829 
// Header constructor: computes the node layout for the given dimensionality
// and element type, then resets the hash table and pool via clear().
SparseMat::Hdr::Hdr( int _dims, const int* _sizes, int _type )
{
    refcount = 1;

    dims = _dims;
    // Byte offset of the value payload inside each node: the Node struct
    // minus the unused tail of its idx[MAX_DIM] array (only `dims` entries
    // are needed), aligned to the single-channel element size.
    valueOffset = (int)alignSize(sizeof(SparseMat::Node) - MAX_DIM*sizeof(int) +
                                 dims*sizeof(int), CV_ELEM_SIZE1(_type));
    // Full node size = header + value, rounded up to size_t alignment so
    // nodes can be packed back-to-back in the byte pool.
    nodeSize = alignSize(valueOffset +
        CV_ELEM_SIZE(_type), (int)sizeof(size_t));

    int i;
    for( i = 0; i < dims; i++ )
        size[i] = _sizes[i];
    for( ; i < CV_MAX_DIM; i++ )
        size[i] = 0;   // zero out the unused dimensions
    clear();
}
04847 
04848 void SparseMat::Hdr::clear()
04849 {
04850     hashtab.clear();
04851     hashtab.resize(HASH_SIZE0);
04852     pool.clear();
04853     pool.resize(nodeSize);
04854     nodeCount = freeList = 0;
04855 }
04856 
04857 
// Construct a sparse matrix from a dense Mat: only elements that are not
// all-zero bytes are inserted as nodes.
SparseMat::SparseMat(const Mat& m)
: flags(MAGIC_VAL), hdr(0)
{
    create( m.dims, m.size, m.type() );

    int i, idx[CV_MAX_DIM] = {0}, d = m.dims, lastSize = m.size[d - 1];
    size_t esz = m.elemSize();
    const uchar* dptr = m.ptr();

    // Walk the dense data plane by plane: the inner loop scans the
    // innermost dimension contiguously; the second loop propagates the
    // carry through the outer indices, odometer-style.
    for(;;)
    {
        for( i = 0; i < lastSize; i++, dptr += esz )
        {
            if( isZeroElem(dptr, esz) )
                continue;
            idx[d-1] = i;
            uchar* to = newNode(idx, hash(idx));
            copyElem( dptr, to, esz );
        }

        for( i = d - 2; i >= 0; i-- )
        {
            // Advance to the next row of dimension i, compensating for the
            // m.size[i+1] rows the inner scans already stepped over.
            dptr += m.step[i] - m.size[i+1]*m.step[i+1];
            if( ++idx[i] < m.size[i] )
                break;
            idx[i] = 0;   // this dimension wrapped; carry into the next one
        }
        if( i < 0 )   // every dimension wrapped: traversal complete
            break;
    }
}
04889 
04890 void SparseMat::create(int d, const int* _sizes, int _type)
04891 {
04892     int i;
04893     CV_Assert( _sizes && 0 < d && d <= CV_MAX_DIM );
04894     for( i = 0; i < d; i++ )
04895         CV_Assert( _sizes[i] > 0 );
04896     _type = CV_MAT_TYPE(_type);
04897     if( hdr && _type == type() && hdr->dims == d && hdr->refcount == 1 )
04898     {
04899         for( i = 0; i < d; i++ )
04900             if( _sizes[i] != hdr->size[i] )
04901                 break;
04902         if( i == d )
04903         {
04904             clear();
04905             return;
04906         }
04907     }
04908     release();
04909     flags = MAGIC_VAL | _type;
04910     hdr = new Hdr(d, _sizes, _type);
04911 }
04912 
04913 void SparseMat::copyTo( SparseMat& m ) const
04914 {
04915     if( hdr == m.hdr )
04916         return;
04917     if( !hdr )
04918     {
04919         m.release();
04920         return;
04921     }
04922     m.create( hdr->dims, hdr->size, type() );
04923     SparseMatConstIterator from = begin();
04924     size_t i, N = nzcount(), esz = elemSize();
04925 
04926     for( i = 0; i < N; i++, ++from )
04927     {
04928         const Node* n = from.node();
04929         uchar* to = m.newNode(n->idx, n->hashval);
04930         copyElem( from.ptr, to, esz );
04931     }
04932 }
04933 
04934 void SparseMat::copyTo( Mat& m ) const
04935 {
04936     CV_Assert( hdr );
04937     int ndims = dims();
04938     m.create( ndims, hdr->size, type() );
04939     m = Scalar (0);
04940 
04941     SparseMatConstIterator from = begin();
04942     size_t i, N = nzcount(), esz = elemSize();
04943 
04944     for( i = 0; i < N; i++, ++from )
04945     {
04946         const Node* n = from.node();
04947         copyElem( from.ptr, (ndims > 1 ? m.ptr(n->idx) : m.ptr(n->idx[0])), esz);
04948     }
04949 }
04950 
04951 
// Convert the sparse matrix to another depth (rtype < 0 keeps the current
// one), optionally scaling by alpha. Channel count is preserved.
void SparseMat::convertTo( SparseMat& m, int rtype, double alpha ) const
{
    int cn = channels();
    if( rtype < 0 )
        rtype = type();
    rtype = CV_MAKETYPE(rtype, cn);
    // In-place conversion to a different type must go through a temporary:
    // nodes cannot be rewritten to a new element size while being iterated.
    if( hdr == m.hdr && rtype != type()  )
    {
        SparseMat temp;
        convertTo(temp, rtype, alpha);
        m = temp;
        return;
    }

    CV_Assert(hdr != 0);
    if( hdr != m.hdr )
        m.create( hdr->dims, hdr->size, rtype );

    SparseMatConstIterator from = begin();
    size_t i, N = nzcount();

    if( alpha == 1 )
    {
        // Fast path: pure depth conversion, no scaling.
        ConvertData cvtfunc = getConvertElem(type(), rtype);
        for( i = 0; i < N; i++, ++from )
        {
            const Node* n = from.node();
            // Same header => same type (checked above): convert in place.
            // Otherwise allocate the destination node first.
            uchar* to = hdr == m.hdr ? from.ptr : m.newNode(n->idx, n->hashval);
            cvtfunc( from.ptr, to, cn );
        }
    }
    else
    {
        ConvertScaleData cvtfunc = getConvertScaleElem(type(), rtype);
        for( i = 0; i < N; i++, ++from )
        {
            const Node* n = from.node();
            uchar* to = hdr == m.hdr ? from.ptr : m.newNode(n->idx, n->hashval);
            cvtfunc( from.ptr, to, cn, alpha, 0 );
        }
    }
}
04994 
04995 
// Convert to a dense Mat, applying dst = src*alpha + beta element-wise.
// rtype < 0 keeps the current depth; channel count is preserved.
void SparseMat::convertTo( Mat& m, int rtype, double alpha, double beta ) const
{
    int cn = channels();
    if( rtype < 0 )
        rtype = type();
    rtype = CV_MAKETYPE(rtype, cn);

    CV_Assert( hdr );
    m.create( dims(), hdr->size, rtype );
    // The implicit zeros of the sparse matrix map to 0*alpha + beta = beta,
    // so the whole dense output is pre-filled with beta.
    m = Scalar (beta);

    SparseMatConstIterator from = begin();
    size_t i, N = nzcount();

    if( alpha == 1 && beta == 0 )
    {
        // Fast path: plain depth conversion without scaling.
        ConvertData cvtfunc = getConvertElem(type(), rtype);
        for( i = 0; i < N; i++, ++from )
        {
            const Node* n = from.node();
            uchar* to = m.ptr(n->idx);
            cvtfunc( from.ptr, to, cn );
        }
    }
    else
    {
        ConvertScaleData cvtfunc = getConvertScaleElem(type(), rtype);
        for( i = 0; i < N; i++, ++from )
        {
            const Node* n = from.node();
            uchar* to = m.ptr(n->idx);
            cvtfunc( from.ptr, to, cn, alpha, beta );
        }
    }
}
05031 
05032 void SparseMat::clear()
05033 {
05034     if( hdr )
05035         hdr->clear();
05036 }
05037 
05038 uchar* SparseMat::ptr(int i0, bool createMissing, size_t* hashval)
05039 {
05040     CV_Assert( hdr && hdr->dims == 1 );
05041     size_t h = hashval ? *hashval : hash(i0);
05042     size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx];
05043     uchar* pool = &hdr->pool[0];
05044     while( nidx != 0 )
05045     {
05046         Node* elem = (Node*)(pool + nidx);
05047         if( elem->hashval == h && elem->idx[0] == i0 )
05048             return &value<uchar>(elem);
05049         nidx = elem->next;
05050     }
05051 
05052     if( createMissing )
05053     {
05054         int idx[] = { i0 };
05055         return newNode( idx, h );
05056     }
05057     return 0;
05058 }
05059 
05060 uchar* SparseMat::ptr(int i0, int i1, bool createMissing, size_t* hashval)
05061 {
05062     CV_Assert( hdr && hdr->dims == 2 );
05063     size_t h = hashval ? *hashval : hash(i0, i1);
05064     size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx];
05065     uchar* pool = &hdr->pool[0];
05066     while( nidx != 0 )
05067     {
05068         Node* elem = (Node*)(pool + nidx);
05069         if( elem->hashval == h && elem->idx[0] == i0 && elem->idx[1] == i1 )
05070             return &value<uchar>(elem);
05071         nidx = elem->next;
05072     }
05073 
05074     if( createMissing )
05075     {
05076         int idx[] = { i0, i1 };
05077         return newNode( idx, h );
05078     }
05079     return 0;
05080 }
05081 
05082 uchar* SparseMat::ptr(int i0, int i1, int i2, bool createMissing, size_t* hashval)
05083 {
05084     CV_Assert( hdr && hdr->dims == 3 );
05085     size_t h = hashval ? *hashval : hash(i0, i1, i2);
05086     size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx];
05087     uchar* pool = &hdr->pool[0];
05088     while( nidx != 0 )
05089     {
05090         Node* elem = (Node*)(pool + nidx);
05091         if( elem->hashval == h && elem->idx[0] == i0 &&
05092             elem->idx[1] == i1 && elem->idx[2] == i2 )
05093             return &value<uchar>(elem);
05094         nidx = elem->next;
05095     }
05096 
05097     if( createMissing )
05098     {
05099         int idx[] = { i0, i1, i2 };
05100         return newNode( idx, h );
05101     }
05102     return 0;
05103 }
05104 
05105 uchar* SparseMat::ptr(const int* idx, bool createMissing, size_t* hashval)
05106 {
05107     CV_Assert( hdr );
05108     int i, d = hdr->dims;
05109     size_t h = hashval ? *hashval : hash(idx);
05110     size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx];
05111     uchar* pool = &hdr->pool[0];
05112     while( nidx != 0 )
05113     {
05114         Node* elem = (Node*)(pool + nidx);
05115         if( elem->hashval == h )
05116         {
05117             for( i = 0; i < d; i++ )
05118                 if( elem->idx[i] != idx[i] )
05119                     break;
05120             if( i == d )
05121                 return &value<uchar>(elem);
05122         }
05123         nidx = elem->next;
05124     }
05125 
05126     return createMissing ? newNode(idx, h) : 0;
05127 }
05128 
05129 void SparseMat::erase(int i0, int i1, size_t* hashval)
05130 {
05131     CV_Assert( hdr && hdr->dims == 2 );
05132     size_t h = hashval ? *hashval : hash(i0, i1);
05133     size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx], previdx=0;
05134     uchar* pool = &hdr->pool[0];
05135     while( nidx != 0 )
05136     {
05137         Node* elem = (Node*)(pool + nidx);
05138         if( elem->hashval == h && elem->idx[0] == i0 && elem->idx[1] == i1 )
05139             break;
05140         previdx = nidx;
05141         nidx = elem->next;
05142     }
05143 
05144     if( nidx )
05145         removeNode(hidx, nidx, previdx);
05146 }
05147 
05148 void SparseMat::erase(int i0, int i1, int i2, size_t* hashval)
05149 {
05150     CV_Assert( hdr && hdr->dims == 3 );
05151     size_t h = hashval ? *hashval : hash(i0, i1, i2);
05152     size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx], previdx=0;
05153     uchar* pool = &hdr->pool[0];
05154     while( nidx != 0 )
05155     {
05156         Node* elem = (Node*)(pool + nidx);
05157         if( elem->hashval == h && elem->idx[0] == i0 &&
05158             elem->idx[1] == i1 && elem->idx[2] == i2 )
05159             break;
05160         previdx = nidx;
05161         nidx = elem->next;
05162     }
05163 
05164     if( nidx )
05165         removeNode(hidx, nidx, previdx);
05166 }
05167 
05168 void SparseMat::erase(const int* idx, size_t* hashval)
05169 {
05170     CV_Assert( hdr );
05171     int i, d = hdr->dims;
05172     size_t h = hashval ? *hashval : hash(idx);
05173     size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx], previdx=0;
05174     uchar* pool = &hdr->pool[0];
05175     while( nidx != 0 )
05176     {
05177         Node* elem = (Node*)(pool + nidx);
05178         if( elem->hashval == h )
05179         {
05180             for( i = 0; i < d; i++ )
05181                 if( elem->idx[i] != idx[i] )
05182                     break;
05183             if( i == d )
05184                 break;
05185         }
05186         previdx = nidx;
05187         nidx = elem->next;
05188     }
05189 
05190     if( nidx )
05191         removeNode(hidx, nidx, previdx);
05192 }
05193 
// Resize the bucket array to a power of two (at least 8) and re-link every
// node into its new bucket. Node payloads stay where they are in the pool;
// only the chain links change.
void SparseMat::resizeHashTab(size_t newsize)
{
    newsize = std::max(newsize, (size_t)8);
    // Round a non-power-of-two request up to the next power of two, so that
    // "hash & (size-1)" keeps working as the bucket selector.
    if((newsize & (newsize-1)) != 0)
        newsize = (size_t)1 << cvCeil(std::log((double)newsize)/CV_LOG2);

    size_t i, hsize = hdr->hashtab.size();
    std::vector<size_t> _newh(newsize);
    size_t* newh = &_newh[0];
    for( i = 0; i < newsize; i++ )
        newh[i] = 0;
    uchar* pool = &hdr->pool[0];
    for( i = 0; i < hsize; i++ )
    {
        size_t nidx = hdr->hashtab[i];
        while( nidx )
        {
            // Pop each node off its old chain and push it onto the head of
            // its new bucket.
            Node* elem = (Node*)(pool + nidx);
            size_t next = elem->next;
            size_t newhidx = elem->hashval & (newsize - 1);
            elem->next = newh[newhidx];
            newh[newhidx] = nidx;
            nidx = next;
        }
    }
    hdr->hashtab = _newh;
}
05221 
// Allocate a new node for index idx with the given hash, link it into its
// bucket, zero-initialize its value, and return a pointer to that value.
uchar* SparseMat::newNode(const int* idx, size_t hashval)
{
    const int HASH_MAX_FILL_FACTOR=3;
    assert(hdr);
    size_t hsize = hdr->hashtab.size();
    // Grow the bucket array when the average chain length would exceed
    // HASH_MAX_FILL_FACTOR.
    if( ++hdr->nodeCount > hsize*HASH_MAX_FILL_FACTOR )
    {
        resizeHashTab(std::max(hsize*2, (size_t)8));
        hsize = hdr->hashtab.size();
    }

    // Replenish the free list by growing the pool ~1.5x. Offsets start at
    // nsz because pool offset 0 is reserved as the "null" link.
    if( !hdr->freeList )
    {
        size_t i, nsz = hdr->nodeSize, psize = hdr->pool.size(),
            newpsize = std::max(psize*3/2, 8*nsz);
        newpsize = (newpsize/nsz)*nsz;   // round down to a whole number of nodes
        hdr->pool.resize(newpsize);
        uchar* pool = &hdr->pool[0];
        hdr->freeList = std::max(psize, nsz);
        // Chain the freshly added nodes into a singly linked free list.
        for( i = hdr->freeList; i < newpsize - nsz; i += nsz )
            ((Node*)(pool + i))->next = i + nsz;
        ((Node*)(pool + i))->next = 0;
    }
    // Pop a node off the free list and link it at the head of its bucket.
    size_t nidx = hdr->freeList;
    Node* elem = (Node*)&hdr->pool[nidx];
    hdr->freeList = elem->next;
    elem->hashval = hashval;
    size_t hidx = hashval & (hsize - 1);
    elem->next = hdr->hashtab[hidx];
    hdr->hashtab[hidx] = nidx;

    int i, d = hdr->dims;
    for( i = 0; i < d; i++ )
        elem->idx[i] = idx[i];
    // Zero-initialize the value; single float/double writes avoid memset.
    size_t esz = elemSize();
    uchar* p = &value<uchar>(elem);
    if( esz == sizeof(float) )
        *((float*)p) = 0.f;
    else if( esz == sizeof(double) )
        *((double*)p) = 0.;
    else
        memset(p, 0, esz);

    return p;
}
05267 
05268 
05269 void SparseMat::removeNode(size_t hidx, size_t nidx, size_t previdx)
05270 {
05271     Node* n = node(nidx);
05272     if( previdx )
05273     {
05274         Node* prev = node(previdx);
05275         prev->next = n->next;
05276     }
05277     else
05278         hdr->hashtab[hidx] = n->next;
05279     n->next = hdr->freeList;
05280     hdr->freeList = nidx;
05281     --hdr->nodeCount;
05282 }
05283 
05284 
05285 SparseMatConstIterator::SparseMatConstIterator(const SparseMat* _m)
05286 : m((SparseMat*)_m), hashidx(0), ptr(0)
05287 {
05288     if(!_m || !_m->hdr)
05289         return;
05290     SparseMat::Hdr& hdr = *m->hdr;
05291     const std::vector<size_t>& htab = hdr.hashtab;
05292     size_t i, hsize = htab.size();
05293     for( i = 0; i < hsize; i++ )
05294     {
05295         size_t nidx = htab[i];
05296         if( nidx )
05297         {
05298             hashidx = i;
05299             ptr = &hdr.pool[nidx] + hdr.valueOffset;
05300             return;
05301         }
05302     }
05303 }
05304 
// Advance to the next stored element. The end() state is ptr == 0; calling
// ++ on an end or invalid iterator is a no-op.
SparseMatConstIterator& SparseMatConstIterator::operator ++()
{
    if( !ptr || !m || !m->hdr )
        return *this;
    SparseMat::Hdr& hdr = *m->hdr;
    // First try the next node in the current bucket's collision chain
    // (ptr points at the value; step back valueOffset to reach the node).
    size_t next = ((const SparseMat::Node*)(ptr - hdr.valueOffset))->next;
    if( next )
    {
        ptr = &hdr.pool[next] + hdr.valueOffset;
        return *this;
    }
    // Otherwise scan forward for the next non-empty bucket.
    size_t i = hashidx + 1, sz = hdr.hashtab.size();
    for( ; i < sz; i++ )
    {
        size_t nidx = hdr.hashtab[i];
        if( nidx )
        {
            hashidx = i;
            ptr = &hdr.pool[nidx] + hdr.valueOffset;
            return *this;
        }
    }
    // No more elements: become the end iterator.
    hashidx = sz;
    ptr = 0;
    return *this;
}
05331 
05332 
// Computes NORM_INF, NORM_L1 or NORM_L2 over the stored (non-zero)
// elements of a single-channel CV_32F or CV_64F sparse matrix; the
// implicit zeros contribute nothing to any of these norms.
double norm( const SparseMat& src, int normType )
{
    SparseMatConstIterator it = src.begin();

    size_t i, N = src.nzcount();
    normType &= NORM_TYPE_MASK;   // strip NORM_RELATIVE-style flag bits
    int type = src.type();
    double result = 0;

    CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );

    if( type == CV_32F )
    {
        if( normType == NORM_INF )
            for( i = 0; i < N; i++, ++it )
                result = std::max(result, std::abs((double)it.value<float>()));
        else if( normType == NORM_L1 )
            for( i = 0; i < N; i++, ++it )
                result += std::abs(it.value<float>());
        else
            // NORM_L2: accumulate squares; square root is taken at the end.
            for( i = 0; i < N; i++, ++it )
            {
                double v = it.value<float>();
                result += v*v;
            }
    }
    else if( type == CV_64F )
    {
        if( normType == NORM_INF )
            for( i = 0; i < N; i++, ++it )
                result = std::max(result, std::abs(it.value<double>()));
        else if( normType == NORM_L1 )
            for( i = 0; i < N; i++, ++it )
                result += std::abs(it.value<double>());
        else
            for( i = 0; i < N; i++, ++it )
            {
                double v = it.value<double>();
                result += v*v;
            }
    }
    else
        CV_Error( CV_StsUnsupportedFormat, "Only 32f and 64f are supported" );

    if( normType == NORM_L2 )
        result = std::sqrt(result);
    return result;
}
05381 
05382 void minMaxLoc( const SparseMat& src, double* _minval, double* _maxval, int* _minidx, int* _maxidx )
05383 {
05384     SparseMatConstIterator it = src.begin();
05385     size_t i, N = src.nzcount(), d = src.hdr ? src.hdr->dims : 0;
05386     int type = src.type();
05387     const int *minidx = 0, *maxidx = 0;
05388 
05389     if( type == CV_32F )
05390     {
05391         float minval = FLT_MAX, maxval = -FLT_MAX;
05392         for( i = 0; i < N; i++, ++it )
05393         {
05394             float v = it.value<float>();
05395             if( v < minval )
05396             {
05397                 minval = v;
05398                 minidx = it.node()->idx;
05399             }
05400             if( v > maxval )
05401             {
05402                 maxval = v;
05403                 maxidx = it.node()->idx;
05404             }
05405         }
05406         if( _minval )
05407             *_minval = minval;
05408         if( _maxval )
05409             *_maxval = maxval;
05410     }
05411     else if( type == CV_64F )
05412     {
05413         double minval = DBL_MAX, maxval = -DBL_MAX;
05414         for( i = 0; i < N; i++, ++it )
05415         {
05416             double v = it.value<double>();
05417             if( v < minval )
05418             {
05419                 minval = v;
05420                 minidx = it.node()->idx;
05421             }
05422             if( v > maxval )
05423             {
05424                 maxval = v;
05425                 maxidx = it.node()->idx;
05426             }
05427         }
05428         if( _minval )
05429             *_minval = minval;
05430         if( _maxval )
05431             *_maxval = maxval;
05432     }
05433     else
05434         CV_Error( CV_StsUnsupportedFormat, "Only 32f and 64f are supported" );
05435 
05436     if( _minidx )
05437         for( i = 0; i < d; i++ )
05438             _minidx[i] = minidx[i];
05439     if( _maxidx )
05440         for( i = 0; i < d; i++ )
05441             _maxidx[i] = maxidx[i];
05442 }
05443 
05444 
05445 void normalize( const SparseMat& src, SparseMat& dst, double a, int norm_type )
05446 {
05447     double scale = 1;
05448     if( norm_type == CV_L2 || norm_type == CV_L1 || norm_type == CV_C )
05449     {
05450         scale = norm( src, norm_type );
05451         scale = scale > DBL_EPSILON ? a/scale : 0.;
05452     }
05453     else
05454         CV_Error( CV_StsBadArg, "Unknown/unsupported norm type" );
05455 
05456     src.convertTo( dst, -1, scale );
05457 }
05458 
05459 ////////////////////// RotatedRect //////////////////////
05460 
// Constructs a RotatedRect from three consecutive corners. _point1 and
// _point3 must be diagonally opposite; the two sides they define with
// _point2 must be perpendicular (asserted below).
RotatedRect::RotatedRect(const Point2f & _point1, const Point2f & _point2, const Point2f & _point3)
{
    // The center is the midpoint of the diagonal.
    Point2f  _center = 0.5f * (_point1 + _point3);
    Vec2f vecs[2];
    vecs[0] = Vec2f(_point1 - _point2);
    vecs[1] = Vec2f(_point2 - _point3);
    // check that given sides are perpendicular
    CV_Assert( abs(vecs[0].dot(vecs[1])) / (norm(vecs[0]) * norm(vecs[1])) <= FLT_EPSILON );

    // wd_i stores which vector (0,1) or (1,2) will make the width
    // One of them will definitely have slope within -1 to 1
    int wd_i = 0;
    if( abs(vecs[1][1]) < abs(vecs[1][0]) ) wd_i = 1;
    int ht_i = (wd_i + 1) % 2;

    // atan is safe here: the chosen width vector has |dy| <= |dx|, so the
    // angle lies in [-45, 45] degrees and dx is non-zero for a real rect.
    float _angle = atan(vecs[wd_i][1] / vecs[wd_i][0]) * 180.0f / (float) CV_PI;
    float _width = (float) norm(vecs[wd_i]);
    float _height = (float) norm(vecs[ht_i]);

    center = _center;
    size = Size2f (_width, _height);
    angle = _angle;
}
05484 
05485 void RotatedRect::points(Point2f  pt[]) const
05486 {
05487     double _angle = angle*CV_PI/180.;
05488     float b = (float)cos(_angle)*0.5f;
05489     float a = (float)sin(_angle)*0.5f;
05490 
05491     pt[0].x = center.x - a*size.height - b*size.width;
05492     pt[0].y = center.y + b*size.height - a*size.width;
05493     pt[1].x = center.x + a*size.height - b*size.width;
05494     pt[1].y = center.y - b*size.height - a*size.width;
05495     pt[2].x = 2*center.x - pt[0].x;
05496     pt[2].y = 2*center.y - pt[0].y;
05497     pt[3].x = 2*center.x - pt[1].x;
05498     pt[3].y = 2*center.y - pt[1].y;
05499 }
05500 
05501 Rect RotatedRect::boundingRect() const
05502 {
05503     Point2f  pt[4];
05504     points(pt);
05505     Rect r(cvFloor(std::min(std::min(std::min(pt[0].x, pt[1].x), pt[2].x), pt[3].x)),
05506            cvFloor(std::min(std::min(std::min(pt[0].y, pt[1].y), pt[2].y), pt[3].y)),
05507            cvCeil(std::max(std::max(std::max(pt[0].x, pt[1].x), pt[2].x), pt[3].x)),
05508            cvCeil(std::max(std::max(std::max(pt[0].y, pt[1].y), pt[2].y), pt[3].y)));
05509     r.width -= r.x - 1;
05510     r.height -= r.y - 1;
05511     return r;
05512 }
05513 
05514 }
05515 
05516 // glue
05517 
05518 CvMatND::CvMatND(const cv::Mat& m)
05519 {
05520     cvInitMatNDHeader(this, m.dims, m.size, m.type(), m.data );
05521     int i, d = m.dims;
05522     for( i = 0; i < d; i++ )
05523         dim[i].step = (int)m.step[i];
05524     type |= m.flags  & cv::Mat::CONTINUOUS_FLAG;
05525 }
05526 
05527 _IplImage::_IplImage(const cv::Mat& m)
05528 {
05529     CV_Assert( m.dims <= 2 );
05530     cvInitImageHeader(this, m.size(), cvIplDepth(m.flags ), m.channels());
05531     cvSetData(this, m.data, (int)m.step[0]);
05532 }
05533 
05534 CvSparseMat* cvCreateSparseMat(const cv::SparseMat& sm)
05535 {
05536     if( !sm.hdr )
05537         return 0;
05538 
05539     CvSparseMat* m = cvCreateSparseMat(sm.hdr->dims, sm.hdr->size, sm.type());
05540 
05541     cv::SparseMatConstIterator from = sm.begin();
05542     size_t i, N = sm.nzcount(), esz = sm.elemSize();
05543 
05544     for( i = 0; i < N; i++, ++from )
05545     {
05546         const cv::SparseMat::Node* n = from.node();
05547         uchar* to = cvPtrND(m, n->idx, 0, -2, 0);
05548         cv::copyElem(from.ptr, to, esz);
05549     }
05550     return m;
05551 }
05552 
05553 void CvSparseMat::copyToSparseMat(cv::SparseMat& m) const
05554 {
05555     m.create( dims, &size[0], type );
05556 
05557     CvSparseMatIterator it;
05558     CvSparseNode* n = cvInitSparseMatIterator(this, &it);
05559     size_t esz = m.elemSize();
05560 
05561     for( ; n != 0; n = cvGetNextSparseNode(&it) )
05562     {
05563         const int* idx = CV_NODE_IDX(this, n);
05564         uchar* to = m.newNode(idx, m.hash(idx));
05565         cv::copyElem((const uchar*)CV_NODE_VAL(this, n), to, esz);
05566     }
05567 }
05568 
05569 
05570 /* End of file. */
05571