Important changes to repositories hosted on mbed.com
Mbed-hosted Mercurial repositories are deprecated and are due to be permanently deleted in July 2026.
To keep a copy of this software, download the repository's Zip archive or clone it locally using Mercurial.
It is also possible to export all your personal repositories from the account settings page.
Dependents: RZ_A2M_Mbed_samples
features2d.hpp
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_FEATURES_2D_HPP
#define OPENCV_FEATURES_2D_HPP

#include "opencv2/core.hpp"
#include "opencv2/flann/miniflann.hpp"

/**
  @defgroup features2d 2D Features Framework
  @{
    @defgroup features2d_main Feature Detection and Description
    @defgroup features2d_match Descriptor Matchers

Matchers of keypoint descriptors in OpenCV have wrappers with a common interface that enables you to
easily switch between different algorithms solving the same problem. This section is devoted to
matching descriptors that are represented as vectors in a multidimensional space. All objects that
implement vector descriptor matchers inherit the DescriptorMatcher interface.
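For example, a minimal matching sketch (assuming two descriptor matrices desc1 and desc2
computed with a binary descriptor such as ORB):
@code
    cv::BFMatcher matcher(cv::NORM_HAMMING, true);   // crossCheck=true keeps only mutual best matches
    std::vector<cv::DMatch> matches;
    matcher.match(desc1, desc2, matches);            // best match in desc2 for every row of desc1
@endcode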

@note
   - An example explaining keypoint matching can be found at
     opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp
   - An example on descriptor matching evaluation can be found at
     opencv_source_code/samples/cpp/detector_descriptor_matcher_evaluation.cpp
   - An example on one to many image matching can be found at
     opencv_source_code/samples/cpp/matching_to_many_images.cpp

    @defgroup features2d_draw Drawing Function of Keypoints and Matches
    @defgroup features2d_category Object Categorization

This section describes approaches based on local 2D features and used to categorize objects.

@note
   - A complete Bag-Of-Words sample can be found at
     opencv_source_code/samples/cpp/bagofwords_classification.cpp
   - (Python) An example using the features2D framework to perform object categorization can be
     found at opencv_source_code/samples/python/find_obj.py

  @}
 */

namespace cv
{

//! @addtogroup features2d
//! @{

// //! writes vector of keypoints to the file storage
// CV_EXPORTS void write(FileStorage& fs, const String& name, const std::vector<KeyPoint>& keypoints);
// //! reads vector of keypoints from the specified file storage node
// CV_EXPORTS void read(const FileNode& node, CV_OUT std::vector<KeyPoint>& keypoints);

/** @brief A class filters a vector of keypoints.

 Because now it is difficult to provide a convenient interface for all usage scenarios of the
 keypoints filter class, it has only several needed by now static methods.
 */
class CV_EXPORTS KeyPointsFilter
{
public:
    KeyPointsFilter(){}

    /*
     * Remove keypoints within borderPixels of an image edge.
     */
    static void runByImageBorder( std::vector<KeyPoint>& keypoints, Size imageSize, int borderSize );
    /*
     * Remove keypoints of sizes out of range.
     */
    static void runByKeypointSize( std::vector<KeyPoint>& keypoints, float minSize,
                                   float maxSize=FLT_MAX );
    /*
     * Remove keypoints from some image by mask for pixels of this image.
     */
    static void runByPixelsMask( std::vector<KeyPoint>& keypoints, const Mat& mask );
    /*
     * Remove duplicated keypoints.
     */
    static void removeDuplicated( std::vector<KeyPoint>& keypoints );

    /*
     * Retain the specified number of the best keypoints (according to the response)
     */
    static void retainBest( std::vector<KeyPoint>& keypoints, int npoints );
};


/************************************ Base Classes ************************************/

/** @brief Abstract base class for 2D image feature detectors and descriptor extractors
*/
class CV_EXPORTS_W Feature2D : public virtual Algorithm
{
public:
    virtual ~Feature2D();

    /** @brief Detects keypoints in an image (first variant) or image set (second variant).

    @param image Image.
    @param keypoints The detected keypoints. In the second variant of the method keypoints[i] is a set
    of keypoints detected in images[i] .
    @param mask Mask specifying where to look for keypoints (optional). It must be a 8-bit integer
    matrix with non-zero values in the region of interest.
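
    A minimal detection sketch (assuming a grayscale cv::Mat named img is already loaded):
    @code
        std::vector<cv::KeyPoint> keypoints;
        cv::Ptr<cv::Feature2D> detector = cv::ORB::create();
        detector->detect(img, keypoints);            // keypoints only
        cv::Mat descriptors;
        detector->detectAndCompute(img, cv::noArray(), keypoints, descriptors);  // keypoints + descriptors
    @endcode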
    */
    CV_WRAP virtual void detect( InputArray image,
                                 CV_OUT std::vector<KeyPoint>& keypoints,
                                 InputArray mask=noArray() );

    /** @overload
    @param images Image set.
    @param keypoints The detected keypoints. In the second variant of the method keypoints[i] is a set
    of keypoints detected in images[i] .
    @param masks Masks for each input image specifying where to look for keypoints (optional).
    masks[i] is a mask for images[i].
    */
    CV_WRAP virtual void detect( InputArrayOfArrays images,
                                 CV_OUT std::vector<std::vector<KeyPoint> >& keypoints,
                                 InputArrayOfArrays masks=noArray() );

    /** @brief Computes the descriptors for a set of keypoints detected in an image (first variant) or image set
    (second variant).

    @param image Image.
    @param keypoints Input collection of keypoints. Keypoints for which a descriptor cannot be
    computed are removed. Sometimes new keypoints can be added, for example: SIFT duplicates keypoint
    with several dominant orientations (for each orientation).
    @param descriptors Computed descriptors. In the second variant of the method descriptors[i] are
    descriptors computed for a keypoints[i]. Row j is the keypoints (or keypoints[i]) is the
    descriptor for keypoint j-th keypoint.
    */
    CV_WRAP virtual void compute( InputArray image,
                                  CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints,
                                  OutputArray descriptors );

    /** @overload

    @param images Image set.
    @param keypoints Input collection of keypoints. Keypoints for which a descriptor cannot be
    computed are removed. Sometimes new keypoints can be added, for example: SIFT duplicates keypoint
    with several dominant orientations (for each orientation).
    @param descriptors Computed descriptors. In the second variant of the method descriptors[i] are
    descriptors computed for a keypoints[i]. Row j is the keypoints (or keypoints[i]) is the
    descriptor for keypoint j-th keypoint.
    */
    CV_WRAP virtual void compute( InputArrayOfArrays images,
                                  CV_OUT CV_IN_OUT std::vector<std::vector<KeyPoint> >& keypoints,
                                  OutputArrayOfArrays descriptors );

    /** Detects keypoints and computes the descriptors */
    CV_WRAP virtual void detectAndCompute( InputArray image, InputArray mask,
                                           CV_OUT std::vector<KeyPoint>& keypoints,
                                           OutputArray descriptors,
                                           bool useProvidedKeypoints=false );

    CV_WRAP virtual int descriptorSize() const;
    CV_WRAP virtual int descriptorType() const;
    CV_WRAP virtual int defaultNorm() const;

    CV_WRAP void write( const String& fileName ) const;

    CV_WRAP void read( const String& fileName );

    virtual void write( FileStorage&) const;

    virtual void read( const FileNode&);

    //! Return true if detector object is empty
    CV_WRAP virtual bool empty() const;
};

/** Feature detectors in OpenCV have wrappers with a common interface that enables you to easily switch
between different algorithms solving the same problem. All objects that implement keypoint detectors
inherit the FeatureDetector interface. */
typedef Feature2D FeatureDetector;

/** Extractors of keypoint descriptors in OpenCV have wrappers with a common interface that enables you
to easily switch between different algorithms solving the same problem. This section is devoted to
computing descriptors represented as vectors in a multidimensional space. All objects that implement
the vector descriptor extractors inherit the DescriptorExtractor interface.
 */
typedef Feature2D DescriptorExtractor;

//! @addtogroup features2d_main
//! @{

/** @brief Class implementing the BRISK keypoint detector and descriptor extractor, described in @cite LCS11 .
 */
class CV_EXPORTS_W BRISK : public Feature2D
{
public:
    /** @brief The BRISK constructor

    @param thresh AGAST detection threshold score.
    @param octaves detection octaves. Use 0 to do single scale.
    @param patternScale apply this scale to the pattern used for sampling the neighbourhood of a
    keypoint.
    */
    CV_WRAP static Ptr<BRISK> create(int thresh=30, int octaves=3, float patternScale=1.0f);

    /** @brief The BRISK constructor for a custom pattern

    @param radiusList defines the radii (in pixels) where the samples around a keypoint are taken (for
    keypoint scale 1).
    @param numberList defines the number of sampling points on the sampling circle. Must be the same
    size as radiusList..
    @param dMax threshold for the short pairings used for descriptor formation (in pixels for keypoint
    scale 1).
    @param dMin threshold for the long pairings used for orientation determination (in pixels for
    keypoint scale 1).
    @param indexChange index remapping of the bits. */
    CV_WRAP static Ptr<BRISK> create(const std::vector<float> &radiusList, const std::vector<int> &numberList,
        float dMax=5.85f, float dMin=8.2f, const std::vector<int>& indexChange=std::vector<int>());
};

/** @brief Class implementing the ORB (*oriented BRIEF*) keypoint detector and descriptor extractor

described in @cite RRKB11 . The algorithm uses FAST in pyramids to detect stable keypoints, selects
the strongest features using FAST or Harris response, finds their orientation using first-order
moments and computes the descriptors using BRIEF (where the coordinates of random point pairs (or
k-tuples) are rotated according to the measured orientation).
 */
class CV_EXPORTS_W ORB : public Feature2D
{
public:
    enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };

    /** @brief The ORB constructor

    @param nfeatures The maximum number of features to retain.
    @param scaleFactor Pyramid decimation ratio, greater than 1. scaleFactor==2 means the classical
    pyramid, where each next level has 4x less pixels than the previous, but such a big scale factor
    will degrade feature matching scores dramatically. On the other hand, too close to 1 scale factor
    will mean that to cover certain scale range you will need more pyramid levels and so the speed
    will suffer.
    @param nlevels The number of pyramid levels. The smallest level will have linear size equal to
    input_image_linear_size/pow(scaleFactor, nlevels).
    @param edgeThreshold This is size of the border where the features are not detected. It should
    roughly match the patchSize parameter.
    @param firstLevel It should be 0 in the current implementation.
    @param WTA_K The number of points that produce each element of the oriented BRIEF descriptor. The
    default value 2 means the BRIEF where we take a random point pair and compare their brightnesses,
    so we get 0/1 response. Other possible values are 3 and 4. For example, 3 means that we take 3
    random points (of course, those point coordinates are random, but they are generated from the
    pre-defined seed, so each element of BRIEF descriptor is computed deterministically from the pixel
    rectangle), find point of maximum brightness and output index of the winner (0, 1 or 2). Such
    output will occupy 2 bits, and therefore it will need a special variant of Hamming distance,
    denoted as NORM_HAMMING2 (2 bits per bin). When WTA_K=4, we take 4 random points to compute each
    bin (that will also occupy 2 bits with possible values 0, 1, 2 or 3).
    @param scoreType The default HARRIS_SCORE means that Harris algorithm is used to rank features
    (the score is written to KeyPoint::score and is used to retain best nfeatures features);
    FAST_SCORE is alternative value of the parameter that produces slightly less stable keypoints,
    but it is a little faster to compute.
    @param patchSize size of the patch used by the oriented BRIEF descriptor. Of course, on smaller
    pyramid layers the perceived image area covered by a feature will be larger.
    @param fastThreshold
    */
    CV_WRAP static Ptr<ORB> create(int nfeatures=500, float scaleFactor=1.2f, int nlevels=8, int edgeThreshold=31,
        int firstLevel=0, int WTA_K=2, int scoreType=ORB::HARRIS_SCORE, int patchSize=31, int fastThreshold=20);

    CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0;
    CV_WRAP virtual int getMaxFeatures() const = 0;

    CV_WRAP virtual void setScaleFactor(double scaleFactor) = 0;
    CV_WRAP virtual double getScaleFactor() const = 0;

    CV_WRAP virtual void setNLevels(int nlevels) = 0;
    CV_WRAP virtual int getNLevels() const = 0;

    CV_WRAP virtual void setEdgeThreshold(int edgeThreshold) = 0;
    CV_WRAP virtual int getEdgeThreshold() const = 0;

    CV_WRAP virtual void setFirstLevel(int firstLevel) = 0;
    CV_WRAP virtual int getFirstLevel() const = 0;

    CV_WRAP virtual void setWTA_K(int wta_k) = 0;
    CV_WRAP virtual int getWTA_K() const = 0;

    CV_WRAP virtual void setScoreType(int scoreType) = 0;
    CV_WRAP virtual int getScoreType() const = 0;

    CV_WRAP virtual void setPatchSize(int patchSize) = 0;
    CV_WRAP virtual int getPatchSize() const = 0;

    CV_WRAP virtual void setFastThreshold(int fastThreshold) = 0;
    CV_WRAP virtual int getFastThreshold() const = 0;
};

/** @brief Maximally stable extremal region extractor

The class encapsulates all the parameters of the %MSER extraction algorithm (see [wiki
article](http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions)).

- there are two different implementation of %MSER: one for grey image, one for color image

- the grey image algorithm is taken from: @cite nister2008linear ; the paper claims to be faster
than union-find method; it actually get 1.5~2m/s on my centrino L7200 1.2GHz laptop.

- the color image algorithm is taken from: @cite forssen2007maximally ; it should be much slower
than grey image method ( 3~4 times ); the chi_table.h file is taken directly from paper's source
code which is distributed under GPL.

- (Python) A complete example showing the use of the %MSER detector can be found at samples/python/mser.py
 */
class CV_EXPORTS_W MSER : public Feature2D
{
public:
    /** @brief Full consturctor for %MSER detector

    @param _delta it compares \f$(size_{i}-size_{i-delta})/size_{i-delta}\f$
    @param _min_area prune the area which smaller than minArea
    @param _max_area prune the area which bigger than maxArea
    @param _max_variation prune the area have simliar size to its children
    @param _min_diversity for color image, trace back to cut off mser with diversity less than min_diversity
    @param _max_evolution for color image, the evolution steps
    @param _area_threshold for color image, the area threshold to cause re-initialize
    @param _min_margin for color image, ignore too small margin
    @param _edge_blur_size for color image, the aperture size for edge blur
    */
    CV_WRAP static Ptr<MSER> create( int _delta=5, int _min_area=60, int _max_area=14400,
          double _max_variation=0.25, double _min_diversity=.2,
          int _max_evolution=200, double _area_threshold=1.01,
          double _min_margin=0.003, int _edge_blur_size=5 );

    /** @brief Detect %MSER regions

    @param image input image (8UC1, 8UC3 or 8UC4, must be greater or equal than 3x3)
    @param msers resulting list of point sets
    @param bboxes resulting bounding boxes
    */
    CV_WRAP virtual void detectRegions( InputArray image,
                                        CV_OUT std::vector<std::vector<Point> >& msers,
                                        CV_OUT std::vector<Rect>& bboxes ) = 0;

    CV_WRAP virtual void setDelta(int delta) = 0;
    CV_WRAP virtual int getDelta() const = 0;

    CV_WRAP virtual void setMinArea(int minArea) = 0;
    CV_WRAP virtual int getMinArea() const = 0;

    CV_WRAP virtual void setMaxArea(int maxArea) = 0;
    CV_WRAP virtual int getMaxArea() const = 0;

    CV_WRAP virtual void setPass2Only(bool f) = 0;
    CV_WRAP virtual bool getPass2Only() const = 0;
};

/** @overload */
CV_EXPORTS void FAST ( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
                       int threshold, bool nonmaxSuppression=true );

/** @brief Detects corners using the FAST algorithm

@param image grayscale image where keypoints (corners) are detected.
@param keypoints keypoints detected on the image.
@param threshold threshold on difference between intensity of the central pixel and pixels of a
circle around this pixel.
@param nonmaxSuppression if true, non-maximum suppression is applied to detected corners
(keypoints).
@param type one of the three neighborhoods as defined in the paper:
FastFeatureDetector::TYPE_9_16, FastFeatureDetector::TYPE_7_12,
FastFeatureDetector::TYPE_5_8

Detects corners using the FAST algorithm by @cite Rosten06 .

@note In Python API, types are given as cv2.FAST_FEATURE_DETECTOR_TYPE_5_8,
cv2.FAST_FEATURE_DETECTOR_TYPE_7_12 and cv2.FAST_FEATURE_DETECTOR_TYPE_9_16. For corner
detection, use cv2.FAST.detect() method.
*/
CV_EXPORTS void FAST ( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
                       int threshold, bool nonmaxSuppression, int type );

//! @} features2d_main

//! @addtogroup features2d_main
//! @{

/** @brief Wrapping class for feature detection using the FAST method. :
 */
class CV_EXPORTS_W FastFeatureDetector : public Feature2D
{
public:
    enum
    {
        TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2,
        THRESHOLD = 10000, NONMAX_SUPPRESSION=10001, FAST_N=10002,
    };

    CV_WRAP static Ptr<FastFeatureDetector> create( int threshold=10,
                                                    bool nonmaxSuppression=true,
                                                    int type=FastFeatureDetector::TYPE_9_16 );

    CV_WRAP virtual void setThreshold(int threshold) = 0;
    CV_WRAP virtual int getThreshold() const = 0;

    CV_WRAP virtual void setNonmaxSuppression(bool f) = 0;
    CV_WRAP virtual bool getNonmaxSuppression() const = 0;

    CV_WRAP virtual void setType(int type) = 0;
    CV_WRAP virtual int getType() const = 0;
};

/** @overload */
CV_EXPORTS void AGAST ( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
                        int threshold, bool nonmaxSuppression=true );

/** @brief Detects corners using the AGAST algorithm

@param image grayscale image where keypoints (corners) are detected.
@param keypoints keypoints detected on the image.
@param threshold threshold on difference between intensity of the central pixel and pixels of a
circle around this pixel.
@param nonmaxSuppression if true, non-maximum suppression is applied to detected corners
(keypoints).
@param type one of the four neighborhoods as defined in the paper:
AgastFeatureDetector::AGAST_5_8, AgastFeatureDetector::AGAST_7_12d,
AgastFeatureDetector::AGAST_7_12s, AgastFeatureDetector::OAST_9_16

For non-Intel platforms, there is a tree optimised variant of AGAST with same numerical results.
The 32-bit binary tree tables were generated automatically from original code using perl script.
The perl script and examples of tree generation are placed in features2d/doc folder.
Detects corners using the AGAST algorithm by @cite mair2010_agast .

*/
CV_EXPORTS void AGAST ( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
                        int threshold, bool nonmaxSuppression, int type );
//! @} features2d_main

//! @addtogroup features2d_main
//! @{

/** @brief Wrapping class for feature detection using the AGAST method. :
 */
class CV_EXPORTS_W AgastFeatureDetector : public Feature2D
{
public:
    enum
    {
        AGAST_5_8 = 0, AGAST_7_12d = 1, AGAST_7_12s = 2, OAST_9_16 = 3,
        THRESHOLD = 10000, NONMAX_SUPPRESSION = 10001,
    };

    CV_WRAP static Ptr<AgastFeatureDetector> create( int threshold=10,
                                                     bool nonmaxSuppression=true,
                                                     int type=AgastFeatureDetector::OAST_9_16 );

    CV_WRAP virtual void setThreshold(int threshold) = 0;
    CV_WRAP virtual int getThreshold() const = 0;

    CV_WRAP virtual void setNonmaxSuppression(bool f) = 0;
    CV_WRAP virtual bool getNonmaxSuppression() const = 0;

    CV_WRAP virtual void setType(int type) = 0;
    CV_WRAP virtual int getType() const = 0;
};

/** @brief Wrapping class for feature detection using the goodFeaturesToTrack function. :
 */
class CV_EXPORTS_W GFTTDetector : public Feature2D
{
public:
    CV_WRAP static Ptr<GFTTDetector> create( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1,
                                             int blockSize=3, bool useHarrisDetector=false, double k=0.04 );
    CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0;
    CV_WRAP virtual int getMaxFeatures() const = 0;

    CV_WRAP virtual void setQualityLevel(double qlevel) = 0;
    CV_WRAP virtual double getQualityLevel() const = 0;

    CV_WRAP virtual void setMinDistance(double minDistance) = 0;
    CV_WRAP virtual double getMinDistance() const = 0;

    CV_WRAP virtual void setBlockSize(int blockSize) = 0;
    CV_WRAP virtual int getBlockSize() const = 0;

    CV_WRAP virtual void setHarrisDetector(bool val) = 0;
    CV_WRAP virtual bool getHarrisDetector() const = 0;

    CV_WRAP virtual void setK(double k) = 0;
    CV_WRAP virtual double getK() const = 0;
};

/** @brief Class for extracting blobs from an image. :

The class implements a simple algorithm for extracting blobs from an image:

1. Convert the source image to binary images by applying thresholding with several thresholds from
   minThreshold (inclusive) to maxThreshold (exclusive) with distance thresholdStep between
   neighboring thresholds.
2. Extract connected components from every binary image by findContours and calculate their
   centers.
3. Group centers from several binary images by their coordinates. Close centers form one group that
   corresponds to one blob, which is controlled by the minDistBetweenBlobs parameter.
4. From the groups, estimate final centers of blobs and their radiuses and return as locations and
   sizes of keypoints.

This class performs several filtrations of returned blobs. You should set filterBy\* to true/false
to turn on/off corresponding filtration. Available filtrations:

- **By color**. This filter compares the intensity of a binary image at the center of a blob to
blobColor. If they differ, the blob is filtered out. Use blobColor = 0 to extract dark blobs
and blobColor = 255 to extract light blobs.
- **By area**. Extracted blobs have an area between minArea (inclusive) and maxArea (exclusive).
- **By circularity**. Extracted blobs have circularity
(\f$\frac{4*\pi*Area}{perimeter * perimeter}\f$) between minCircularity (inclusive) and
maxCircularity (exclusive).
- **By ratio of the minimum inertia to maximum inertia**. Extracted blobs have this ratio
between minInertiaRatio (inclusive) and maxInertiaRatio (exclusive).
- **By convexity**. Extracted blobs have convexity (area / area of blob convex hull) between
minConvexity (inclusive) and maxConvexity (exclusive).

Default values of parameters are tuned to extract dark circular blobs.
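
A minimal usage sketch (assuming a grayscale cv::Mat named img; the parameter values are only
illustrative):
@code
    cv::SimpleBlobDetector::Params params;
    params.filterByArea = true;
    params.minArea = 100;                 // ignore very small blobs
    params.filterByCircularity = false;

    cv::Ptr<cv::SimpleBlobDetector> blobDetector = cv::SimpleBlobDetector::create(params);
    std::vector<cv::KeyPoint> blobs;
    blobDetector->detect(img, blobs);     // blob centers and sizes come back as keypoints
@endcode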
 */
class CV_EXPORTS_W SimpleBlobDetector : public Feature2D
{
public:
  struct CV_EXPORTS_W_SIMPLE Params
  {
      CV_WRAP Params();
      CV_PROP_RW float thresholdStep;
      CV_PROP_RW float minThreshold;
      CV_PROP_RW float maxThreshold;
      CV_PROP_RW size_t minRepeatability;
      CV_PROP_RW float minDistBetweenBlobs;

      CV_PROP_RW bool filterByColor;
      CV_PROP_RW uchar blobColor;

      CV_PROP_RW bool filterByArea;
      CV_PROP_RW float minArea, maxArea;

      CV_PROP_RW bool filterByCircularity;
      CV_PROP_RW float minCircularity, maxCircularity;

      CV_PROP_RW bool filterByInertia;
      CV_PROP_RW float minInertiaRatio, maxInertiaRatio;

      CV_PROP_RW bool filterByConvexity;
      CV_PROP_RW float minConvexity, maxConvexity;

      void read( const FileNode& fn );
      void write( FileStorage& fs ) const;
  };

  CV_WRAP static Ptr<SimpleBlobDetector>
    create(const SimpleBlobDetector::Params &parameters = SimpleBlobDetector::Params());
};

//! @} features2d_main

//! @addtogroup features2d_main
//! @{

/** @brief Class implementing the KAZE keypoint detector and descriptor extractor, described in @cite ABD12 .

@note AKAZE descriptor can only be used with KAZE or AKAZE keypoints .. [ABD12] KAZE Features. Pablo
F. Alcantarilla, Adrien Bartoli and Andrew J. Davison. In European Conference on Computer Vision
(ECCV), Fiorenze, Italy, October 2012.
*/
class CV_EXPORTS_W KAZE : public Feature2D
{
public:
    enum
    {
        DIFF_PM_G1 = 0,
        DIFF_PM_G2 = 1,
        DIFF_WEICKERT = 2,
        DIFF_CHARBONNIER = 3
    };

    /** @brief The KAZE constructor

    @param extended Set to enable extraction of extended (128-byte) descriptor.
    @param upright Set to enable use of upright descriptors (non rotation-invariant).
    @param threshold Detector response threshold to accept point
    @param nOctaves Maximum octave evolution of the image
    @param nOctaveLayers Default number of sublevels per scale level
    @param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or
    DIFF_CHARBONNIER
    */
    CV_WRAP static Ptr<KAZE> create(bool extended=false, bool upright=false,
                                    float threshold = 0.001f,
                                    int nOctaves = 4, int nOctaveLayers = 4,
                                    int diffusivity = KAZE::DIFF_PM_G2);

    CV_WRAP virtual void setExtended(bool extended) = 0;
    CV_WRAP virtual bool getExtended() const = 0;

    CV_WRAP virtual void setUpright(bool upright) = 0;
    CV_WRAP virtual bool getUpright() const = 0;

    CV_WRAP virtual void setThreshold(double threshold) = 0;
    CV_WRAP virtual double getThreshold() const = 0;

    CV_WRAP virtual void setNOctaves(int octaves) = 0;
    CV_WRAP virtual int getNOctaves() const = 0;

    CV_WRAP virtual void setNOctaveLayers(int octaveLayers) = 0;
    CV_WRAP virtual int getNOctaveLayers() const = 0;

    CV_WRAP virtual void setDiffusivity(int diff) = 0;
    CV_WRAP virtual int getDiffusivity() const = 0;
};

/** @brief Class implementing the AKAZE keypoint detector and descriptor extractor, described in @cite ANB13 . :

@note AKAZE descriptors can only be used with KAZE or AKAZE keypoints. Try to avoid using *extract*
and *detect* instead of *operator()* due to performance reasons. .. [ANB13] Fast Explicit Diffusion
for Accelerated Features in Nonlinear Scale Spaces. Pablo F. Alcantarilla, Jesús Nuevo and Adrien
Bartoli. In British Machine Vision Conference (BMVC), Bristol, UK, September 2013.
*/
class CV_EXPORTS_W AKAZE : public Feature2D
{
public:
    // AKAZE descriptor type
    enum
    {
        DESCRIPTOR_KAZE_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation
        DESCRIPTOR_KAZE = 3,
        DESCRIPTOR_MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation
        DESCRIPTOR_MLDB = 5
    };

    /** @brief The AKAZE constructor

    @param descriptor_type Type of the extracted descriptor: DESCRIPTOR_KAZE,
    DESCRIPTOR_KAZE_UPRIGHT, DESCRIPTOR_MLDB or DESCRIPTOR_MLDB_UPRIGHT.
    @param descriptor_size Size of the descriptor in bits. 0 -> Full size
    @param descriptor_channels Number of channels in the descriptor (1, 2, 3)
    @param threshold Detector response threshold to accept point
    @param nOctaves Maximum octave evolution of the image
    @param nOctaveLayers Default number of sublevels per scale level
    @param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or
    DIFF_CHARBONNIER
    */
    CV_WRAP static Ptr<AKAZE> create(int descriptor_type=AKAZE::DESCRIPTOR_MLDB,
                                     int descriptor_size = 0, int descriptor_channels = 3,
                                     float threshold = 0.001f, int nOctaves = 4,
                                     int nOctaveLayers = 4, int diffusivity = KAZE::DIFF_PM_G2);

    CV_WRAP virtual void setDescriptorType(int dtype) = 0;
    CV_WRAP virtual int getDescriptorType() const = 0;

    CV_WRAP virtual void setDescriptorSize(int dsize) = 0;
    CV_WRAP virtual int getDescriptorSize() const = 0;

    CV_WRAP virtual void setDescriptorChannels(int dch) = 0;
    CV_WRAP virtual int getDescriptorChannels() const = 0;

    CV_WRAP virtual void setThreshold(double threshold) = 0;
    CV_WRAP virtual double getThreshold() const = 0;

    CV_WRAP virtual void setNOctaves(int octaves) = 0;
    CV_WRAP virtual int getNOctaves() const = 0;

    CV_WRAP virtual void setNOctaveLayers(int octaveLayers) = 0;
    CV_WRAP virtual int getNOctaveLayers() const = 0;

    CV_WRAP virtual void setDiffusivity(int diff) = 0;
    CV_WRAP virtual int getDiffusivity() const = 0;
};

//! @} features2d_main

/****************************************************************************************\
*                                       Distance                                         *
\****************************************************************************************/

template<typename T>
struct CV_EXPORTS Accumulator
{
    typedef T Type;
};

template<> struct Accumulator<unsigned char>  { typedef float Type; };
template<> struct Accumulator<unsigned short> { typedef float Type; };
template<> struct Accumulator<char>   { typedef float Type; };
template<> struct Accumulator<short>  { typedef float Type; };

/*
 * Squared Euclidean distance functor
 */
template<class T>
struct CV_EXPORTS SL2
{
    enum { normType = NORM_L2SQR };
    typedef T ValueType;
    typedef typename Accumulator<T>::Type ResultType;

    ResultType operator()( const T* a, const T* b, int size ) const
    {
        return normL2Sqr<ValueType, ResultType>(a, b, size);
    }
};

/*
 * Euclidean distance functor
 */
template<class T>
struct CV_EXPORTS L2
{
    enum { normType = NORM_L2 };
    typedef T ValueType;
    typedef typename Accumulator<T>::Type ResultType;

    ResultType operator()( const T* a, const T* b, int size ) const
    {
        return (ResultType)std::sqrt((double)normL2Sqr<ValueType, ResultType>(a, b, size));
    }
};

/*
 * Manhattan distance (city block distance) functor
 */
template<class T>
struct CV_EXPORTS L1
{
    enum { normType = NORM_L1 };
    typedef T ValueType;
    typedef typename Accumulator<T>::Type ResultType;

    ResultType operator()( const T* a, const T* b, int size ) const
    {
        return normL1<ValueType, ResultType>(a, b, size);
    }
};

/****************************************************************************************\
*                                  DescriptorMatcher                                     *
\****************************************************************************************/

//! @addtogroup features2d_match
//! @{

/** @brief Abstract base class for matching keypoint descriptors.

It has two groups of match methods: for matching descriptors of an image with another image or with
an image set.
*/
class CV_EXPORTS_W DescriptorMatcher : public Algorithm
{
public:
    enum
    {
        FLANNBASED = 1,
        BRUTEFORCE = 2,
        BRUTEFORCE_L1 = 3,
        BRUTEFORCE_HAMMING = 4,
        BRUTEFORCE_HAMMINGLUT = 5,
        BRUTEFORCE_SL2 = 6
    };
    virtual ~DescriptorMatcher();

    /** @brief Adds descriptors to train a CPU(trainDescCollectionis) or GPU(utrainDescCollectionis) descriptor
    collection.

    If the collection is not empty, the new descriptors are added to existing train descriptors.

    @param descriptors Descriptors to add. Each descriptors[i] is a set of descriptors from the same
    train image.
    */
    CV_WRAP virtual void add( InputArrayOfArrays descriptors );

    /** @brief Returns a constant link to the train descriptor collection trainDescCollection .
    */
    CV_WRAP const std::vector<Mat>& getTrainDescriptors() const;

    /** @brief Clears the train descriptor collections.
    */
    CV_WRAP virtual void clear();

    /** @brief Returns true if there are no train descriptors in the both collections.
    */
    CV_WRAP virtual bool empty() const;

    /** @brief Returns true if the descriptor matcher supports masking permissible matches.
    */
    CV_WRAP virtual bool isMaskSupported() const = 0;

    /** @brief Trains a descriptor matcher

    Trains a descriptor matcher (for example, the flann index). In all methods to match, the method
    train() is run every time before matching. Some descriptor matchers (for example, BruteForceMatcher)
    have an empty implementation of this method. Other matchers really train their inner structures (for
    example, FlannBasedMatcher trains flann::Index ).
    */
    CV_WRAP virtual void train();

    /** @brief Finds the best match for each descriptor from a query set.

    @param queryDescriptors Query set of descriptors.
    @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
    collection stored in the class object.
    @param matches Matches. If a query descriptor is masked out in mask , no match is added for this
    descriptor. So, matches size may be smaller than the query descriptors count.
    @param mask Mask specifying permissible matches between an input query and train matrices of
    descriptors.

    In the first variant of this method, the train descriptors are passed as an input argument. In the
    second variant of the method, train descriptors collection that was set by DescriptorMatcher::add is
    used. Optional mask (or masks) can be passed to specify which query and training descriptors can be
    matched. Namely, queryDescriptors[i] can be matched with trainDescriptors[j] only if
    mask.at<uchar>(i,j) is non-zero.
    */
    CV_WRAP void match( InputArray queryDescriptors, InputArray trainDescriptors,
                        CV_OUT std::vector<DMatch>& matches, InputArray mask=noArray() ) const;

    /** @brief Finds the k best matches for each descriptor from a query set.

    @param queryDescriptors Query set of descriptors.
    @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
    collection stored in the class object.
    @param mask Mask specifying permissible matches between an input query and train matrices of
    descriptors.
    @param matches Matches. Each matches[i] is k or less matches for the same query descriptor.
    @param k Count of best matches found per each query descriptor or less if a query descriptor has
    less than k possible matches in total.
    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
    false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
    the matches vector does not contain matches for fully masked-out query descriptors.

    These extended variants of DescriptorMatcher::match methods find several best matches for each query
    descriptor. The matches are returned in the distance increasing order. See DescriptorMatcher::match
    for the details about query and train descriptors.
    */
    CV_WRAP void knnMatch( InputArray queryDescriptors, InputArray trainDescriptors,
                           CV_OUT std::vector<std::vector<DMatch> >& matches, int k,
                           InputArray mask=noArray(), bool compactResult=false ) const;

    /** @brief For each query descriptor, finds the training descriptors not farther than the specified distance.

    @param queryDescriptors Query set of descriptors.
    @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
    collection stored in the class object.
    @param matches Found matches.
    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
    false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
    the matches vector does not contain matches for fully masked-out query descriptors.
    @param maxDistance Threshold for the distance between matched descriptors. Distance means here
    metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured
    in Pixels)!
    @param mask Mask specifying permissible matches between an input query and train matrices of
    descriptors.

    For each query descriptor, the methods find such training descriptors that the distance between the
    query descriptor and the training descriptor is equal or smaller than maxDistance. Found matches are
    returned in the distance increasing order.
    */
    CV_WRAP void radiusMatch( InputArray queryDescriptors, InputArray trainDescriptors,
                              CV_OUT std::vector<std::vector<DMatch> >& matches, float maxDistance,
                              InputArray mask=noArray(), bool compactResult=false ) const;

    /** @overload
    @param queryDescriptors Query set of descriptors.
    @param matches Matches. If a query descriptor is masked out in mask , no match is added for this
    descriptor. So, matches size may be smaller than the query descriptors count.
    @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
    descriptors and stored train descriptors from the i-th image trainDescCollection[i].
    */
    CV_WRAP void match( InputArray queryDescriptors, CV_OUT std::vector<DMatch>& matches,
                        InputArrayOfArrays masks=noArray() );
    /** @overload
    @param queryDescriptors Query set of descriptors.
    @param matches Matches. Each matches[i] is k or less matches for the same query descriptor.
    @param k Count of best matches found per each query descriptor or less if a query descriptor has
    less than k possible matches in total.
    @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
    descriptors and stored train descriptors from the i-th image trainDescCollection[i].
    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
    false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
    the matches vector does not contain matches for fully masked-out query descriptors.
    */
    CV_WRAP void knnMatch( InputArray queryDescriptors, CV_OUT std::vector<std::vector<DMatch> >& matches, int k,
                           InputArrayOfArrays masks=noArray(), bool compactResult=false );
    /** @overload
    @param queryDescriptors Query set of descriptors.
    @param matches Found matches.
    @param maxDistance Threshold for the distance between matched descriptors. Distance means here
    metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured
    in Pixels)!
    @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
    descriptors and stored train descriptors from the i-th image trainDescCollection[i].
    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
    false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
    the matches vector does not contain matches for fully masked-out query descriptors.
    */
    CV_WRAP void radiusMatch( InputArray queryDescriptors, CV_OUT std::vector<std::vector<DMatch> >& matches, float maxDistance,
                              InputArrayOfArrays masks=noArray(), bool compactResult=false );


    CV_WRAP void write( const String& fileName ) const
    {
        FileStorage fs(fileName, FileStorage::WRITE);
        write(fs);
    }

    CV_WRAP void read( const String& fileName )
    {
        FileStorage fs(fileName, FileStorage::READ);
        read(fs.root());
    }
    // Reads matcher object from a file node
    virtual void read( const FileNode& );
    // Writes matcher object to a file storage
    virtual void write( FileStorage& ) const;

    /** @brief Clones the matcher.

    @param emptyTrainData If emptyTrainData is false, the method creates a deep copy of the object,
    that is, copies both parameters and train data. If emptyTrainData is true, the method creates an
    object copy with the current parameters but with empty train data.
    */
    CV_WRAP virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const = 0;

    /** @brief Creates a descriptor matcher of a given type with the default parameters (using default
    constructor).

    @param descriptorMatcherType Descriptor matcher type. Now the following matcher types are
    supported:
    -   `BruteForce` (it uses L2 )
    -   `BruteForce-L1`
    -   `BruteForce-Hamming`
    -   `BruteForce-Hamming(2)`
    -   `FlannBased`
    */
    CV_WRAP static Ptr<DescriptorMatcher> create( const String& descriptorMatcherType );

    CV_WRAP static Ptr<DescriptorMatcher> create( int matcherType );

protected:
    /**
     * Class to work with descriptors from several images as with one merged matrix.
     * It is used e.g. in FlannBasedMatcher.
     */
    class CV_EXPORTS DescriptorCollection
    {
    public:
        DescriptorCollection();
        DescriptorCollection( const DescriptorCollection& collection );
        virtual ~DescriptorCollection();

        // Vector of matrices "descriptors" will be merged to one matrix "mergedDescriptors" here.
        void set( const std::vector<Mat>& descriptors );
        virtual void clear();

        const Mat& getDescriptors() const;
        const Mat getDescriptor( int imgIdx, int localDescIdx ) const;
        const Mat getDescriptor( int globalDescIdx ) const;
        void getLocalIdx( int globalDescIdx, int& imgIdx, int& localDescIdx ) const;

        int size() const;

    protected:
        Mat mergedDescriptors;
        std::vector<int> startIdxs;
    };

    //! In fact the matching is implemented only by the following two methods. These methods suppose
    //! that the class object has been trained already. Public match methods call these methods
    //! after calling train().
    virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,
                               InputArrayOfArrays masks=noArray(), bool compactResult=false ) = 0;
    virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,
                                  InputArrayOfArrays masks=noArray(), bool compactResult=false ) = 0;

    static bool isPossibleMatch( InputArray mask, int queryIdx, int trainIdx );
    static bool isMaskedOut( InputArrayOfArrays masks, int queryIdx );

    static Mat clone_op( Mat m ) { return m.clone(); }
    void checkMasks( InputArrayOfArrays masks, int queryDescriptorsCount ) const;

    //! Collection of descriptors from train images.
    std::vector<Mat> trainDescCollection;
    std::vector<UMat> utrainDescCollection;
};

/** @brief Brute-force descriptor matcher.

For each descriptor in the first set, this matcher finds the closest descriptor in the second set
by trying each one. This descriptor matcher supports masking permissible matches of descriptor
sets.
*/
class CV_EXPORTS_W BFMatcher : public DescriptorMatcher
{
public:
    /** @brief Brute-force matcher constructor (obsolete). Please use BFMatcher.create()
     *
     *
    */
    CV_WRAP BFMatcher( int normType=NORM_L2, bool crossCheck=false );

    virtual ~BFMatcher() {}

    virtual bool isMaskSupported() const { return true; }

    /* @brief Brute-force matcher create method.
    @param normType One of NORM_L1, NORM_L2, NORM_HAMMING, NORM_HAMMING2. L1 and L2 norms are
    preferable choices for SIFT and SURF descriptors, NORM_HAMMING should be used with ORB, BRISK and
    BRIEF, NORM_HAMMING2 should be used with ORB when WTA_K==3 or 4 (see ORB::ORB constructor
    description).
    @param crossCheck If it is false, this is will be default BFMatcher behaviour when it finds the k
    nearest neighbors for each query descriptor. If crossCheck==true, then the knnMatch() method with
    k=1 will only return pairs (i,j) such that for i-th query descriptor the j-th descriptor in the
    matcher's collection is the nearest and vice versa, i.e. the BFMatcher will only return consistent
    pairs. Such technique usually produces best results with minimal number of outliers when there are
    enough matches. This is alternative to the ratio test, used by D. Lowe in SIFT paper.
    */
    CV_WRAP static Ptr<BFMatcher> create( int normType=NORM_L2, bool crossCheck=false ) ;

    virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const;
protected:
    virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,
                               InputArrayOfArrays masks=noArray(), bool compactResult=false );
    virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,
                                  InputArrayOfArrays masks=noArray(), bool compactResult=false );

    int normType;
    bool crossCheck;
};


/** @brief Flann-based descriptor matcher.

This matcher trains cv::flann::Index on a train descriptor collection and calls its nearest search
methods to find the best matches. So, this matcher may be faster when matching a large train
collection than the brute force matcher. FlannBasedMatcher does not support masking permissible
matches of descriptor sets because flann::Index does not support this. :
*/
class CV_EXPORTS_W FlannBasedMatcher : public DescriptorMatcher
{
public:
    CV_WRAP FlannBasedMatcher( const Ptr<flann::IndexParams> & indexParams=makePtr<flann::KDTreeIndexParams>(),
                       const Ptr<flann::SearchParams> & searchParams=makePtr<flann::SearchParams>() );

    virtual void add( InputArrayOfArrays descriptors );
    virtual void clear();

    // Reads matcher object from a file node
    virtual void read( const FileNode& );
    // Writes matcher object to a file storage
    virtual void write( FileStorage& ) const;

    virtual void train();
    virtual bool isMaskSupported() const;

    CV_WRAP static Ptr<FlannBasedMatcher> create();

    virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const;
protected:
    static void convertToDMatches( const DescriptorCollection& descriptors,
                                   const Mat& indices, const Mat& distances,
                                   std::vector<std::vector<DMatch> >& matches );

    virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,
                               InputArrayOfArrays masks=noArray(), bool compactResult=false );
    virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,
                                  InputArrayOfArrays masks=noArray(), bool compactResult=false );

    Ptr<flann::IndexParams> indexParams;
    Ptr<flann::SearchParams> searchParams;
    Ptr<flann::Index> flannIndex;

    DescriptorCollection mergedDescriptors;
    int addedDescCount;
};

//! @} features2d_match

/****************************************************************************************\
*                                   Drawing functions                                    *
\****************************************************************************************/

//! @addtogroup features2d_draw
//! @{

struct CV_EXPORTS DrawMatchesFlags
{
    enum{ DEFAULT = 0, //!< Output image matrix will be created (Mat::create),
                       //!< i.e. existing memory of output image may be reused.
                       //!< Two source image, matches and single keypoints will be drawn.
                       //!< For each keypoint only the center point will be drawn (without
                       //!< the circle around keypoint with keypoint size and orientation).
          DRAW_OVER_OUTIMG = 1, //!< Output image matrix will not be created (Mat::create).
                                //!< Matches will be drawn on existing content of output image.
          NOT_DRAW_SINGLE_POINTS = 2, //!< Single keypoints will not be drawn.
          DRAW_RICH_KEYPOINTS = 4 //!< For each keypoint the circle around keypoint with keypoint size and
                                  //!< orientation will be drawn.
        };
};

/** @brief Draws keypoints.

@param image Source image.
@param keypoints Keypoints from the source image.
@param outImage Output image. Its content depends on the flags value defining what is drawn in the
output image. See possible flags bit values below.
@param color Color of keypoints.
@param flags Flags setting drawing features. Possible flags bit values are defined by
DrawMatchesFlags. See details above in drawMatches .
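
A short sketch (assuming keypoints were already detected on a cv::Mat named img):
@code
    cv::Mat outImg;
    cv::drawKeypoints(img, keypoints, outImg,
                      cv::Scalar::all(-1),                          // random color per keypoint
                      cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);   // draw size and orientation too
@endcode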

@note
For Python API, flags are modified as cv2.DRAW_MATCHES_FLAGS_DEFAULT,
cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG,
cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS
*/
CV_EXPORTS_W void drawKeypoints( InputArray image, const std::vector<KeyPoint>& keypoints, InputOutputArray outImage,
                                 const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT );

/** @brief Draws the found matches of keypoints from two images.

@param img1 First source image.
@param keypoints1 Keypoints from the first source image.
@param img2 Second source image.
@param keypoints2 Keypoints from the second source image.
@param matches1to2 Matches from the first image to the second one, which means that keypoints1[i]
has a corresponding point in keypoints2[matches[i]] .
@param outImg Output image. Its content depends on the flags value defining what is drawn in the
output image. See possible flags bit values below.
@param matchColor Color of matches (lines and connected keypoints). If matchColor==Scalar::all(-1)
, the color is generated randomly.
@param singlePointColor Color of single keypoints (circles), which means that keypoints do not
have the matches. If singlePointColor==Scalar::all(-1) , the color is generated randomly.
@param matchesMask Mask determining which matches are drawn. If the mask is empty, all matches are
drawn.
@param flags Flags setting drawing features. Possible flags bit values are defined by
DrawMatchesFlags.

This function draws matches of keypoints from two images in the output image. Match is a line
connecting two keypoints (circles). See cv::DrawMatchesFlags.
*/
CV_EXPORTS_W void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
                               InputArray img2, const std::vector<KeyPoint>& keypoints2,
                               const std::vector<DMatch>& matches1to2, InputOutputArray outImg,
                               const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1),
                               const std::vector<char>& matchesMask=std::vector<char>(), int flags=DrawMatchesFlags::DEFAULT );

/** @overload */
CV_EXPORTS_AS(drawMatchesKnn) void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
                               InputArray img2, const std::vector<KeyPoint>& keypoints2,
                               const std::vector<std::vector<DMatch> >& matches1to2, InputOutputArray outImg,
                               const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1),
                               const std::vector<std::vector<char> >& matchesMask=std::vector<std::vector<char> >(), int flags=DrawMatchesFlags::DEFAULT );

//! @} features2d_draw

/****************************************************************************************\
*   Functions to evaluate the feature detectors and [generic] descriptor extractors      *
\****************************************************************************************/

CV_EXPORTS void evaluateFeatureDetector( const Mat& img1, const Mat& img2, const Mat& H1to2,
                                         std::vector<KeyPoint>* keypoints1, std::vector<KeyPoint>* keypoints2,
                                         float& repeatability, int& correspCount,
                                         const Ptr<FeatureDetector>& fdetector=Ptr<FeatureDetector>() );

CV_EXPORTS void computeRecallPrecisionCurve( const std::vector<std::vector<DMatch> >& matches1to2,
                                             const std::vector<std::vector<uchar> >& correctMatches1to2Mask,
                                             std::vector<Point2f>& recallPrecisionCurve );

CV_EXPORTS float getRecall( const std::vector<Point2f>& recallPrecisionCurve, float l_precision );
CV_EXPORTS int getNearestPoint( const std::vector<Point2f>& recallPrecisionCurve, float l_precision );

/****************************************************************************************\
*                                    Bag of visual words                                 *
\****************************************************************************************/

//! @addtogroup features2d_category
//! @{

/** @brief Abstract base class for training the *bag of visual words* vocabulary from a set of descriptors.

For details, see, for example, *Visual Categorization with Bags of Keypoints* by Gabriella Csurka,
Christopher R. Dance, Lixin Fan, Jutta Willamowski, Cedric Bray, 2004. :
*/
class CV_EXPORTS_W BOWTrainer
{
public:
    BOWTrainer();
    virtual ~BOWTrainer();

    /** @brief Adds descriptors to a training set.

    @param descriptors Descriptors to add to a training set. Each row of the descriptors matrix is a
    descriptor.

    The training set is clustered using clustermethod to construct the vocabulary.
    */
    CV_WRAP void add( const Mat& descriptors );

    /** @brief Returns a training set of descriptors.
    */
    CV_WRAP const std::vector<Mat>& getDescriptors() const;

    /** @brief Returns the count of all descriptors stored in the training set.
    */
    CV_WRAP int descriptorsCount() const;

    CV_WRAP virtual void clear();

    /** @overload */
    CV_WRAP virtual Mat cluster() const = 0;

    /** @brief Clusters train descriptors.

    @param descriptors Descriptors to cluster. Each row of the descriptors matrix is a descriptor.
    Descriptors are not added to the inner train descriptor set.

    The vocabulary consists of cluster centers. So, this method returns the vocabulary. In the first
    variant of the method, train descriptors stored in the object are clustered. In the second variant,
    input descriptors are clustered.
    */
    CV_WRAP virtual Mat cluster( const Mat& descriptors ) const = 0;

protected:
    std::vector<Mat> descriptors;
    int size;
};

/** @brief kmeans -based class to train visual vocabulary using the *bag of visual words* approach. :
*/
class CV_EXPORTS_W BOWKMeansTrainer : public BOWTrainer
{
public:
    /** @brief The constructor.
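
    A minimal vocabulary-training sketch (assuming allDescriptors is a cv::Mat of training
    descriptors, one descriptor per row; the cluster count of 100 is only illustrative):
    @code
        cv::BOWKMeansTrainer bowTrainer(100);        // 100 visual words
        bowTrainer.add(allDescriptors);
        cv::Mat vocabulary = bowTrainer.cluster();   // one row per visual word (cluster center)
    @endcode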

    @see cv::kmeans
    */
    CV_WRAP BOWKMeansTrainer( int clusterCount, const TermCriteria& termcrit=TermCriteria(),
                              int attempts=3, int flags=KMEANS_PP_CENTERS );
    virtual ~BOWKMeansTrainer();

    // Returns trained vocabulary (i.e. cluster centers).
    CV_WRAP virtual Mat cluster() const;
    CV_WRAP virtual Mat cluster( const Mat& descriptors ) const;

protected:

    int clusterCount;
    TermCriteria termcrit;
    int attempts;
    int flags;
};

/** @brief Class to compute an image descriptor using the *bag of visual words*.

Such a computation consists of the following steps:

1.  Compute descriptors for a given image and its keypoints set.
2.  Find the nearest visual words from the vocabulary for each keypoint descriptor.
3.  Compute the bag-of-words image descriptor as is a normalized histogram of vocabulary words
    encountered in the image. The i-th bin of the histogram is a frequency of i-th word of the
    vocabulary in the given image.
*/
class CV_EXPORTS_W BOWImgDescriptorExtractor
{
public:
    /** @brief The constructor.

    @param dextractor Descriptor extractor that is used to compute descriptors for an input image and
    its keypoints.
    @param dmatcher Descriptor matcher that is used to find the nearest word of the trained vocabulary
    for each keypoint descriptor of the image.
    */
    CV_WRAP BOWImgDescriptorExtractor( const Ptr<DescriptorExtractor> & dextractor,
                                       const Ptr<DescriptorMatcher> & dmatcher );
    /** @overload */
    BOWImgDescriptorExtractor( const Ptr<DescriptorMatcher> & dmatcher );
    virtual ~BOWImgDescriptorExtractor();

    /** @brief Sets a visual vocabulary.

    @param vocabulary Vocabulary (can be trained using the inheritor of BOWTrainer ). Each row of the
    vocabulary is a visual word (cluster center).
    */
    CV_WRAP void setVocabulary( const Mat& vocabulary );

    /** @brief Returns the set vocabulary.
    */
    CV_WRAP const Mat& getVocabulary() const;

    /** @brief Computes an image descriptor using the set visual vocabulary.

    @param image Image, for which the descriptor is computed.
    @param keypoints Keypoints detected in the input image.
    @param imgDescriptor Computed output image descriptor.
    @param pointIdxsOfClusters Indices of keypoints that belong to the cluster. This means that
    pointIdxsOfClusters[i] are keypoint indices that belong to the i -th cluster (word of vocabulary)
    returned if it is non-zero.
    @param descriptors Descriptors of the image keypoints that are returned if they are non-zero.
    */
    void compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray imgDescriptor,
                  std::vector<std::vector<int> >* pointIdxsOfClusters=0, Mat* descriptors=0 );
    /** @overload
    @param keypointDescriptors Computed descriptors to match with vocabulary.
    @param imgDescriptor Computed output image descriptor.
    @param pointIdxsOfClusters Indices of keypoints that belong to the cluster. This means that
    pointIdxsOfClusters[i] are keypoint indices that belong to the i -th cluster (word of vocabulary)
    returned if it is non-zero.
    */
    void compute( InputArray keypointDescriptors, OutputArray imgDescriptor,
                  std::vector<std::vector<int> >* pointIdxsOfClusters=0 );
    // compute() is not constant because DescriptorMatcher::match is not constant

    CV_WRAP_AS(compute) void compute2( const Mat& image, std::vector<KeyPoint>& keypoints, CV_OUT Mat& imgDescriptor )
    { compute(image,keypoints,imgDescriptor); }

    /** @brief Returns an image descriptor size if the vocabulary is set. Otherwise, it returns 0.
    */
    CV_WRAP int descriptorSize() const;

    /** @brief Returns an image descriptor type.
    */
    CV_WRAP int descriptorType() const;

protected:
    Mat vocabulary;
    Ptr<DescriptorExtractor> dextractor;
    Ptr<DescriptorMatcher> dmatcher;
};

//! @} features2d_category

//! @} features2d

} /* namespace cv */

#endif
