openCV library for Renesas RZ/A
Dependents: RZ_A2M_Mbed_samples
include/opencv2/video/tracking.hpp@0:0e0631af0305, 2021-01-29 (annotated)
- Committer:
- RyoheiHagimoto
- Date:
- Fri Jan 29 04:53:38 2021 +0000
- Revision:
- 0:0e0631af0305
copied from https://github.com/d-kato/opencv-lib.
Who changed what in which revision?
| User | Revision | Line number | New contents of line |
|---|---|---|---|
| RyoheiHagimoto | 0:0e0631af0305 | 1 | /*M/////////////////////////////////////////////////////////////////////////////////////// |
| RyoheiHagimoto | 0:0e0631af0305 | 2 | // |
| RyoheiHagimoto | 0:0e0631af0305 | 3 | // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. |
| RyoheiHagimoto | 0:0e0631af0305 | 4 | // |
| RyoheiHagimoto | 0:0e0631af0305 | 5 | // By downloading, copying, installing or using the software you agree to this license. |
| RyoheiHagimoto | 0:0e0631af0305 | 6 | // If you do not agree to this license, do not download, install, |
| RyoheiHagimoto | 0:0e0631af0305 | 7 | // copy or use the software. |
| RyoheiHagimoto | 0:0e0631af0305 | 8 | // |
| RyoheiHagimoto | 0:0e0631af0305 | 9 | // |
| RyoheiHagimoto | 0:0e0631af0305 | 10 | // License Agreement |
| RyoheiHagimoto | 0:0e0631af0305 | 11 | // For Open Source Computer Vision Library |
| RyoheiHagimoto | 0:0e0631af0305 | 12 | // |
| RyoheiHagimoto | 0:0e0631af0305 | 13 | // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. |
| RyoheiHagimoto | 0:0e0631af0305 | 14 | // Copyright (C) 2009, Willow Garage Inc., all rights reserved. |
| RyoheiHagimoto | 0:0e0631af0305 | 15 | // Copyright (C) 2013, OpenCV Foundation, all rights reserved. |
| RyoheiHagimoto | 0:0e0631af0305 | 16 | // Third party copyrights are property of their respective owners. |
| RyoheiHagimoto | 0:0e0631af0305 | 17 | // |
| RyoheiHagimoto | 0:0e0631af0305 | 18 | // Redistribution and use in source and binary forms, with or without modification, |
| RyoheiHagimoto | 0:0e0631af0305 | 19 | // are permitted provided that the following conditions are met: |
| RyoheiHagimoto | 0:0e0631af0305 | 20 | // |
| RyoheiHagimoto | 0:0e0631af0305 | 21 | // * Redistribution's of source code must retain the above copyright notice, |
| RyoheiHagimoto | 0:0e0631af0305 | 22 | // this list of conditions and the following disclaimer. |
| RyoheiHagimoto | 0:0e0631af0305 | 23 | // |
| RyoheiHagimoto | 0:0e0631af0305 | 24 | // * Redistribution's in binary form must reproduce the above copyright notice, |
| RyoheiHagimoto | 0:0e0631af0305 | 25 | // this list of conditions and the following disclaimer in the documentation |
| RyoheiHagimoto | 0:0e0631af0305 | 26 | // and/or other materials provided with the distribution. |
| RyoheiHagimoto | 0:0e0631af0305 | 27 | // |
| RyoheiHagimoto | 0:0e0631af0305 | 28 | // * The name of the copyright holders may not be used to endorse or promote products |
| RyoheiHagimoto | 0:0e0631af0305 | 29 | // derived from this software without specific prior written permission. |
| RyoheiHagimoto | 0:0e0631af0305 | 30 | // |
| RyoheiHagimoto | 0:0e0631af0305 | 31 | // This software is provided by the copyright holders and contributors "as is" and |
| RyoheiHagimoto | 0:0e0631af0305 | 32 | // any express or implied warranties, including, but not limited to, the implied |
| RyoheiHagimoto | 0:0e0631af0305 | 33 | // warranties of merchantability and fitness for a particular purpose are disclaimed. |
| RyoheiHagimoto | 0:0e0631af0305 | 34 | // In no event shall the Intel Corporation or contributors be liable for any direct, |
| RyoheiHagimoto | 0:0e0631af0305 | 35 | // indirect, incidental, special, exemplary, or consequential damages |
| RyoheiHagimoto | 0:0e0631af0305 | 36 | // (including, but not limited to, procurement of substitute goods or services; |
| RyoheiHagimoto | 0:0e0631af0305 | 37 | // loss of use, data, or profits; or business interruption) however caused |
| RyoheiHagimoto | 0:0e0631af0305 | 38 | // and on any theory of liability, whether in contract, strict liability, |
| RyoheiHagimoto | 0:0e0631af0305 | 39 | // or tort (including negligence or otherwise) arising in any way out of |
| RyoheiHagimoto | 0:0e0631af0305 | 40 | // the use of this software, even if advised of the possibility of such damage. |
| RyoheiHagimoto | 0:0e0631af0305 | 41 | // |
| RyoheiHagimoto | 0:0e0631af0305 | 42 | //M*/ |
| RyoheiHagimoto | 0:0e0631af0305 | 43 | |
| RyoheiHagimoto | 0:0e0631af0305 | 44 | #ifndef OPENCV_TRACKING_HPP |
| RyoheiHagimoto | 0:0e0631af0305 | 45 | #define OPENCV_TRACKING_HPP |
| RyoheiHagimoto | 0:0e0631af0305 | 46 | |
| RyoheiHagimoto | 0:0e0631af0305 | 47 | #include "opencv2/core.hpp" |
| RyoheiHagimoto | 0:0e0631af0305 | 48 | #include "opencv2/imgproc.hpp" |
| RyoheiHagimoto | 0:0e0631af0305 | 49 | |
| RyoheiHagimoto | 0:0e0631af0305 | 50 | namespace cv |
| RyoheiHagimoto | 0:0e0631af0305 | 51 | { |
| RyoheiHagimoto | 0:0e0631af0305 | 52 | |
| RyoheiHagimoto | 0:0e0631af0305 | 53 | //! @addtogroup video_track |
| RyoheiHagimoto | 0:0e0631af0305 | 54 | //! @{ |
| RyoheiHagimoto | 0:0e0631af0305 | 55 | |
| RyoheiHagimoto | 0:0e0631af0305 | 56 | enum { OPTFLOW_USE_INITIAL_FLOW = 4, |
| RyoheiHagimoto | 0:0e0631af0305 | 57 | OPTFLOW_LK_GET_MIN_EIGENVALS = 8, |
| RyoheiHagimoto | 0:0e0631af0305 | 58 | OPTFLOW_FARNEBACK_GAUSSIAN = 256 |
| RyoheiHagimoto | 0:0e0631af0305 | 59 | }; |
| RyoheiHagimoto | 0:0e0631af0305 | 60 | |
| RyoheiHagimoto | 0:0e0631af0305 | 61 | /** @brief Finds an object center, size, and orientation. |
| RyoheiHagimoto | 0:0e0631af0305 | 62 | |
| RyoheiHagimoto | 0:0e0631af0305 | 63 | @param probImage Back projection of the object histogram. See calcBackProject. |
| RyoheiHagimoto | 0:0e0631af0305 | 64 | @param window Initial search window. |
| RyoheiHagimoto | 0:0e0631af0305 | 65 | @param criteria Stop criteria for the underlying meanShift. |
| RyoheiHagimoto | 0:0e0631af0305 | 66 | returns |
| RyoheiHagimoto | 0:0e0631af0305 | 67 | (in old interfaces) Number of iterations CAMSHIFT took to converge |
| RyoheiHagimoto | 0:0e0631af0305 | 68 | The function implements the CAMSHIFT object tracking algorithm @cite Bradski98 . First, it finds an |
| RyoheiHagimoto | 0:0e0631af0305 | 69 | object center using meanShift and then adjusts the window size and finds the optimal rotation. The |
| RyoheiHagimoto | 0:0e0631af0305 | 70 | function returns the rotated rectangle structure that includes the object position, size, and |
| RyoheiHagimoto | 0:0e0631af0305 | 71 | orientation. The next position of the search window can be obtained with RotatedRect::boundingRect() |
| RyoheiHagimoto | 0:0e0631af0305 | 72 | |
| RyoheiHagimoto | 0:0e0631af0305 | 73 | See the OpenCV sample camshiftdemo.c that tracks colored objects. |
| RyoheiHagimoto | 0:0e0631af0305 | 74 | |
| RyoheiHagimoto | 0:0e0631af0305 | 75 | @note |
| RyoheiHagimoto | 0:0e0631af0305 | 76 | - (Python) A sample explaining the camshift tracking algorithm can be found at |
| RyoheiHagimoto | 0:0e0631af0305 | 77 | opencv_source_code/samples/python/camshift.py |
| RyoheiHagimoto | 0:0e0631af0305 | 78 | */ |
| RyoheiHagimoto | 0:0e0631af0305 | 79 | CV_EXPORTS_W RotatedRect CamShift( InputArray probImage, CV_IN_OUT Rect& window, |
| RyoheiHagimoto | 0:0e0631af0305 | 80 | TermCriteria criteria ); |
| RyoheiHagimoto | 0:0e0631af0305 | 81 | |
| RyoheiHagimoto | 0:0e0631af0305 | 82 | /** @brief Finds an object on a back projection image. |
| RyoheiHagimoto | 0:0e0631af0305 | 83 | |
| RyoheiHagimoto | 0:0e0631af0305 | 84 | @param probImage Back projection of the object histogram. See calcBackProject for details. |
| RyoheiHagimoto | 0:0e0631af0305 | 85 | @param window Initial search window. |
| RyoheiHagimoto | 0:0e0631af0305 | 86 | @param criteria Stop criteria for the iterative search algorithm. |
| RyoheiHagimoto | 0:0e0631af0305 | 87 | returns |
| RyoheiHagimoto | 0:0e0631af0305 | 88 | : Number of iterations meanShift took to converge. |
| RyoheiHagimoto | 0:0e0631af0305 | 89 | The function implements the iterative object search algorithm. It takes the input back projection of |
| RyoheiHagimoto | 0:0e0631af0305 | 90 | an object and the initial position. The mass center in window of the back projection image is |
| RyoheiHagimoto | 0:0e0631af0305 | 91 | computed and the search window center shifts to the mass center. The procedure is repeated until the |
| RyoheiHagimoto | 0:0e0631af0305 | 92 | specified number of iterations criteria.maxCount is done or until the window center shifts by less |
| RyoheiHagimoto | 0:0e0631af0305 | 93 | than criteria.epsilon. The algorithm is used inside CamShift and, unlike CamShift , the search |
| RyoheiHagimoto | 0:0e0631af0305 | 94 | window size or orientation do not change during the search. You can simply pass the output of |
| RyoheiHagimoto | 0:0e0631af0305 | 95 | calcBackProject to this function. But better results can be obtained if you pre-filter the back |
| RyoheiHagimoto | 0:0e0631af0305 | 96 | projection and remove the noise. For example, you can do this by retrieving connected components |
| RyoheiHagimoto | 0:0e0631af0305 | 97 | with findContours , throwing away contours with small area ( contourArea ), and rendering the |
| RyoheiHagimoto | 0:0e0631af0305 | 98 | remaining contours with drawContours. |
| RyoheiHagimoto | 0:0e0631af0305 | 99 | |
| RyoheiHagimoto | 0:0e0631af0305 | 100 | @note |
| RyoheiHagimoto | 0:0e0631af0305 | 101 | - A mean-shift tracking sample can be found at opencv_source_code/samples/cpp/camshiftdemo.cpp |
| RyoheiHagimoto | 0:0e0631af0305 | 102 | */ |
| RyoheiHagimoto | 0:0e0631af0305 | 103 | CV_EXPORTS_W int meanShift( InputArray probImage, CV_IN_OUT Rect& window, TermCriteria criteria ); |
| RyoheiHagimoto | 0:0e0631af0305 | 104 | |
| RyoheiHagimoto | 0:0e0631af0305 | 105 | /** @brief Constructs the image pyramid which can be passed to calcOpticalFlowPyrLK. |
| RyoheiHagimoto | 0:0e0631af0305 | 106 | |
| RyoheiHagimoto | 0:0e0631af0305 | 107 | @param img 8-bit input image. |
| RyoheiHagimoto | 0:0e0631af0305 | 108 | @param pyramid output pyramid. |
| RyoheiHagimoto | 0:0e0631af0305 | 109 | @param winSize window size of optical flow algorithm. Must be not less than winSize argument of |
| RyoheiHagimoto | 0:0e0631af0305 | 110 | calcOpticalFlowPyrLK. It is needed to calculate required padding for pyramid levels. |
| RyoheiHagimoto | 0:0e0631af0305 | 111 | @param maxLevel 0-based maximal pyramid level number. |
| RyoheiHagimoto | 0:0e0631af0305 | 112 | @param withDerivatives set to precompute gradients for the every pyramid level. If pyramid is |
| RyoheiHagimoto | 0:0e0631af0305 | 113 | constructed without the gradients then calcOpticalFlowPyrLK will calculate them internally. |
| RyoheiHagimoto | 0:0e0631af0305 | 114 | @param pyrBorder the border mode for pyramid layers. |
| RyoheiHagimoto | 0:0e0631af0305 | 115 | @param derivBorder the border mode for gradients. |
| RyoheiHagimoto | 0:0e0631af0305 | 116 | @param tryReuseInputImage put ROI of input image into the pyramid if possible. You can pass false |
| RyoheiHagimoto | 0:0e0631af0305 | 117 | to force data copying. |
| RyoheiHagimoto | 0:0e0631af0305 | 118 | @return number of levels in constructed pyramid. Can be less than maxLevel. |
| RyoheiHagimoto | 0:0e0631af0305 | 119 | */ |
| RyoheiHagimoto | 0:0e0631af0305 | 120 | CV_EXPORTS_W int buildOpticalFlowPyramid( InputArray img, OutputArrayOfArrays pyramid, |
| RyoheiHagimoto | 0:0e0631af0305 | 121 | Size winSize, int maxLevel, bool withDerivatives = true, |
| RyoheiHagimoto | 0:0e0631af0305 | 122 | int pyrBorder = BORDER_REFLECT_101, |
| RyoheiHagimoto | 0:0e0631af0305 | 123 | int derivBorder = BORDER_CONSTANT, |
| RyoheiHagimoto | 0:0e0631af0305 | 124 | bool tryReuseInputImage = true ); |
| RyoheiHagimoto | 0:0e0631af0305 | 125 | |
| RyoheiHagimoto | 0:0e0631af0305 | 126 | /** @brief Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with |
| RyoheiHagimoto | 0:0e0631af0305 | 127 | pyramids. |
| RyoheiHagimoto | 0:0e0631af0305 | 128 | |
| RyoheiHagimoto | 0:0e0631af0305 | 129 | @param prevImg first 8-bit input image or pyramid constructed by buildOpticalFlowPyramid. |
| RyoheiHagimoto | 0:0e0631af0305 | 130 | @param nextImg second input image or pyramid of the same size and the same type as prevImg. |
| RyoheiHagimoto | 0:0e0631af0305 | 131 | @param prevPts vector of 2D points for which the flow needs to be found; point coordinates must be |
| RyoheiHagimoto | 0:0e0631af0305 | 132 | single-precision floating-point numbers. |
| RyoheiHagimoto | 0:0e0631af0305 | 133 | @param nextPts output vector of 2D points (with single-precision floating-point coordinates) |
| RyoheiHagimoto | 0:0e0631af0305 | 134 | containing the calculated new positions of input features in the second image; when |
| RyoheiHagimoto | 0:0e0631af0305 | 135 | OPTFLOW_USE_INITIAL_FLOW flag is passed, the vector must have the same size as in the input. |
| RyoheiHagimoto | 0:0e0631af0305 | 136 | @param status output status vector (of unsigned chars); each element of the vector is set to 1 if |
| RyoheiHagimoto | 0:0e0631af0305 | 137 | the flow for the corresponding features has been found, otherwise, it is set to 0. |
| RyoheiHagimoto | 0:0e0631af0305 | 138 | @param err output vector of errors; each element of the vector is set to an error for the |
| RyoheiHagimoto | 0:0e0631af0305 | 139 | corresponding feature, type of the error measure can be set in flags parameter; if the flow wasn't |
| RyoheiHagimoto | 0:0e0631af0305 | 140 | found then the error is not defined (use the status parameter to find such cases). |
| RyoheiHagimoto | 0:0e0631af0305 | 141 | @param winSize size of the search window at each pyramid level. |
| RyoheiHagimoto | 0:0e0631af0305 | 142 | @param maxLevel 0-based maximal pyramid level number; if set to 0, pyramids are not used (single |
| RyoheiHagimoto | 0:0e0631af0305 | 143 | level), if set to 1, two levels are used, and so on; if pyramids are passed to input then |
| RyoheiHagimoto | 0:0e0631af0305 | 144 | algorithm will use as many levels as pyramids have but no more than maxLevel. |
| RyoheiHagimoto | 0:0e0631af0305 | 145 | @param criteria parameter, specifying the termination criteria of the iterative search algorithm |
| RyoheiHagimoto | 0:0e0631af0305 | 146 | (after the specified maximum number of iterations criteria.maxCount or when the search window |
| RyoheiHagimoto | 0:0e0631af0305 | 147 | moves by less than criteria.epsilon). |
| RyoheiHagimoto | 0:0e0631af0305 | 148 | @param flags operation flags: |
| RyoheiHagimoto | 0:0e0631af0305 | 149 | - **OPTFLOW_USE_INITIAL_FLOW** uses initial estimations, stored in nextPts; if the flag is |
| RyoheiHagimoto | 0:0e0631af0305 | 150 | not set, then prevPts is copied to nextPts and is considered the initial estimate. |
| RyoheiHagimoto | 0:0e0631af0305 | 151 | - **OPTFLOW_LK_GET_MIN_EIGENVALS** use minimum eigen values as an error measure (see |
| RyoheiHagimoto | 0:0e0631af0305 | 152 | minEigThreshold description); if the flag is not set, then L1 distance between patches |
| RyoheiHagimoto | 0:0e0631af0305 | 153 | around the original and a moved point, divided by number of pixels in a window, is used as an |
| RyoheiHagimoto | 0:0e0631af0305 | 154 | error measure. |
| RyoheiHagimoto | 0:0e0631af0305 | 155 | @param minEigThreshold the algorithm calculates the minimum eigen value of a 2x2 normal matrix of |
| RyoheiHagimoto | 0:0e0631af0305 | 156 | optical flow equations (this matrix is called a spatial gradient matrix in @cite Bouguet00), divided |
| RyoheiHagimoto | 0:0e0631af0305 | 157 | by number of pixels in a window; if this value is less than minEigThreshold, then a corresponding |
| RyoheiHagimoto | 0:0e0631af0305 | 158 | feature is filtered out and its flow is not processed, so it allows removing bad points and getting a |
| RyoheiHagimoto | 0:0e0631af0305 | 159 | performance boost. |
| RyoheiHagimoto | 0:0e0631af0305 | 160 | |
| RyoheiHagimoto | 0:0e0631af0305 | 161 | The function implements a sparse iterative version of the Lucas-Kanade optical flow in pyramids. See |
| RyoheiHagimoto | 0:0e0631af0305 | 162 | @cite Bouguet00 . The function is parallelized with the TBB library. |
| RyoheiHagimoto | 0:0e0631af0305 | 163 | |
| RyoheiHagimoto | 0:0e0631af0305 | 164 | @note |
| RyoheiHagimoto | 0:0e0631af0305 | 165 | |
| RyoheiHagimoto | 0:0e0631af0305 | 166 | - An example using the Lucas-Kanade optical flow algorithm can be found at |
| RyoheiHagimoto | 0:0e0631af0305 | 167 | opencv_source_code/samples/cpp/lkdemo.cpp |
| RyoheiHagimoto | 0:0e0631af0305 | 168 | - (Python) An example using the Lucas-Kanade optical flow algorithm can be found at |
| RyoheiHagimoto | 0:0e0631af0305 | 169 | opencv_source_code/samples/python/lk_track.py |
| RyoheiHagimoto | 0:0e0631af0305 | 170 | - (Python) An example using the Lucas-Kanade tracker for homography matching can be found at |
| RyoheiHagimoto | 0:0e0631af0305 | 171 | opencv_source_code/samples/python/lk_homography.py |
| RyoheiHagimoto | 0:0e0631af0305 | 172 | */ |
| RyoheiHagimoto | 0:0e0631af0305 | 173 | CV_EXPORTS_W void calcOpticalFlowPyrLK( InputArray prevImg, InputArray nextImg, |
| RyoheiHagimoto | 0:0e0631af0305 | 174 | InputArray prevPts, InputOutputArray nextPts, |
| RyoheiHagimoto | 0:0e0631af0305 | 175 | OutputArray status, OutputArray err, |
| RyoheiHagimoto | 0:0e0631af0305 | 176 | Size winSize = Size(21,21), int maxLevel = 3, |
| RyoheiHagimoto | 0:0e0631af0305 | 177 | TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01), |
| RyoheiHagimoto | 0:0e0631af0305 | 178 | int flags = 0, double minEigThreshold = 1e-4 ); |
| RyoheiHagimoto | 0:0e0631af0305 | 179 | |
| RyoheiHagimoto | 0:0e0631af0305 | 180 | /** @brief Computes a dense optical flow using the Gunnar Farneback's algorithm. |
| RyoheiHagimoto | 0:0e0631af0305 | 181 | |
| RyoheiHagimoto | 0:0e0631af0305 | 182 | @param prev first 8-bit single-channel input image. |
| RyoheiHagimoto | 0:0e0631af0305 | 183 | @param next second input image of the same size and the same type as prev. |
| RyoheiHagimoto | 0:0e0631af0305 | 184 | @param flow computed flow image that has the same size as prev and type CV_32FC2. |
| RyoheiHagimoto | 0:0e0631af0305 | 185 | @param pyr_scale parameter, specifying the image scale (\<1) to build pyramids for each image; |
| RyoheiHagimoto | 0:0e0631af0305 | 186 | pyr_scale=0.5 means a classical pyramid, where each next layer is twice smaller than the previous |
| RyoheiHagimoto | 0:0e0631af0305 | 187 | one. |
| RyoheiHagimoto | 0:0e0631af0305 | 188 | @param levels number of pyramid layers including the initial image; levels=1 means that no extra |
| RyoheiHagimoto | 0:0e0631af0305 | 189 | layers are created and only the original images are used. |
| RyoheiHagimoto | 0:0e0631af0305 | 190 | @param winsize averaging window size; larger values increase the algorithm robustness to image |
| RyoheiHagimoto | 0:0e0631af0305 | 191 | noise and give more chances for fast motion detection, but yield more blurred motion field. |
| RyoheiHagimoto | 0:0e0631af0305 | 192 | @param iterations number of iterations the algorithm does at each pyramid level. |
| RyoheiHagimoto | 0:0e0631af0305 | 193 | @param poly_n size of the pixel neighborhood used to find polynomial expansion in each pixel; |
| RyoheiHagimoto | 0:0e0631af0305 | 194 | larger values mean that the image will be approximated with smoother surfaces, yielding more |
| RyoheiHagimoto | 0:0e0631af0305 | 195 | robust algorithm and more blurred motion field, typically poly_n =5 or 7. |
| RyoheiHagimoto | 0:0e0631af0305 | 196 | @param poly_sigma standard deviation of the Gaussian that is used to smooth derivatives used as a |
| RyoheiHagimoto | 0:0e0631af0305 | 197 | basis for the polynomial expansion; for poly_n=5, you can set poly_sigma=1.1, for poly_n=7, a |
| RyoheiHagimoto | 0:0e0631af0305 | 198 | good value would be poly_sigma=1.5. |
| RyoheiHagimoto | 0:0e0631af0305 | 199 | @param flags operation flags that can be a combination of the following: |
| RyoheiHagimoto | 0:0e0631af0305 | 200 | - **OPTFLOW_USE_INITIAL_FLOW** uses the input flow as an initial flow approximation. |
| RyoheiHagimoto | 0:0e0631af0305 | 201 | - **OPTFLOW_FARNEBACK_GAUSSIAN** uses the Gaussian \f$\texttt{winsize}\times\texttt{winsize}\f$ |
| RyoheiHagimoto | 0:0e0631af0305 | 202 | filter instead of a box filter of the same size for optical flow estimation; usually, this |
| RyoheiHagimoto | 0:0e0631af0305 | 203 | option gives a more accurate flow than with a box filter, at the cost of lower speed; |
| RyoheiHagimoto | 0:0e0631af0305 | 204 | normally, winsize for a Gaussian window should be set to a larger value to achieve the same |
| RyoheiHagimoto | 0:0e0631af0305 | 205 | level of robustness. |
| RyoheiHagimoto | 0:0e0631af0305 | 206 | |
| RyoheiHagimoto | 0:0e0631af0305 | 207 | The function finds an optical flow for each prev pixel using the @cite Farneback2003 algorithm so that |
| RyoheiHagimoto | 0:0e0631af0305 | 208 | |
| RyoheiHagimoto | 0:0e0631af0305 | 209 | \f[\texttt{prev} (y,x) \sim \texttt{next} ( y + \texttt{flow} (y,x)[1], x + \texttt{flow} (y,x)[0])\f] |
| RyoheiHagimoto | 0:0e0631af0305 | 210 | |
| RyoheiHagimoto | 0:0e0631af0305 | 211 | @note |
| RyoheiHagimoto | 0:0e0631af0305 | 212 | |
| RyoheiHagimoto | 0:0e0631af0305 | 213 | - An example using the optical flow algorithm described by Gunnar Farneback can be found at |
| RyoheiHagimoto | 0:0e0631af0305 | 214 | opencv_source_code/samples/cpp/fback.cpp |
| RyoheiHagimoto | 0:0e0631af0305 | 215 | - (Python) An example using the optical flow algorithm described by Gunnar Farneback can be |
| RyoheiHagimoto | 0:0e0631af0305 | 216 | found at opencv_source_code/samples/python/opt_flow.py |
| RyoheiHagimoto | 0:0e0631af0305 | 217 | */ |
| RyoheiHagimoto | 0:0e0631af0305 | 218 | CV_EXPORTS_W void calcOpticalFlowFarneback( InputArray prev, InputArray next, InputOutputArray flow, |
| RyoheiHagimoto | 0:0e0631af0305 | 219 | double pyr_scale, int levels, int winsize, |
| RyoheiHagimoto | 0:0e0631af0305 | 220 | int iterations, int poly_n, double poly_sigma, |
| RyoheiHagimoto | 0:0e0631af0305 | 221 | int flags ); |
| RyoheiHagimoto | 0:0e0631af0305 | 222 | |
| RyoheiHagimoto | 0:0e0631af0305 | 223 | /** @brief Computes an optimal affine transformation between two 2D point sets. |
| RyoheiHagimoto | 0:0e0631af0305 | 224 | |
| RyoheiHagimoto | 0:0e0631af0305 | 225 | @param src First input 2D point set stored in std::vector or Mat, or an image stored in Mat. |
| RyoheiHagimoto | 0:0e0631af0305 | 226 | @param dst Second input 2D point set of the same size and the same type as A, or another image. |
| RyoheiHagimoto | 0:0e0631af0305 | 227 | @param fullAffine If true, the function finds an optimal affine transformation with no additional |
| RyoheiHagimoto | 0:0e0631af0305 | 228 | restrictions (6 degrees of freedom). Otherwise, the class of transformations to choose from is |
| RyoheiHagimoto | 0:0e0631af0305 | 229 | limited to combinations of translation, rotation, and uniform scaling (4 degrees of freedom). |
| RyoheiHagimoto | 0:0e0631af0305 | 230 | |
| RyoheiHagimoto | 0:0e0631af0305 | 231 | The function finds an optimal affine transform *[A|b]* (a 2 x 3 floating-point matrix) that |
| RyoheiHagimoto | 0:0e0631af0305 | 232 | approximates best the affine transformation between: |
| RyoheiHagimoto | 0:0e0631af0305 | 233 | |
| RyoheiHagimoto | 0:0e0631af0305 | 234 | * Two point sets |
| RyoheiHagimoto | 0:0e0631af0305 | 235 | * Two raster images. In this case, the function first finds some features in the src image and |
| RyoheiHagimoto | 0:0e0631af0305 | 236 | finds the corresponding features in dst image. After that, the problem is reduced to the first |
| RyoheiHagimoto | 0:0e0631af0305 | 237 | case. |
| RyoheiHagimoto | 0:0e0631af0305 | 238 | In case of point sets, the problem is formulated as follows: you need to find a 2x2 matrix *A* and |
| RyoheiHagimoto | 0:0e0631af0305 | 239 | 2x1 vector *b* so that: |
| RyoheiHagimoto | 0:0e0631af0305 | 240 | |
| RyoheiHagimoto | 0:0e0631af0305 | 241 | \f[[A^*|b^*] = arg \min _{[A|b]} \sum _i \| \texttt{dst}[i] - A { \texttt{src}[i]}^T - b \| ^2\f] |
| RyoheiHagimoto | 0:0e0631af0305 | 242 | where src[i] and dst[i] are the i-th points in src and dst, respectively |
| RyoheiHagimoto | 0:0e0631af0305 | 243 | \f$[A|b]\f$ can be either arbitrary (when fullAffine=true ) or have a form of |
| RyoheiHagimoto | 0:0e0631af0305 | 244 | \f[\begin{bmatrix} a_{11} & a_{12} & b_1 \\ -a_{12} & a_{11} & b_2 \end{bmatrix}\f] |
| RyoheiHagimoto | 0:0e0631af0305 | 245 | when fullAffine=false. |
| RyoheiHagimoto | 0:0e0631af0305 | 246 | |
| RyoheiHagimoto | 0:0e0631af0305 | 247 | @sa |
| RyoheiHagimoto | 0:0e0631af0305 | 248 | estimateAffine2D, estimateAffinePartial2D, getAffineTransform, getPerspectiveTransform, findHomography |
| RyoheiHagimoto | 0:0e0631af0305 | 249 | */ |
| RyoheiHagimoto | 0:0e0631af0305 | 250 | CV_EXPORTS_W Mat estimateRigidTransform( InputArray src, InputArray dst, bool fullAffine ); |
| RyoheiHagimoto | 0:0e0631af0305 | 251 | |
| RyoheiHagimoto | 0:0e0631af0305 | 252 | |
| RyoheiHagimoto | 0:0e0631af0305 | 253 | enum |
| RyoheiHagimoto | 0:0e0631af0305 | 254 | { |
| RyoheiHagimoto | 0:0e0631af0305 | 255 | MOTION_TRANSLATION = 0, |
| RyoheiHagimoto | 0:0e0631af0305 | 256 | MOTION_EUCLIDEAN = 1, |
| RyoheiHagimoto | 0:0e0631af0305 | 257 | MOTION_AFFINE = 2, |
| RyoheiHagimoto | 0:0e0631af0305 | 258 | MOTION_HOMOGRAPHY = 3 |
| RyoheiHagimoto | 0:0e0631af0305 | 259 | }; |
| RyoheiHagimoto | 0:0e0631af0305 | 260 | |
| RyoheiHagimoto | 0:0e0631af0305 | 261 | /** @brief Finds the geometric transform (warp) between two images in terms of the ECC criterion @cite EP08 . |
| RyoheiHagimoto | 0:0e0631af0305 | 262 | |
| RyoheiHagimoto | 0:0e0631af0305 | 263 | @param templateImage single-channel template image; CV_8U or CV_32F array. |
| RyoheiHagimoto | 0:0e0631af0305 | 264 | @param inputImage single-channel input image which should be warped with the final warpMatrix in |
| RyoheiHagimoto | 0:0e0631af0305 | 265 | order to provide an image similar to templateImage, same type as templateImage. |
| RyoheiHagimoto | 0:0e0631af0305 | 266 | @param warpMatrix floating-point \f$2\times 3\f$ or \f$3\times 3\f$ mapping matrix (warp). |
| RyoheiHagimoto | 0:0e0631af0305 | 267 | @param motionType parameter, specifying the type of motion: |
| RyoheiHagimoto | 0:0e0631af0305 | 268 | - **MOTION_TRANSLATION** sets a translational motion model; warpMatrix is \f$2\times 3\f$ with |
| RyoheiHagimoto | 0:0e0631af0305 | 269 | the first \f$2\times 2\f$ part being the unity matrix and the rest two parameters being |
| RyoheiHagimoto | 0:0e0631af0305 | 270 | estimated. |
| RyoheiHagimoto | 0:0e0631af0305 | 271 | - **MOTION_EUCLIDEAN** sets a Euclidean (rigid) transformation as motion model; three |
| RyoheiHagimoto | 0:0e0631af0305 | 272 | parameters are estimated; warpMatrix is \f$2\times 3\f$. |
| RyoheiHagimoto | 0:0e0631af0305 | 273 | - **MOTION_AFFINE** sets an affine motion model (DEFAULT); six parameters are estimated; |
| RyoheiHagimoto | 0:0e0631af0305 | 274 | warpMatrix is \f$2\times 3\f$. |
| RyoheiHagimoto | 0:0e0631af0305 | 275 | - **MOTION_HOMOGRAPHY** sets a homography as a motion model; eight parameters are |
| RyoheiHagimoto | 0:0e0631af0305 | 276 | estimated; \`warpMatrix\` is \f$3\times 3\f$. |
| RyoheiHagimoto | 0:0e0631af0305 | 277 | @param criteria parameter, specifying the termination criteria of the ECC algorithm; |
| RyoheiHagimoto | 0:0e0631af0305 | 278 | criteria.epsilon defines the threshold of the increment in the correlation coefficient between two |
| RyoheiHagimoto | 0:0e0631af0305 | 279 | iterations (a negative criteria.epsilon makes criteria.maxcount the only termination criterion). |
| RyoheiHagimoto | 0:0e0631af0305 | 280 | Default values are shown in the declaration above. |
| RyoheiHagimoto | 0:0e0631af0305 | 281 | @param inputMask An optional mask to indicate valid values of inputImage. |
| RyoheiHagimoto | 0:0e0631af0305 | 282 | |
| RyoheiHagimoto | 0:0e0631af0305 | 283 | The function estimates the optimum transformation (warpMatrix) with respect to ECC criterion |
| RyoheiHagimoto | 0:0e0631af0305 | 284 | (@cite EP08), that is |
| RyoheiHagimoto | 0:0e0631af0305 | 285 | |
| RyoheiHagimoto | 0:0e0631af0305 | 286 | \f[\texttt{warpMatrix} = \arg\max_{W} \texttt{ECC}(\texttt{templateImage}(x,y),\texttt{inputImage}(x',y'))\f] |
| RyoheiHagimoto | 0:0e0631af0305 | 287 | |
| RyoheiHagimoto | 0:0e0631af0305 | 288 | where |
| RyoheiHagimoto | 0:0e0631af0305 | 289 | |
| RyoheiHagimoto | 0:0e0631af0305 | 290 | \f[\begin{bmatrix} x' \\ y' \end{bmatrix} = W \cdot \begin{bmatrix} x \\ y \\ 1 \end{bmatrix}\f] |
| RyoheiHagimoto | 0:0e0631af0305 | 291 | |
| RyoheiHagimoto | 0:0e0631af0305 | 292 | (the equation holds with homogeneous coordinates for homography). It returns the final enhanced |
| RyoheiHagimoto | 0:0e0631af0305 | 293 | correlation coefficient, that is the correlation coefficient between the template image and the |
| RyoheiHagimoto | 0:0e0631af0305 | 294 | final warped input image. When a \f$3\times 3\f$ matrix is given with motionType =0, 1 or 2, the third |
| RyoheiHagimoto | 0:0e0631af0305 | 295 | row is ignored. |
| RyoheiHagimoto | 0:0e0631af0305 | 296 | |
| RyoheiHagimoto | 0:0e0631af0305 | 297 | Unlike findHomography and estimateRigidTransform, the function findTransformECC implements an |
| RyoheiHagimoto | 0:0e0631af0305 | 298 | area-based alignment that builds on intensity similarities. In essence, the function updates the |
| RyoheiHagimoto | 0:0e0631af0305 | 299 | initial transformation that roughly aligns the images. If this information is missing, the identity |
| RyoheiHagimoto | 0:0e0631af0305 | 300 | warp (unity matrix) should be given as input. Note that if images undergo strong |
| RyoheiHagimoto | 0:0e0631af0305 | 301 | displacements/rotations, an initial transformation that roughly aligns the images is necessary |
| RyoheiHagimoto | 0:0e0631af0305 | 302 | (e.g., a simple euclidean/similarity transform that allows for the images showing the same image |
| RyoheiHagimoto | 0:0e0631af0305 | 303 | content approximately). Use inverse warping in the second image to take an image close to the first |
| RyoheiHagimoto | 0:0e0631af0305 | 304 | one, i.e. use the flag WARP_INVERSE_MAP with warpAffine or warpPerspective. See also the OpenCV |
| RyoheiHagimoto | 0:0e0631af0305 | 305 | sample image_alignment.cpp that demonstrates the use of the function. Note that the function throws |
| RyoheiHagimoto | 0:0e0631af0305 | 306 | an exception if the algorithm does not converge. |
| RyoheiHagimoto | 0:0e0631af0305 | 307 | |
| RyoheiHagimoto | 0:0e0631af0305 | 308 | @sa |
| RyoheiHagimoto | 0:0e0631af0305 | 309 | estimateAffine2D, estimateAffinePartial2D, findHomography |
| RyoheiHagimoto | 0:0e0631af0305 | 310 | */ |
| RyoheiHagimoto | 0:0e0631af0305 | 311 | CV_EXPORTS_W double findTransformECC( InputArray templateImage, InputArray inputImage, |
| RyoheiHagimoto | 0:0e0631af0305 | 312 | InputOutputArray warpMatrix, int motionType = MOTION_AFFINE, |
| RyoheiHagimoto | 0:0e0631af0305 | 313 | TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 50, 0.001), |
| RyoheiHagimoto | 0:0e0631af0305 | 314 | InputArray inputMask = noArray()); |
| RyoheiHagimoto | 0:0e0631af0305 | 315 | |
| RyoheiHagimoto | 0:0e0631af0305 | 316 | /** @brief Kalman filter class. |
| RyoheiHagimoto | 0:0e0631af0305 | 317 | |
| RyoheiHagimoto | 0:0e0631af0305 | 318 | The class implements a standard Kalman filter <http://en.wikipedia.org/wiki/Kalman_filter>, |
| RyoheiHagimoto | 0:0e0631af0305 | 319 | @cite Welch95 . However, you can modify transitionMatrix, controlMatrix, and measurementMatrix to get |
| RyoheiHagimoto | 0:0e0631af0305 | 320 | an extended Kalman filter functionality. See the OpenCV sample kalman.cpp. |
| RyoheiHagimoto | 0:0e0631af0305 | 321 | |
| RyoheiHagimoto | 0:0e0631af0305 | 322 | @note |
| RyoheiHagimoto | 0:0e0631af0305 | 323 | |
| RyoheiHagimoto | 0:0e0631af0305 | 324 | - An example using the standard Kalman filter can be found at |
| RyoheiHagimoto | 0:0e0631af0305 | 325 | opencv_source_code/samples/cpp/kalman.cpp |
| RyoheiHagimoto | 0:0e0631af0305 | 326 | */ |
| RyoheiHagimoto | 0:0e0631af0305 | 327 | class CV_EXPORTS_W KalmanFilter |
| RyoheiHagimoto | 0:0e0631af0305 | 328 | { |
| RyoheiHagimoto | 0:0e0631af0305 | 329 | public: |
| RyoheiHagimoto | 0:0e0631af0305 | 330 | /** @brief The constructors. |
| RyoheiHagimoto | 0:0e0631af0305 | 331 | |
| RyoheiHagimoto | 0:0e0631af0305 | 332 | @note In C API when CvKalman\* kalmanFilter structure is not needed anymore, it should be released |
| RyoheiHagimoto | 0:0e0631af0305 | 333 | with cvReleaseKalman(&kalmanFilter) |
| RyoheiHagimoto | 0:0e0631af0305 | 334 | */ |
| RyoheiHagimoto | 0:0e0631af0305 | 335 | CV_WRAP KalmanFilter(); |
| RyoheiHagimoto | 0:0e0631af0305 | 336 | /** @overload |
| RyoheiHagimoto | 0:0e0631af0305 | 337 | @param dynamParams Dimensionality of the state. |
| RyoheiHagimoto | 0:0e0631af0305 | 338 | @param measureParams Dimensionality of the measurement. |
| RyoheiHagimoto | 0:0e0631af0305 | 339 | @param controlParams Dimensionality of the control vector. |
| RyoheiHagimoto | 0:0e0631af0305 | 340 | @param type Type of the created matrices that should be CV_32F or CV_64F. |
| RyoheiHagimoto | 0:0e0631af0305 | 341 | */ |
| RyoheiHagimoto | 0:0e0631af0305 | 342 | CV_WRAP KalmanFilter( int dynamParams, int measureParams, int controlParams = 0, int type = CV_32F ); |
| RyoheiHagimoto | 0:0e0631af0305 | 343 | |
| RyoheiHagimoto | 0:0e0631af0305 | 344 | /** @brief Re-initializes Kalman filter. The previous content is destroyed. |
| RyoheiHagimoto | 0:0e0631af0305 | 345 | |
| RyoheiHagimoto | 0:0e0631af0305 | 346 | @param dynamParams Dimensionality of the state. |
| RyoheiHagimoto | 0:0e0631af0305 | 347 | @param measureParams Dimensionality of the measurement. |
| RyoheiHagimoto | 0:0e0631af0305 | 348 | @param controlParams Dimensionality of the control vector. |
| RyoheiHagimoto | 0:0e0631af0305 | 349 | @param type Type of the created matrices that should be CV_32F or CV_64F. |
| RyoheiHagimoto | 0:0e0631af0305 | 350 | */ |
| RyoheiHagimoto | 0:0e0631af0305 | 351 | void init( int dynamParams, int measureParams, int controlParams = 0, int type = CV_32F ); |
| RyoheiHagimoto | 0:0e0631af0305 | 352 | |
| RyoheiHagimoto | 0:0e0631af0305 | 353 | /** @brief Computes a predicted state. |
| RyoheiHagimoto | 0:0e0631af0305 | 354 | |
| RyoheiHagimoto | 0:0e0631af0305 | 355 | @param control The optional input control |
| RyoheiHagimoto | 0:0e0631af0305 | 356 | */ |
| RyoheiHagimoto | 0:0e0631af0305 | 357 | CV_WRAP const Mat& predict( const Mat& control = Mat() ); |
| RyoheiHagimoto | 0:0e0631af0305 | 358 | |
| RyoheiHagimoto | 0:0e0631af0305 | 359 | /** @brief Updates the predicted state from the measurement. |
| RyoheiHagimoto | 0:0e0631af0305 | 360 | |
| RyoheiHagimoto | 0:0e0631af0305 | 361 | @param measurement The measured system parameters |
| RyoheiHagimoto | 0:0e0631af0305 | 362 | */ |
| RyoheiHagimoto | 0:0e0631af0305 | 363 | CV_WRAP const Mat& correct( const Mat& measurement ); |
| RyoheiHagimoto | 0:0e0631af0305 | 364 | |
| RyoheiHagimoto | 0:0e0631af0305 | 365 | CV_PROP_RW Mat statePre; //!< predicted state (x'(k)): x(k)=A*x(k-1)+B*u(k) |
| RyoheiHagimoto | 0:0e0631af0305 | 366 | CV_PROP_RW Mat statePost; //!< corrected state (x(k)): x(k)=x'(k)+K(k)*(z(k)-H*x'(k)) |
| RyoheiHagimoto | 0:0e0631af0305 | 367 | CV_PROP_RW Mat transitionMatrix; //!< state transition matrix (A) |
| RyoheiHagimoto | 0:0e0631af0305 | 368 | CV_PROP_RW Mat controlMatrix; //!< control matrix (B) (not used if there is no control) |
| RyoheiHagimoto | 0:0e0631af0305 | 369 | CV_PROP_RW Mat measurementMatrix; //!< measurement matrix (H) |
| RyoheiHagimoto | 0:0e0631af0305 | 370 | CV_PROP_RW Mat processNoiseCov; //!< process noise covariance matrix (Q) |
| RyoheiHagimoto | 0:0e0631af0305 | 371 | CV_PROP_RW Mat measurementNoiseCov;//!< measurement noise covariance matrix (R) |
| RyoheiHagimoto | 0:0e0631af0305 | 372 | CV_PROP_RW Mat errorCovPre; //!< priori error estimate covariance matrix (P'(k)): P'(k)=A*P(k-1)*At + Q)*/ |
| RyoheiHagimoto | 0:0e0631af0305 | 373 | CV_PROP_RW Mat gain; //!< Kalman gain matrix (K(k)): K(k)=P'(k)*Ht*inv(H*P'(k)*Ht+R) |
| RyoheiHagimoto | 0:0e0631af0305 | 374 | CV_PROP_RW Mat errorCovPost; //!< posteriori error estimate covariance matrix (P(k)): P(k)=(I-K(k)*H)*P'(k) |
| RyoheiHagimoto | 0:0e0631af0305 | 375 | |
| RyoheiHagimoto | 0:0e0631af0305 | 376 | // temporary matrices |
| RyoheiHagimoto | 0:0e0631af0305 | 377 | Mat temp1; |
| RyoheiHagimoto | 0:0e0631af0305 | 378 | Mat temp2; |
| RyoheiHagimoto | 0:0e0631af0305 | 379 | Mat temp3; |
| RyoheiHagimoto | 0:0e0631af0305 | 380 | Mat temp4; |
| RyoheiHagimoto | 0:0e0631af0305 | 381 | Mat temp5; |
| RyoheiHagimoto | 0:0e0631af0305 | 382 | }; |
| RyoheiHagimoto | 0:0e0631af0305 | 383 | |
| RyoheiHagimoto | 0:0e0631af0305 | 384 | |
| RyoheiHagimoto | 0:0e0631af0305 | 385 | class CV_EXPORTS_W DenseOpticalFlow : public Algorithm |
| RyoheiHagimoto | 0:0e0631af0305 | 386 | { |
| RyoheiHagimoto | 0:0e0631af0305 | 387 | public: |
| RyoheiHagimoto | 0:0e0631af0305 | 388 | /** @brief Calculates an optical flow. |
| RyoheiHagimoto | 0:0e0631af0305 | 389 | |
| RyoheiHagimoto | 0:0e0631af0305 | 390 | @param I0 first 8-bit single-channel input image. |
| RyoheiHagimoto | 0:0e0631af0305 | 391 | @param I1 second input image of the same size and the same type as prev. |
| RyoheiHagimoto | 0:0e0631af0305 | 392 | @param flow computed flow image that has the same size as prev and type CV_32FC2. |
| RyoheiHagimoto | 0:0e0631af0305 | 393 | */ |
| RyoheiHagimoto | 0:0e0631af0305 | 394 | CV_WRAP virtual void calc( InputArray I0, InputArray I1, InputOutputArray flow ) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 395 | /** @brief Releases all inner buffers. |
| RyoheiHagimoto | 0:0e0631af0305 | 396 | */ |
| RyoheiHagimoto | 0:0e0631af0305 | 397 | CV_WRAP virtual void collectGarbage() = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 398 | }; |
| RyoheiHagimoto | 0:0e0631af0305 | 399 | |
| RyoheiHagimoto | 0:0e0631af0305 | 400 | /** @brief Base interface for sparse optical flow algorithms. |
| RyoheiHagimoto | 0:0e0631af0305 | 401 | */ |
| RyoheiHagimoto | 0:0e0631af0305 | 402 | class CV_EXPORTS_W SparseOpticalFlow : public Algorithm |
| RyoheiHagimoto | 0:0e0631af0305 | 403 | { |
| RyoheiHagimoto | 0:0e0631af0305 | 404 | public: |
| RyoheiHagimoto | 0:0e0631af0305 | 405 | /** @brief Calculates a sparse optical flow. |
| RyoheiHagimoto | 0:0e0631af0305 | 406 | |
| RyoheiHagimoto | 0:0e0631af0305 | 407 | @param prevImg First input image. |
| RyoheiHagimoto | 0:0e0631af0305 | 408 | @param nextImg Second input image of the same size and the same type as prevImg. |
| RyoheiHagimoto | 0:0e0631af0305 | 409 | @param prevPts Vector of 2D points for which the flow needs to be found. |
| RyoheiHagimoto | 0:0e0631af0305 | 410 | @param nextPts Output vector of 2D points containing the calculated new positions of input features in the second image. |
| RyoheiHagimoto | 0:0e0631af0305 | 411 | @param status Output status vector. Each element of the vector is set to 1 if the |
| RyoheiHagimoto | 0:0e0631af0305 | 412 | flow for the corresponding features has been found. Otherwise, it is set to 0. |
| RyoheiHagimoto | 0:0e0631af0305 | 413 | @param err Optional output vector that contains error response for each point (inverse confidence). |
| RyoheiHagimoto | 0:0e0631af0305 | 414 | */ |
| RyoheiHagimoto | 0:0e0631af0305 | 415 | CV_WRAP virtual void calc(InputArray prevImg, InputArray nextImg, |
| RyoheiHagimoto | 0:0e0631af0305 | 416 | InputArray prevPts, InputOutputArray nextPts, |
| RyoheiHagimoto | 0:0e0631af0305 | 417 | OutputArray status, |
| RyoheiHagimoto | 0:0e0631af0305 | 418 | OutputArray err = cv::noArray()) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 419 | }; |
| RyoheiHagimoto | 0:0e0631af0305 | 420 | |
| RyoheiHagimoto | 0:0e0631af0305 | 421 | /** @brief "Dual TV L1" Optical Flow Algorithm. |
| RyoheiHagimoto | 0:0e0631af0305 | 422 | |
| RyoheiHagimoto | 0:0e0631af0305 | 423 | The class implements the "Dual TV L1" optical flow algorithm described in @cite Zach2007 and |
| RyoheiHagimoto | 0:0e0631af0305 | 424 | @cite Javier2012 . |
| RyoheiHagimoto | 0:0e0631af0305 | 425 | Here are important members of the class that control the algorithm, which you can set after |
| RyoheiHagimoto | 0:0e0631af0305 | 426 | constructing the class instance: |
| RyoheiHagimoto | 0:0e0631af0305 | 427 | |
| RyoheiHagimoto | 0:0e0631af0305 | 428 | - member double tau |
| RyoheiHagimoto | 0:0e0631af0305 | 429 | Time step of the numerical scheme. |
| RyoheiHagimoto | 0:0e0631af0305 | 430 | |
| RyoheiHagimoto | 0:0e0631af0305 | 431 | - member double lambda |
| RyoheiHagimoto | 0:0e0631af0305 | 432 | Weight parameter for the data term, attachment parameter. This is the most relevant |
| RyoheiHagimoto | 0:0e0631af0305 | 433 | parameter, which determines the smoothness of the output. The smaller this parameter is, |
| RyoheiHagimoto | 0:0e0631af0305 | 434 | the smoother the solutions we obtain. It depends on the range of motions of the images, so |
| RyoheiHagimoto | 0:0e0631af0305 | 435 | its value should be adapted to each image sequence. |
| RyoheiHagimoto | 0:0e0631af0305 | 436 | |
| RyoheiHagimoto | 0:0e0631af0305 | 437 | - member double theta |
| RyoheiHagimoto | 0:0e0631af0305 | 438 | Weight parameter for (u - v)\^2, tightness parameter. It serves as a link between the |
| RyoheiHagimoto | 0:0e0631af0305 | 439 | attachment and the regularization terms. In theory, it should have a small value in order |
| RyoheiHagimoto | 0:0e0631af0305 | 440 | to maintain both parts in correspondence. The method is stable for a large range of values |
| RyoheiHagimoto | 0:0e0631af0305 | 441 | of this parameter. |
| RyoheiHagimoto | 0:0e0631af0305 | 442 | |
| RyoheiHagimoto | 0:0e0631af0305 | 443 | - member int nscales |
| RyoheiHagimoto | 0:0e0631af0305 | 444 | Number of scales used to create the pyramid of images. |
| RyoheiHagimoto | 0:0e0631af0305 | 445 | |
| RyoheiHagimoto | 0:0e0631af0305 | 446 | - member int warps |
| RyoheiHagimoto | 0:0e0631af0305 | 447 | Number of warpings per scale. Represents the number of times that I1(x+u0) and grad( |
| RyoheiHagimoto | 0:0e0631af0305 | 448 | I1(x+u0) ) are computed per scale. This is a parameter that assures the stability of the |
| RyoheiHagimoto | 0:0e0631af0305 | 449 | method. It also affects the running time, so it is a compromise between speed and |
| RyoheiHagimoto | 0:0e0631af0305 | 450 | accuracy. |
| RyoheiHagimoto | 0:0e0631af0305 | 451 | |
| RyoheiHagimoto | 0:0e0631af0305 | 452 | - member double epsilon |
| RyoheiHagimoto | 0:0e0631af0305 | 453 | Stopping criterion threshold used in the numerical scheme, which is a trade-off between |
| RyoheiHagimoto | 0:0e0631af0305 | 454 | precision and running time. A small value will yield more accurate solutions at the |
| RyoheiHagimoto | 0:0e0631af0305 | 455 | expense of a slower convergence. |
| RyoheiHagimoto | 0:0e0631af0305 | 456 | |
| RyoheiHagimoto | 0:0e0631af0305 | 457 | - member int iterations |
| RyoheiHagimoto | 0:0e0631af0305 | 458 | Stopping criterion iterations number used in the numerical scheme. |
| RyoheiHagimoto | 0:0e0631af0305 | 459 | |
| RyoheiHagimoto | 0:0e0631af0305 | 460 | C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow". |
| RyoheiHagimoto | 0:0e0631af0305 | 461 | Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation". |
| RyoheiHagimoto | 0:0e0631af0305 | 462 | */ |
| RyoheiHagimoto | 0:0e0631af0305 | 463 | class CV_EXPORTS_W DualTVL1OpticalFlow : public DenseOpticalFlow |
| RyoheiHagimoto | 0:0e0631af0305 | 464 | { |
| RyoheiHagimoto | 0:0e0631af0305 | 465 | public: |
| RyoheiHagimoto | 0:0e0631af0305 | 466 | //! @brief Time step of the numerical scheme |
| RyoheiHagimoto | 0:0e0631af0305 | 467 | /** @see setTau */ |
| RyoheiHagimoto | 0:0e0631af0305 | 468 | CV_WRAP virtual double getTau() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 469 | /** @copybrief getTau @see getTau */ |
| RyoheiHagimoto | 0:0e0631af0305 | 470 | CV_WRAP virtual void setTau(double val) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 471 | //! @brief Weight parameter for the data term, attachment parameter |
| RyoheiHagimoto | 0:0e0631af0305 | 472 | /** @see setLambda */ |
| RyoheiHagimoto | 0:0e0631af0305 | 473 | CV_WRAP virtual double getLambda() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 474 | /** @copybrief getLambda @see getLambda */ |
| RyoheiHagimoto | 0:0e0631af0305 | 475 | CV_WRAP virtual void setLambda(double val) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 476 | //! @brief Weight parameter for (u - v)^2, tightness parameter |
| RyoheiHagimoto | 0:0e0631af0305 | 477 | /** @see setTheta */ |
| RyoheiHagimoto | 0:0e0631af0305 | 478 | CV_WRAP virtual double getTheta() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 479 | /** @copybrief getTheta @see getTheta */ |
| RyoheiHagimoto | 0:0e0631af0305 | 480 | CV_WRAP virtual void setTheta(double val) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 481 | //! @brief coefficient for additional illumination variation term |
| RyoheiHagimoto | 0:0e0631af0305 | 482 | /** @see setGamma */ |
| RyoheiHagimoto | 0:0e0631af0305 | 483 | CV_WRAP virtual double getGamma() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 484 | /** @copybrief getGamma @see getGamma */ |
| RyoheiHagimoto | 0:0e0631af0305 | 485 | CV_WRAP virtual void setGamma(double val) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 486 | //! @brief Number of scales used to create the pyramid of images |
| RyoheiHagimoto | 0:0e0631af0305 | 487 | /** @see setScalesNumber */ |
| RyoheiHagimoto | 0:0e0631af0305 | 488 | CV_WRAP virtual int getScalesNumber() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 489 | /** @copybrief getScalesNumber @see getScalesNumber */ |
| RyoheiHagimoto | 0:0e0631af0305 | 490 | CV_WRAP virtual void setScalesNumber(int val) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 491 | //! @brief Number of warpings per scale |
| RyoheiHagimoto | 0:0e0631af0305 | 492 | /** @see setWarpingsNumber */ |
| RyoheiHagimoto | 0:0e0631af0305 | 493 | CV_WRAP virtual int getWarpingsNumber() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 494 | /** @copybrief getWarpingsNumber @see getWarpingsNumber */ |
| RyoheiHagimoto | 0:0e0631af0305 | 495 | CV_WRAP virtual void setWarpingsNumber(int val) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 496 | //! @brief Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time |
| RyoheiHagimoto | 0:0e0631af0305 | 497 | /** @see setEpsilon */ |
| RyoheiHagimoto | 0:0e0631af0305 | 498 | CV_WRAP virtual double getEpsilon() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 499 | /** @copybrief getEpsilon @see getEpsilon */ |
| RyoheiHagimoto | 0:0e0631af0305 | 500 | CV_WRAP virtual void setEpsilon(double val) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 501 | //! @brief Inner iterations (between outlier filtering) used in the numerical scheme |
| RyoheiHagimoto | 0:0e0631af0305 | 502 | /** @see setInnerIterations */ |
| RyoheiHagimoto | 0:0e0631af0305 | 503 | CV_WRAP virtual int getInnerIterations() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 504 | /** @copybrief getInnerIterations @see getInnerIterations */ |
| RyoheiHagimoto | 0:0e0631af0305 | 505 | CV_WRAP virtual void setInnerIterations(int val) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 506 | //! @brief Outer iterations (number of inner loops) used in the numerical scheme |
| RyoheiHagimoto | 0:0e0631af0305 | 507 | /** @see setOuterIterations */ |
| RyoheiHagimoto | 0:0e0631af0305 | 508 | CV_WRAP virtual int getOuterIterations() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 509 | /** @copybrief getOuterIterations @see getOuterIterations */ |
| RyoheiHagimoto | 0:0e0631af0305 | 510 | CV_WRAP virtual void setOuterIterations(int val) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 511 | //! @brief Use initial flow |
| RyoheiHagimoto | 0:0e0631af0305 | 512 | /** @see setUseInitialFlow */ |
| RyoheiHagimoto | 0:0e0631af0305 | 513 | CV_WRAP virtual bool getUseInitialFlow() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 514 | /** @copybrief getUseInitialFlow @see getUseInitialFlow */ |
| RyoheiHagimoto | 0:0e0631af0305 | 515 | CV_WRAP virtual void setUseInitialFlow(bool val) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 516 | //! @brief Step between scales (<1) |
| RyoheiHagimoto | 0:0e0631af0305 | 517 | /** @see setScaleStep */ |
| RyoheiHagimoto | 0:0e0631af0305 | 518 | CV_WRAP virtual double getScaleStep() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 519 | /** @copybrief getScaleStep @see getScaleStep */ |
| RyoheiHagimoto | 0:0e0631af0305 | 520 | CV_WRAP virtual void setScaleStep(double val) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 521 | //! @brief Median filter kernel size (1 = no filter) (3 or 5) |
| RyoheiHagimoto | 0:0e0631af0305 | 522 | /** @see setMedianFiltering */ |
| RyoheiHagimoto | 0:0e0631af0305 | 523 | CV_WRAP virtual int getMedianFiltering() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 524 | /** @copybrief getMedianFiltering @see getMedianFiltering */ |
| RyoheiHagimoto | 0:0e0631af0305 | 525 | CV_WRAP virtual void setMedianFiltering(int val) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 526 | |
| RyoheiHagimoto | 0:0e0631af0305 | 527 | /** @brief Creates instance of cv::DualTVL1OpticalFlow*/ |
| RyoheiHagimoto | 0:0e0631af0305 | 528 | CV_WRAP static Ptr<DualTVL1OpticalFlow> create( |
| RyoheiHagimoto | 0:0e0631af0305 | 529 | double tau = 0.25, |
| RyoheiHagimoto | 0:0e0631af0305 | 530 | double lambda = 0.15, |
| RyoheiHagimoto | 0:0e0631af0305 | 531 | double theta = 0.3, |
| RyoheiHagimoto | 0:0e0631af0305 | 532 | int nscales = 5, |
| RyoheiHagimoto | 0:0e0631af0305 | 533 | int warps = 5, |
| RyoheiHagimoto | 0:0e0631af0305 | 534 | double epsilon = 0.01, |
| RyoheiHagimoto | 0:0e0631af0305 | 535 | int innnerIterations = 30, |
| RyoheiHagimoto | 0:0e0631af0305 | 536 | int outerIterations = 10, |
| RyoheiHagimoto | 0:0e0631af0305 | 537 | double scaleStep = 0.8, |
| RyoheiHagimoto | 0:0e0631af0305 | 538 | double gamma = 0.0, |
| RyoheiHagimoto | 0:0e0631af0305 | 539 | int medianFiltering = 5, |
| RyoheiHagimoto | 0:0e0631af0305 | 540 | bool useInitialFlow = false); |
| RyoheiHagimoto | 0:0e0631af0305 | 541 | }; |
| RyoheiHagimoto | 0:0e0631af0305 | 542 | |
| RyoheiHagimoto | 0:0e0631af0305 | 543 | /** @brief Creates instance of cv::DenseOpticalFlow |
| RyoheiHagimoto | 0:0e0631af0305 | 544 | */ |
| RyoheiHagimoto | 0:0e0631af0305 | 545 | CV_EXPORTS_W Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1(); |
| RyoheiHagimoto | 0:0e0631af0305 | 546 | |
| RyoheiHagimoto | 0:0e0631af0305 | 547 | /** @brief Class computing a dense optical flow using the Gunnar Farneback’s algorithm. |
| RyoheiHagimoto | 0:0e0631af0305 | 548 | */ |
| RyoheiHagimoto | 0:0e0631af0305 | 549 | class CV_EXPORTS_W FarnebackOpticalFlow : public DenseOpticalFlow |
| RyoheiHagimoto | 0:0e0631af0305 | 550 | { |
| RyoheiHagimoto | 0:0e0631af0305 | 551 | public: |
| RyoheiHagimoto | 0:0e0631af0305 | 552 | CV_WRAP virtual int getNumLevels() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 553 | CV_WRAP virtual void setNumLevels(int numLevels) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 554 | |
| RyoheiHagimoto | 0:0e0631af0305 | 555 | CV_WRAP virtual double getPyrScale() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 556 | CV_WRAP virtual void setPyrScale(double pyrScale) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 557 | |
| RyoheiHagimoto | 0:0e0631af0305 | 558 | CV_WRAP virtual bool getFastPyramids() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 559 | CV_WRAP virtual void setFastPyramids(bool fastPyramids) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 560 | |
| RyoheiHagimoto | 0:0e0631af0305 | 561 | CV_WRAP virtual int getWinSize() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 562 | CV_WRAP virtual void setWinSize(int winSize) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 563 | |
| RyoheiHagimoto | 0:0e0631af0305 | 564 | CV_WRAP virtual int getNumIters() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 565 | CV_WRAP virtual void setNumIters(int numIters) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 566 | |
| RyoheiHagimoto | 0:0e0631af0305 | 567 | CV_WRAP virtual int getPolyN() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 568 | CV_WRAP virtual void setPolyN(int polyN) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 569 | |
| RyoheiHagimoto | 0:0e0631af0305 | 570 | CV_WRAP virtual double getPolySigma() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 571 | CV_WRAP virtual void setPolySigma(double polySigma) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 572 | |
| RyoheiHagimoto | 0:0e0631af0305 | 573 | CV_WRAP virtual int getFlags() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 574 | CV_WRAP virtual void setFlags(int flags) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 575 | |
| RyoheiHagimoto | 0:0e0631af0305 | 576 | CV_WRAP static Ptr<FarnebackOpticalFlow> create( |
| RyoheiHagimoto | 0:0e0631af0305 | 577 | int numLevels = 5, |
| RyoheiHagimoto | 0:0e0631af0305 | 578 | double pyrScale = 0.5, |
| RyoheiHagimoto | 0:0e0631af0305 | 579 | bool fastPyramids = false, |
| RyoheiHagimoto | 0:0e0631af0305 | 580 | int winSize = 13, |
| RyoheiHagimoto | 0:0e0631af0305 | 581 | int numIters = 10, |
| RyoheiHagimoto | 0:0e0631af0305 | 582 | int polyN = 5, |
| RyoheiHagimoto | 0:0e0631af0305 | 583 | double polySigma = 1.1, |
| RyoheiHagimoto | 0:0e0631af0305 | 584 | int flags = 0); |
| RyoheiHagimoto | 0:0e0631af0305 | 585 | }; |
| RyoheiHagimoto | 0:0e0631af0305 | 586 | |
| RyoheiHagimoto | 0:0e0631af0305 | 587 | |
| RyoheiHagimoto | 0:0e0631af0305 | 588 | /** @brief Class used for calculating a sparse optical flow. |
| RyoheiHagimoto | 0:0e0631af0305 | 589 | |
| RyoheiHagimoto | 0:0e0631af0305 | 590 | The class can calculate an optical flow for a sparse feature set using the |
| RyoheiHagimoto | 0:0e0631af0305 | 591 | iterative Lucas-Kanade method with pyramids. |
| RyoheiHagimoto | 0:0e0631af0305 | 592 | |
| RyoheiHagimoto | 0:0e0631af0305 | 593 | @sa calcOpticalFlowPyrLK |
| RyoheiHagimoto | 0:0e0631af0305 | 594 | |
| RyoheiHagimoto | 0:0e0631af0305 | 595 | */ |
| RyoheiHagimoto | 0:0e0631af0305 | 596 | class CV_EXPORTS_W SparsePyrLKOpticalFlow : public SparseOpticalFlow |
| RyoheiHagimoto | 0:0e0631af0305 | 597 | { |
| RyoheiHagimoto | 0:0e0631af0305 | 598 | public: |
| RyoheiHagimoto | 0:0e0631af0305 | 599 | CV_WRAP virtual Size getWinSize() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 600 | CV_WRAP virtual void setWinSize(Size winSize) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 601 | |
| RyoheiHagimoto | 0:0e0631af0305 | 602 | CV_WRAP virtual int getMaxLevel() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 603 | CV_WRAP virtual void setMaxLevel(int maxLevel) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 604 | |
| RyoheiHagimoto | 0:0e0631af0305 | 605 | CV_WRAP virtual TermCriteria getTermCriteria() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 606 | CV_WRAP virtual void setTermCriteria(TermCriteria& crit) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 607 | |
| RyoheiHagimoto | 0:0e0631af0305 | 608 | CV_WRAP virtual int getFlags() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 609 | CV_WRAP virtual void setFlags(int flags) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 610 | |
| RyoheiHagimoto | 0:0e0631af0305 | 611 | CV_WRAP virtual double getMinEigThreshold() const = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 612 | CV_WRAP virtual void setMinEigThreshold(double minEigThreshold) = 0; |
| RyoheiHagimoto | 0:0e0631af0305 | 613 | |
| RyoheiHagimoto | 0:0e0631af0305 | 614 | CV_WRAP static Ptr<SparsePyrLKOpticalFlow> create( |
| RyoheiHagimoto | 0:0e0631af0305 | 615 | Size winSize = Size(21, 21), |
| RyoheiHagimoto | 0:0e0631af0305 | 616 | int maxLevel = 3, TermCriteria crit = |
| RyoheiHagimoto | 0:0e0631af0305 | 617 | TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01), |
| RyoheiHagimoto | 0:0e0631af0305 | 618 | int flags = 0, |
| RyoheiHagimoto | 0:0e0631af0305 | 619 | double minEigThreshold = 1e-4); |
| RyoheiHagimoto | 0:0e0631af0305 | 620 | }; |
| RyoheiHagimoto | 0:0e0631af0305 | 621 | |
| RyoheiHagimoto | 0:0e0631af0305 | 622 | //! @} video_track |
| RyoheiHagimoto | 0:0e0631af0305 | 623 | |
| RyoheiHagimoto | 0:0e0631af0305 | 624 | } // cv |
| RyoheiHagimoto | 0:0e0631af0305 | 625 | |
| RyoheiHagimoto | 0:0e0631af0305 | 626 | #endif |