diff --git a/+cv/+test/private/.gitignore b/+cv/+test/private/.gitkeep similarity index 100% rename from +cv/+test/private/.gitignore rename to +cv/+test/private/.gitkeep diff --git a/+cv/AGAST.m b/+cv/AGAST.m index eb51ce073..a9ed803c2 100644 --- a/+cv/AGAST.m +++ b/+cv/AGAST.m @@ -1,37 +1,37 @@ %AGAST Detects corners using the AGAST algorithm % -% keypoints = cv.AGAST(im) -% keypoints = cv.AGAST(im, 'OptionName', optionValue, ...) +% keypoints = cv.AGAST(im) +% keypoints = cv.AGAST(im, 'OptionName', optionValue, ...) % % ## Input % * __im__ 8-bit grayscale image where keypoints (corners) are to be detected. % % ## Output % * __keypoints__ Keypoints detected on the image. A 1-by-N structure array. -% It has the following fields: -% * __pt__ coordinates of the keypoint [x,y] -% * __size__ diameter of the meaningful keypoint neighborhood -% * __angle__ computed orientation of the keypoint (-1 if not applicable). -% Its possible values are in a range [0,360) degrees. It is measured -% relative to image coordinate system (y-axis is directed downward), -% ie in clockwise. -% * __response__ the response by which the most strong keypoints have been -% selected. Can be used for further sorting or subsampling. -% * __octave__ octave (pyramid layer) from which the keypoint has been -% extracted. -% * **class_id** object id that can be used to clustered keypoints by an -% object they belong to. +% It has the following fields: +% * __pt__ coordinates of the keypoint `[x,y]` +% * __size__ diameter of the meaningful keypoint neighborhood +% * __angle__ computed orientation of the keypoint (-1 if not applicable). +% Its possible values are in a range [0,360) degrees. It is measured +% relative to image coordinate system (y-axis is directed downward), i.e. +% clockwise. +% * __response__ the response by which the most strong keypoints have been +% selected. Can be used for further sorting or subsampling. +% * __octave__ octave (pyramid layer) from which the keypoint has been +% extracted. +% * **class_id** object id that can be used to cluster keypoints by an +% object they belong to. % % ## Options % * __Threshold__ Threshold on difference between intensity of the central -% pixel and pixels on a circle around this pixel. default 10. -% * __NonmaxSuppression__ If it is true, non-maximum supression is applied -% to detected corners (keypoints). default true. +% pixel and pixels on a circle around this pixel. default 10. +% * __NonmaxSuppression__ If it is true, non-maximum suppression is applied to +% detected corners (keypoints). default true. % * __Type__ one of the four neighborhoods as defined in the paper: -% * **AGAST_5_8** -% * **AGAST_7_12d** -% * **AGAST_7_12s** -% * **OAST_9_16** (default) +% * **AGAST_5_8** +% * **AGAST_7_12d** +% * **AGAST_7_12s** +% * **OAST_9_16** (default) % % Detects corners using the AGAST algorithm by [mair2010]. % diff --git a/+cv/AKAZE.m b/+cv/AKAZE.m index 2e847b2d2..db44653b1 100644 --- a/+cv/AKAZE.m +++ b/+cv/AKAZE.m @@ -4,6 +4,11 @@ % As described in [ANB13]. % % Note: AKAZE descriptors can only be used with KAZE or AKAZE keypoints. + % This class is thread-safe. + % + % Note: When you need descriptors, use cv.AKAZE.detectAndCompute, which + % provides better performance. When using cv.AKAZE.detect followed by + % cv.AKAZE.compute, the scale space pyramid is computed twice.
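To make the note above concrete, here is a minimal MATLAB sketch contrasting the preferred single-pass call with the slower two-pass variant (the image file name is a placeholder):

    img = cv.imread('test.png', 'Grayscale',true);  % placeholder input image
    akaze = cv.AKAZE();
    % preferred: detect and describe in one pass, pyramid built once
    [kpts, descs] = akaze.detectAndCompute(img);
    % slower alternative: the scale space pyramid is computed twice
    kpts2 = akaze.detect(img);
    descs2 = akaze.compute(img, kpts2);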
% % ## References % [ANB13]: @@ -16,7 +21,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -64,14 +70,14 @@ function this = AKAZE(varargin) %AKAZE The AKAZE constructor % - % obj = cv.AKAZE() - % obj = cv.AKAZE(..., 'OptionName',optionValue, ...) + % obj = cv.AKAZE() + % obj = cv.AKAZE(..., 'OptionName',optionValue, ...) % % ## Options % * __DescriptorType__ See cv.AKAZE.DescriptorType, default 'MLDB' % * __DescriptorSize__ See cv.AKAZE.DescriptorSize, default 0 % * __DescriptorChannels__ See cv.AKAZE.DescriptorChannels, - % default 3 + % default 3 % * __Threshold__ See cv.AKAZE.Threshold, default 0.001 % * __NOctaves__ See cv.AKAZE.NOctaves, default 4 % * __NOctaveLayers__ See cv.AKAZE.NOctaveLayers, default 4 @@ -85,7 +91,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.AKAZE % @@ -96,7 +102,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -110,7 +116,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.AKAZE.empty, cv.AKAZE.load % @@ -120,11 +126,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.AKAZE.clear, cv.AKAZE.load % @@ -134,7 +140,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -150,21 +156,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -178,11 +184,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. 
% % See also: cv.AKAZE.save, cv.AKAZE.load % @@ -195,16 +201,16 @@ function load(this, fname_or_str, varargin) function ntype = defaultNorm(this) %DEFAULTNORM Returns the default norm type % - % ntype = obj.defaultNorm() + % ntype = obj.defaultNorm() % % ## Output % * __ntype__ Norm type. One of `cv::NormTypes`: - % * __Inf__ - % * __L1__ - % * __L2__ - % * __L2Sqr__ - % * __Hamming__ - % * __Hamming2__ + % * __Inf__ + % * __L1__ + % * __L2__ + % * __L2Sqr__ + % * __Hamming__ + % * __Hamming2__ % % `L2` when cv.AKAZE.DescriptorType is 'KAZE' or 'KAZEUpright', % otherwise 'Hamming' for 'MLDB' and 'MLDBUpright'. @@ -217,7 +223,7 @@ function load(this, fname_or_str, varargin) function sz = descriptorSize(this) %DESCRIPTORSIZE Returns the descriptor size in floats/bytes % - % sz = obj.descriptorSize() + % sz = obj.descriptorSize() % % ## Output % * __sz__ Descriptor size. @@ -234,13 +240,13 @@ function load(this, fname_or_str, varargin) function dtype = descriptorType(this) %DESCRIPTORTYPE Returns the descriptor type % - % dtype = obj.descriptorType() + % dtype = obj.descriptorType() % % ## Output % * __dtype__ Descriptor type, one of numeric MATLAB class names. % % `single` when cv.AKAZE.DescriptorType is 'KAZE' or 'KAZEUpright', - % otherwise 'uint8' for 'MLDB' and 'MLDBUpright'. + % otherwise `uint8` for 'MLDB' and 'MLDBUpright'. % % See also: cv.AKAZE.descriptorSize, cv.AKAZE.compute % @@ -250,27 +256,25 @@ function load(this, fname_or_str, varargin) function keypoints = detect(this, img, varargin) %DETECT Detects keypoints in an image or image set % - % keypoints = obj.detect(img) - % keypoints = obj.detect(imgs) - % [...] = obj.detect(..., 'OptionName',optionValue, ...) + % keypoints = obj.detect(img) + % keypoints = obj.detect(imgs) + % [...] = obj.detect(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Image (first variant), grayscale image. % * __imgs__ Image set (second variant), cell array of images. % % ## Output - % * __keypoints__ The detected keypoints. In the first variant, - % a 1-by-N structure array. In the second variant of the - % method, `keypoints{i}` is a set of keypoints detected in - % `imgs{i}`. + % * __keypoints__ The detected keypoints. In the first variant, a + % 1-by-N structure array. In the second variant of the method, + % `keypoints{i}` is a set of keypoints detected in `imgs{i}`. % % ## Options % * __Mask__ A mask specifying where to look for keypoints - % (optional). It must be a logical or 8-bit integer matrix - % with non-zero values in the region of interest. In the - % second variant, it is a cell-array of masks for each input - % image, `masks{i}` is a mask for `imgs{i}`. - % Not set by default. + % (optional). It must be a logical or 8-bit integer matrix with + % non-zero values in the region of interest. In the second + % variant, it is a cell-array of masks for each input image, + % `masks{i}` is a mask for `imgs{i}`. Not set by default. % % See also: cv.AKAZE.compute, cv.AKAZE.detectAndCompute % @@ -280,26 +284,26 @@ function load(this, fname_or_str, varargin) function [descriptors, keypoints] = compute(this, img, keypoints) %COMPUTE Computes the descriptors for a set of keypoints detected in an image or image set % - % [descriptors, keypoints] = obj.compute(img, keypoints) - % [descriptors, keypoints] = obj.compute(imgs, keypoints) + % [descriptors, keypoints] = obj.compute(img, keypoints) + % [descriptors, keypoints] = obj.compute(imgs, keypoints) % % ## Input % * __img__ Image (first variant), grayscale image. 
% * __imgs__ Image set (second variant), cell array of images. % * __keypoints__ Input collection of keypoints. Keypoints for - % which a descriptor cannot be computed are removed. - % Sometimes new keypoints can be added, for example: cv.SIFT - % duplicates keypoint with several dominant orientations - % (for each orientation). In the first variant, this is a - % struct-array of detected keypoints. In the second variant, - % it is a cell-array, where `keypoints{i}` is a set of keypoints - % detected in `imgs{i}` (a struct-array like before). + % which a descriptor cannot be computed are removed. Sometimes + % new keypoints can be added, for example: cv.SIFT duplicates + % keypoint with several dominant orientations (for each + % orientation). In the first variant, this is a struct-array of + % detected keypoints. In the second variant, it is a cell-array, + % where `keypoints{i}` is a set of keypoints detected in + % `imgs{i}` (a struct-array like before). % % ## Output % * __descriptors__ Computed descriptors. In the second variant of - % the method `descriptors{i}` are descriptors computed for a - % `keypoints{i}`. Row `j` in `descriptors` (or - % `descriptors{i}`) is the descriptor for `j`-th keypoint. + % the method `descriptors{i}` are descriptors computed for a + % `keypoints{i}`. Row `j` in `descriptors` (or `descriptors{i}`) + % is the descriptor for `j`-th keypoint. % * __keypoints__ Optional output with possibly updated keypoints. % % See also: cv.AKAZE.detect, cv.AKAZE.detectAndCompute @@ -310,42 +314,42 @@ function load(this, fname_or_str, varargin) function [keypoints, descriptors] = detectAndCompute(this, img, varargin) %DETECTANDCOMPUTE Detects keypoints and computes their descriptors % - % [keypoints, descriptors] = obj.detectAndCompute(img) - % [...] = obj.detectAndCompute(..., 'OptionName',optionValue, ...) + % [keypoints, descriptors] = obj.detectAndCompute(img) + % [...] = obj.detectAndCompute(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Input `uint8`/`uint16`/`single` grayscale image. - % Internally image is converted to 32-bit floating-point in - % the [0,1] range. + % Internally image is converted to 32-bit floating-point in the + % [0,1] range. % % ## Output % * __keypoints__ The detected keypoints. A 1-by-N structure array - % with the following fields: - % * __pt__ coordinates of the keypoint `[x,y]` - % * __size__ diameter of the meaningful keypoint neighborhood - % * __angle__ computed orientation of the keypoint (-1 if not - % applicable); it's in [0,360) degrees and measured - % relative to image coordinate system (y-axis is - % directed downward), i.e in clockwise. - % * __response__ the response by which the most strong - % keypoints have been selected. Can be used for further - % sorting or subsampling. - % * __octave__ octave (pyramid layer) from which the keypoint - % has been extracted. - % * **class_id** object class (if the keypoints need to be - % clustered by an object they belong to). + % with the following fields: + % * __pt__ coordinates of the keypoint `[x,y]` + % * __size__ diameter of the meaningful keypoint neighborhood + % * __angle__ computed orientation of the keypoint (-1 if not + % applicable); it's in [0,360) degrees and measured relative + % to image coordinate system (y-axis is directed downward), + % i.e. clockwise. + % * __response__ the response by which the most strong keypoints + % have been selected. Can be used for further sorting or + % subsampling.
+ % * __octave__ octave (pyramid layer) from which the keypoint + % has been extracted. + % * **class_id** object class (if the keypoints need to be + % clustered by an object they belong to). % * __descriptors__ Computed descriptors. Output concatenated - % vectors of descriptors. Each descriptor is a vector of - % length cv.AKAZE.descriptorSize, so the total size of - % descriptors will be `numel(keypoints) * obj.descriptorSize()`. - % A matrix of size N-by-sz, one row per keypoint. + % vectors of descriptors. Each descriptor is a vector of length + % cv.AKAZE.descriptorSize, so the total size of descriptors will + % be `numel(keypoints) * obj.descriptorSize()`. A matrix of size + % N-by-sz, one row per keypoint. % % ## Options % * __Mask__ optional mask specifying where to look for keypoints. - % Not set by default. + % Not set by default. % * __Keypoints__ If passed, then the method will use the provided - % vector of keypoints instead of detecting them, and the - % algorithm just computes their descriptors. + % vector of keypoints instead of detecting them, and the + % algorithm just computes their descriptors. % % See also: cv.AKAZE.detect, cv.AKAZE.compute % diff --git a/+cv/ANN_MLP.m b/+cv/ANN_MLP.m index c9bd7b6d6..385bdebd3 100644 --- a/+cv/ANN_MLP.m +++ b/+cv/ANN_MLP.m @@ -18,7 +18,7 @@ % next layer. The example below represents a 3-layer perceptron with three % inputs, two outputs, and the hidden layer including five neurons: % - % ![image](http://docs.opencv.org/3.1.0/mlp.png) + % ![image](https://docs.opencv.org/3.3.1/mlp.png) % % All the neurons in MLP are similar. Each of them has several input links % (it takes the output values from several neurons in the previous layer @@ -28,24 +28,24 @@ % bias term. The sum is transformed using the activation function `f` % that may be also different for different neurons. % - % ![image](http://docs.opencv.org/3.1.0/neuron_model.png) + % ![image](https://docs.opencv.org/3.3.1/neuron_model.png) % % In other words, given the outputs `x_j` of the layer `n`, the outputs % `y_i` of the layer `n+1` are computed as: % - % u_i = sum_j (w_{i,j}^{n+1} * x_j) + w_{i,bias}^{n+1} - % y_i = f(u_i) + % u_i = sum_j (w_{i,j}^{n+1} * x_j) + w_{i,bias}^{n+1} + % y_i = f(u_i) % % Different activation functions may be used. ML implements three standard % functions: % % * __Identity__: Identity function `f(x) = y` % * __Sigmoid__: Symmetrical sigmoid, which is the default choice for MLP - % `f(x) = beta * (1-exp(-alpha*x)) / (1+exp(-alpha*x))` + % `f(x) = beta * (1-exp(-alpha*x)) / (1+exp(-alpha*x))` % * __Gaussian__: Gaussian function, which is not completely supported at - % the moment `f(x) = beta * exp(-alpha*x*x)` + % the moment `f(x) = beta * exp(-alpha*x*x)` % - % ![image](http://docs.opencv.org/3.1.0/sigmoid_bipolar.png) + % ![image](https://docs.opencv.org/3.3.1/sigmoid_bipolar.png) % % In ML, all the neurons have the same activation functions, with the % same free parameters (`alpha`, `beta`) that are specified by user and @@ -53,12 +53,12 @@ % % So, the whole trained network works as follows: % - % 1. Take the feature vector as input. The vector size is equal to the - % size of the input layer. - % 2. Pass values as input to the first hidden layer. - % 3. Compute outputs of the hidden layer using the weights and the - % activation functions. - % 4. Pass outputs further downstream until you compute the output layer. + % 1. Take the feature vector as input. The vector size is equal to the + % size of the input layer. + % 2. 
Pass values as input to the first hidden layer. + % 3. Compute outputs of the hidden layer using the weights and the + % activation functions. + % 4. Pass outputs further downstream until you compute the output layer. % % So, to compute the network, you need to know all the weights % `w_{i,j}^{n+1}`. The weights are computed by the training algorithm. The @@ -93,7 +93,7 @@ % % ## References % [BackPropWikipedia]: - % > [Back-propagation algorithm](http://en.wikipedia.org/wiki/Backpropagation) + % > [Back-propagation algorithm](https://en.wikipedia.org/wiki/Backpropagation) % % [LeCun98]: % > LeCun, L. Bottou, G.B. Orr and K.-R. Muller, "Efficient backprop", @@ -142,9 +142,9 @@ % `struct('type','Count+EPS', 'maxCount',1000, 'epsilon',0.01)`. % A struct with the following fields is accepted: % - % * __type__ one of {'Count', 'EPS', 'Count+EPS'} - % * __maxCount__ maximum number of iterations - % * __epsilon__ error tolerance value + % * __type__ one of {'Count', 'EPS', 'Count+EPS'} + % * __maxCount__ maximum number of iterations + % * __epsilon__ error tolerance value TermCriteria % BPROP: Strength of the weight gradient term. % @@ -194,7 +194,7 @@ function this = ANN_MLP() %ANN_MLP Creates an empty ANN-MLP model % - % model = cv.ANN_MLP() + % model = cv.ANN_MLP() % % Use `train` to train the model, or `load` to load a pre-trained % model. Note that the train method has optional flags. @@ -207,7 +207,7 @@ function delete(this) %DELETE Destructor % - % model.delete() + % model.delete() % % See also: cv.ANN_MLP % @@ -221,7 +221,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % model.clear() + % model.clear() % % The method clear does the same job as the destructor: it % deallocates all the memory occupied by the class members. But @@ -237,11 +237,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = model.empty() + % b = model.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.ANN_MLP.clear, cv.ANN_MLP.load % @@ -251,17 +251,17 @@ function clear(this) function varargout = save(this, filename) %SAVE Saves the algorithm parameters to a file or a string % - % model.save(filename) - % str = model.save(filename) + % model.save(filename) + % str = model.save(filename) % % ## Input % * __filename__ Name of the file to save to. In case of string - % output, only the filename extension is used to determine - % the output format (XML or YAML). + % output, only the filename extension is used to determine the + % output format (XML or YAML). % % ## Output % * __str__ optional output. If requested, the model is persisted - % to a string in memory instead of writing to disk. + % to a string in memory instead of writing to disk. % % This method stores the complete model state to the specified % XML or YAML file (or to a string in memory, based on the number @@ -275,23 +275,22 @@ function clear(this) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % model.load(filename) - % model.load(str, 'FromString',true) - % model.load(..., 'OptionName',optionValue, ...) + % model.load(filename) + % model.load(str, 'FromString',true) + % model.load(..., 'OptionName',optionValue, ...) % % ## Input % * __filename__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. 
% % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model - % (switches between `Algorithm::load()` and - % `Algorithm::loadFromString()` C++ methods). - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model (switches + % between `Algorithm::load()` and + % `Algorithm::loadFromString()` C++ methods). default false % % This method loads the complete model state from the specified % XML or YAML file (either from disk or serialized string). The @@ -305,11 +304,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = model.getDefaultName() + % name = model.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.ANN_MLP.save, cv.ANN_MLP.load % @@ -322,7 +321,7 @@ function load(this, fname_or_str, varargin) function count = getVarCount(this) %GETVARCOUNT Returns the number of variables in training samples % - % count = model.getVarCount() + % count = model.getVarCount() % % ## Output % * __count__ number of variables in training samples. @@ -335,7 +334,7 @@ function load(this, fname_or_str, varargin) function b = isTrained(this) %ISTRAINED Returns true if the model is trained % - % b = model.isTrained() + % b = model.isTrained() % % ## Output % * __b__ Returns true if the model is trained, false otherwise. @@ -348,11 +347,11 @@ function load(this, fname_or_str, varargin) function b = isClassifier(this) %ISCLASSIFIER Returns true if the model is a classifier % - % b = model.isClassifier() + % b = model.isClassifier() % % ## Output % * __b__ Returns true if the model is a classifier, false if the - % model is a regressor. + % model is a regressor. % % Always false for ANN MLP models. % @@ -364,133 +363,128 @@ function load(this, fname_or_str, varargin) function status = train(this, samples, responses, varargin) %TRAIN Trains/updates the MLP % - % status = model.train(samples, responses) - % status = model.train(csvFilename, []) - % [...] = model.train(..., 'OptionName', optionValue, ...) + % status = model.train(samples, responses) + % status = model.train(csvFilename, []) + % [...] = model.train(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ matrix of training samples. It should be - % floating-point type. By default, each row represents a sample - % (see the `Layout` option). - % * __responses__ Floating-point matrix of the corresponding - % output vectors, one vector per row. + % floating-point type. By default, each row represents a sample + % (see the `Layout` option). + % * __responses__ Floating-point matrix of the corresponding + % output vectors, one vector per row. % * __csvFilename__ The input CSV file name from which to load - % dataset. In this variant, you should set the second - % argument to an empty array. + % dataset. In this variant, you should set the second argument + % to an empty array. % % ## Output % * __status__ Success flag. % % ## Options % * __Data__ Training data options, specified as a cell array of - % key/value pairs of the form `{'key',val, ...}`. 
See below. + % key/value pairs of the form `{'key',val, ...}`. See below. % * __Flags__ The optional training flags, model-dependent. For - % convenience, you can set the individual flag options - % below, instead of directly setting bits here. default 0 + % convenience, you can set the individual flag options below, + % instead of directly setting bits here. default 0 % * __UpdateWeights__ Algorithm updates the network weights, - % rather than computes them from scratch. In the latter case - % the weights are initialized using the Nguyen-Widrow - % algorithm. default false. + % rather than computes them from scratch. In the latter case the + % weights are initialized using the Nguyen-Widrow algorithm. + % default false. % * __NoInputScale__ Algorithm does not normalize the input - % vectors. If this flag is not set, the training algorithm - % normalizes each input feature independently, shifting its - % mean value to 0 and making the standard deviation equal - % to 1. If the network is assumed to be updated frequently, - % the new training data could be much different from - % original one. In this case, you should take care of proper - % normalization. default false. + % vectors. If this flag is not set, the training algorithm + % normalizes each input feature independently, shifting its mean + % value to 0 and making the standard deviation equal to 1. If + % the network is assumed to be updated frequently, the new + % training data could be much different from original one. In + % this case, you should take care of proper normalization. + % default false. % * __NoOutputScale__ Algorithm does not normalize the output - % vectors. If the flag is not set, the training algorithm - % normalizes each output feature independently, by - % transforming it to the certain range depending on the used - % activation function. default false. + % vectors. If the flag is not set, the training algorithm + % normalizes each output feature independently, by transforming + % it to the certain range depending on the used activation + % function. default false. % % ### Options for `Data` (first variant with samples and reponses) % * __Layout__ Sample types. Default 'Row'. One of: - % * __Row__ each training sample is a row of samples. - % * __Col__ each training sample occupies a column of - % samples. + % * __Row__ each training sample is a row of samples. + % * __Col__ each training sample occupies a column of samples. % * __VarIdx__ vector specifying which variables to use for - % training. It can be an integer vector (`int32`) containing - % 0-based variable indices or logical vector (`uint8` or - % `logical`) containing a mask of active variables. Not set - % by default, which uses all variables in the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based variable indices or logical vector (`uint8` or + % `logical`) containing a mask of active variables. Not set by + % default, which uses all variables in the input data. % * __SampleIdx__ vector specifying which samples to use for - % training. It can be an integer vector (`int32`) containing - % 0-based sample indices or logical vector (`uint8` or - % `logical`) containing a mask of training samples of - % interest. Not set by default, which uses all samples in - % the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based sample indices or logical vector (`uint8` or + % `logical`) containing a mask of training samples of interest. 
+ % Not set by default, which uses all samples in the input data. % * __SampleWeights__ optional floating-point vector with weights - % for each sample. Some samples may be more important than - % others for training. You may want to raise the weight of - % certain classes to find the right balance between hit-rate - % and false-alarm rate, and so on. Not set by default, which - % effectively assigns an equal weight of 1 for all samples. + % for each sample. Some samples may be more important than + % others for training. You may want to raise the weight of + % certain classes to find the right balance between hit-rate and + % false-alarm rate, and so on. Not set by default, which + % effectively assigns an equal weight of 1 for all samples. % * __VarType__ optional vector of type `uint8` and size - % `<number_of_variables_in_samples> + <number_of_variables_in_responses>`, - % containing types of each input and output variable. By - % default considers all variables as numerical (both input - % and output variables). In case there is only one output - % variable of integer type, it is considered categorical. - % You can also specify a cell-array of strings (or as one - % string of single characters, e.g 'NNNC'). Possible values: - % * __Numerical__, __N__ same as 'Ordered' - % * __Ordered__, __O__ ordered variables - % * __Categorical__, __C__ categorical variables + % `<number_of_variables_in_samples> + <number_of_variables_in_responses>`, + % containing types of each input and output variable. By default + % considers all variables as numerical (both input and output + % variables). In case there is only one output variable of + % integer type, it is considered categorical. You can also + % specify a cell-array of strings (or as one string of single + % characters, e.g. 'NNNC'). Possible values: + % * __Numerical__, __N__ same as 'Ordered' + % * __Ordered__, __O__ ordered variables + % * __Categorical__, __C__ categorical variables % * __MissingMask__ Indicator mask for missing observation (not - % currently implemented). Not set by default + % currently implemented). Not set by default % * __TrainTestSplitCount__ divides the dataset into train/test - % sets, by specifying number of samples to use for the test - % set. By default all samples are used for the training set. + % sets, by specifying number of samples to use for the test set. + % By default all samples are used for the training set. % * __TrainTestSplitRatio__ divides the dataset into train/test - % sets, by specifying ratio of samples to use for the test - % set. By default all samples are used for the training set. + % sets, by specifying ratio of samples to use for the test set. + % By default all samples are used for the training set. % * __TrainTestSplitShuffle__ when splitting dataset into - % train/test sets, specify whether to shuffle the samples. - % Otherwise samples are assigned sequentially (first train - % then test). default true + % train/test sets, specify whether to shuffle the samples. + % Otherwise samples are assigned sequentially (first train then + % test). default true % % ### Options for `Data` (second variant for loading CSV file) % * __HeaderLineCount__ The number of lines in the beginning to - % skip; besides the header, the function also skips empty - % lines and lines staring with '#'. default 1 + % skip; besides the header, the function also skips empty lines + % and lines starting with '#'. default 1 % * __ResponseStartIdx__ Index of the first output variable. If - % -1, the function considers the last variable as the - % response.
If the dataset only contains input variables and - % no responses, use `ResponseStartIdx = -2` and - % `ResponseEndIdx = 0`, then the output variables vector - % will just contain zeros. default -1 + % -1, the function considers the last variable as the response. + % If the dataset only contains input variables and no responses, + % use `ResponseStartIdx = -2` and `ResponseEndIdx = 0`, then the + % output variables vector will just contain zeros. default -1 % * __ResponseEndIdx__ Index of the last output variable + 1. If - % -1, then there is single response variable at - % `ResponseStartIdx`. default -1 + % -1, then there is a single response variable at + % `ResponseStartIdx`. default -1 % * __VarTypeSpec__ The optional text string that specifies the - % variables' types. It has the format - % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, - % variables from `n1` to `n2` (inclusive range), `n3`, `n4` - % to `n5` ... are considered ordered and `n6`, `n7` to - % `n8` ... are considered as categorical. The range - % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` - % should cover all the variables. If `VarTypeSpec` is not - % specified, then algorithm uses the following rules: - % * all input variables are considered ordered by default. - % If some column contains has non- numerical values, e.g. - % 'apple', 'pear', 'apple', 'apple', 'mango', the - % corresponding variable is considered categorical. - % * if there are several output variables, they are all - % considered as ordered. Errors are reported when - % non-numerical values are used. - % * if there is a single output variable, then if its values - % are non-numerical or are all integers, then it's - % considered categorical. Otherwise, it's considered - % ordered. + % variables' types. It has the format + % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, variables + % from `n1` to `n2` (inclusive range), `n3`, `n4` to `n5` ... + % are considered ordered and `n6`, `n7` to `n8` ... are + % considered as categorical. The range + % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` should + % cover all the variables. If `VarTypeSpec` is not specified, + % then the algorithm uses the following rules: + % * all input variables are considered ordered by default. If + % some column contains non-numerical values, e.g. + % 'apple', 'pear', 'apple', 'apple', 'mango', the + % corresponding variable is considered categorical. + % * if there are several output variables, they are all + % considered as ordered. Errors are reported when + % non-numerical values are used. + % * if there is a single output variable, then if its values are + % non-numerical or are all integers, then it's considered + % categorical. Otherwise, it's considered ordered. % * __Delimiter__ The character used to separate values in each - % line. default ',' + % line. default ',' % * __Missing__ The character used to specify missing - % measurements. It should not be a digit. Although it's a - % non-numerical value, it surely does not affect the - % decision of whether the variable ordered or categorical. - % default '?' + % measurements. It should not be a digit. Although it's a + % non-numerical value, it surely does not affect the decision of + % whether the variable is ordered or categorical. default '?' % * __TrainTestSplitCount__ same as above. % * __TrainTestSplitRatio__ same as above. % * __TrainTestSplitShuffle__ same as above.
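As a rough sketch of the train/predict cycle documented above (the data, layer sizes, and parameter values below are made up for illustration):

    X = rand(100,3);             % 100 training samples, 3 features each
    Y = sum(X,2);                % one regression target per sample
    model = cv.ANN_MLP();
    model.LayerSizes = [3 5 1];  % input, hidden, and output layer sizes
    model.setActivationFunction('Sigmoid', 'Param1',1, 'Param2',1);
    model.setTrainMethod('RProp');
    model.train(X, Y);
    Yhat = model.predict(X);     % predicted responses, one row per sample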
@@ -513,10 +507,10 @@ function load(this, fname_or_str, varargin) function [err,resp] = calcError(this, samples, responses, varargin) %CALCERROR Computes error on the training or test dataset % - % err = model.calcError(samples, responses) - % err = model.calcError(csvFilename, []) - % [err,resp] = model.calcError(...) - % [...] = model.calcError(..., 'OptionName', optionValue, ...) + % err = model.calcError(samples, responses) + % err = model.calcError(csvFilename, []) + % [err,resp] = model.calcError(...) + % [...] = model.calcError(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ See the train method. @@ -530,14 +524,13 @@ function load(this, fname_or_str, varargin) % ## Options % * __Data__ See the train method. % * __TestError__ if true, the error is computed over the test - % subset of the data, otherwise it's computed over the - % training subset of the data. Please note that if you - % loaded a completely different dataset to evaluate an - % already trained classifier, you will probably want not to - % set the test subset at all with `TrainTestSplitRatio` and - % specify `TestError=false`, so that the error is computed - % for the whole new set. Yes, this sounds a bit confusing. - % default false + % subset of the data, otherwise it's computed over the training + % subset of the data. Please note that if you loaded a + % completely different dataset to evaluate an already trained + % classifier, you will probably want not to set the test subset + % at all with `TrainTestSplitRatio` and specify + % `TestError=false`, so that the error is computed for the whole + % new set. Yes, this sounds a bit confusing. default false % % The method uses the predict method to compute the error. For % regression models the error is computed as RMS, for classifiers @@ -551,21 +544,21 @@ function load(this, fname_or_str, varargin) function [results,f] = predict(this, samples, varargin) %PREDICT Predicts response(s) for the provided sample(s) % - % [results,f] = model.predict(samples) - % [...] = model.predict(..., 'OptionName', optionValue, ...) + % [results,f] = model.predict(samples) + % [...] = model.predict(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ The input samples (one or more) stored as rows of - % the floating-point matrix. + % the floating-point matrix. % % ## Output % * __results__ Predicted responses for corresponding samples. % * __f__ Returned when you pass one sample. Otherwise unused and - % returns 0. + % returns 0. % % ## Options % * __Flags__ The optional predict flags, model-dependent. - % Not used. default 0 + % Not used. default 0 % % See also: cv.ANN_MLP.train, cv.ANN_MLP.calcError % @@ -578,7 +571,7 @@ function load(this, fname_or_str, varargin) function weights = getWeights(this, layerIdx) %GETWEIGHTS Returns neurons weights of the particular layer % - % weights = model.getWeights(layerIdx) + % weights = model.getWeights(layerIdx) % % ## Input % * __layerIdx__ zero-based index for the layer. @@ -592,20 +585,20 @@ function load(this, fname_or_str, varargin) function setTrainMethod(this, method, varargin) %SETTRAINMETHOD Sets training method and common parameters % - % model.setTrainMethod(method) - % model.setTrainMethod(method, 'OptionName', optionValue, ...) + % model.setTrainMethod(method) + % model.setTrainMethod(method, 'OptionName', optionValue, ...) % % ## Input % * __method__ Available training methods: - % * __Backprop__ The back-propagation algorithm. - % * __RProp__ (default) The RPROP algorithm. See [101] - % for details. 
+ % * __Backprop__ The back-propagation algorithm. + % * __RProp__ (default) The RPROP algorithm. See [101] for + % details. % % ## Options % * __Param1__ sets `RpropDW0` property for 'RProp' and sets - % `BackpropWeightScale` property for 'Backprop'. default 0 + % `BackpropWeightScale` property for 'Backprop'. default 0 % * __Param2__ sets `RpropDWMin` property for 'RProp' and sets - % `BackpropMomentumScale` property for 'Backprop'. default 0 + % `BackpropMomentumScale` property for 'Backprop'. default 0 % % ## References % [101]: @@ -622,29 +615,29 @@ function setTrainMethod(this, method, varargin) function setActivationFunction(this, ftype, varargin) %SETACTIVATIONFUNCTION Initialize the activation function for each neuron % - % model.setActivationFunction(ftype) - % model.setActivationFunction(ftype, 'OptionName', optionValue, ...) + % model.setActivationFunction(ftype) + % model.setActivationFunction(ftype, 'OptionName', optionValue, ...) % % ## Input % * __ftype__ The type of activation function. default 'Sigmoid'. - % Possible activation functions: - % * __Identity__ Identity function: `f(x) = x` - % * __Sigmoid__ Symmetrical sigmoid: - % `f(x) = beta * (1-exp(-alpha*x))/(1+exp(-alpha*x))`. - % See note below. - % * __Gaussian__ Gaussian function: - % `f(x) = beta * exp(-alpha*x*x)` + % Possible activation functions: + % * __Identity__ Identity function: `f(x) = x` + % * __Sigmoid__ Symmetrical sigmoid: + % `f(x) = beta * (1-exp(-alpha*x))/(1+exp(-alpha*x))`. See + % note below. + % * __Gaussian__ Gaussian function: + % `f(x) = beta * exp(-alpha*x*x)` % % ## Options % * __Param1__ The first parameter of the activation function, - % `alpha`. default 0 + % `alpha`. default 0 % * __Param2__ The second parameter of the activation function, - % `beta`. default 0 + % `beta`. default 0 % % Currently the default and the only fully supported activation % function is 'Sigmoid'. % - % ## Note + % ### Note % If you are using the default `Sigmoid` activation function with % the default parameter values `Param1=0` and `Param2=0` then the % function used is `y = 1.7159*tanh(2/3 * x)`, so the output will diff --git a/+cv/AgastFeatureDetector.m b/+cv/AgastFeatureDetector.m index 790925a6c..a0baff39b 100644 --- a/+cv/AgastFeatureDetector.m +++ b/+cv/AgastFeatureDetector.m @@ -14,7 +14,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -41,14 +42,13 @@ function this = AgastFeatureDetector(varargin) %AGASTFEATUREDETECTOR Constructor % - % obj = cv.AgastFeatureDetector() - % obj = cv.AgastFeatureDetector(..., 'OptionName',optionValue, ...) + % obj = cv.AgastFeatureDetector() + % obj = cv.AgastFeatureDetector(..., 'OptionName',optionValue, ...) 
% % ## Options - % * __Threshold__ See cv.AgastFeatureDetector.Threshold, - % default 10 + % * __Threshold__ See cv.AgastFeatureDetector.Threshold, default 10 % * __NonmaxSuppression__ See - % cv.AgastFeatureDetector.NonmaxSuppression, default true + % cv.AgastFeatureDetector.NonmaxSuppression, default true % * __Type__ See cv.AgastFeatureDetector.Type, default `OAST_9_16` % % See also: cv.AgastFeatureDetector.detect @@ -59,7 +59,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.AgastFeatureDetector % @@ -70,7 +70,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -84,7 +84,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.AgastFeatureDetector.empty, % cv.AgastFeatureDetector.load @@ -95,11 +95,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.AgastFeatureDetector.clear, % cv.AgastFeatureDetector.load @@ -110,7 +110,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -126,21 +126,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -154,11 +154,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.AgastFeatureDetector.save, cv.AgastFeatureDetector.load % @@ -171,28 +171,26 @@ function load(this, fname_or_str, varargin) function keypoints = detect(this, img, varargin) %DETECT Detects keypoints in an image or image set % - % keypoints = obj.detect(img) - % keypoints = obj.detect(imgs) - % [...] = obj.detect(..., 'OptionName',optionValue, ...) 
+ % keypoints = obj.detect(img) + % keypoints = obj.detect(imgs) + % [...] = obj.detect(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Image (first variant), 8-bit grayscale image where - % keypoints (corners) are detected. + % keypoints (corners) are detected. % * __imgs__ Image set (second variant), cell array of images. % % ## Output - % * __keypoints__ The detected keypoints. In the first variant, - % a 1-by-N structure array. In the second variant of the - % method, `keypoints{i}` is a set of keypoints detected in - % `imgs{i}`. + % * __keypoints__ The detected keypoints. In the first variant, a + % 1-by-N structure array. In the second variant of the method, + % `keypoints{i}` is a set of keypoints detected in `imgs{i}`. % % ## Options % * __Mask__ A mask specifying where to look for keypoints - % (optional). It must be a logical or 8-bit integer matrix - % with non-zero values in the region of interest. In the - % second variant, it is a cell-array of masks for each input - % image, `masks{i}` is a mask for `imgs{i}`. - % Not set by default. + % (optional). It must be a logical or 8-bit integer matrix with + % non-zero values in the region of interest. In the second + % variant, it is a cell-array of masks for each input image, + % `masks{i}` is a mask for `imgs{i}`. Not set by default. % % See also: cv.AgastFeatureDetector.AgastFeatureDetector % diff --git a/+cv/AlignMTB.m b/+cv/AlignMTB.m index ff6161c98..03ddd8a21 100644 --- a/+cv/AlignMTB.m +++ b/+cv/AlignMTB.m @@ -22,7 +22,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -43,8 +44,8 @@ function this = AlignMTB(varargin) %ALIGNMTB Creates AlignMTB object % - % obj = cv.AlignMTB() - % obj = cv.AlignMTB('OptionName',optionValue, ...) + % obj = cv.AlignMTB() + % obj = cv.AlignMTB('OptionName',optionValue, ...) % % ## Options % * __MaxBits__ default 6 @@ -59,7 +60,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.AlignMTB % @@ -70,7 +71,7 @@ function delete(this) function shift = calculateShift(this, img0, img1) %CALCULATESHIFT Calculates shift between two images % - % shift = obj.calculateShift(img0, img1) + % shift = obj.calculateShift(img0, img1) % % ## Input % * __img0__ first image (`uint8` grayscale). @@ -90,7 +91,7 @@ function delete(this) function dst = shiftMat(this, src, shift) %SHIFTMAT Helper function, that shift Mat filling new regions with zeros % - % dst = obj.shiftMat(src, shift) + % dst = obj.shiftMat(src, shift) % % ## Input % * __src__ input image. @@ -107,14 +108,14 @@ function delete(this) function [tb, eb] = computeBitmaps(this, img) %COMPUTEBITMAPS Computes median threshold and exclude bitmaps of given image % - % [tb,eb] = obj.computeBitmaps(img) + % [tb, eb] = obj.computeBitmaps(img) % % ## Input % * __img__ input image (`uint8` grayscale). % % ## Output % * __tb__ median threshold bitmap, of same size as `img` and - % `uint8` type. + % `uint8` type. % * __eb__ exclude bitmap, of same size as `img` and `uint8` type. % % See also: cv.AlignMTB.process @@ -128,11 +129,11 @@ function delete(this) function dst = process(this, src) %PROCESS Aligns images % - % dst = obj.process(src) + % dst = obj.process(src) % % ## Input % * __src__ cell array of input images (RGB), all of the same size - % and `uint8` type. + % and `uint8` type. % % ## Output % * __dst__ cell array of aligned images, of same length as `src`. 
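A brief hedged sketch of the typical exposure-stack alignment flow (the file names are placeholders):

    imgs = {cv.imread('exp1.jpg'), cv.imread('exp2.jpg'), cv.imread('exp3.jpg')};
    alignMTB = cv.AlignMTB();
    aligned = alignMTB.process(imgs);  % cell array of aligned images, same length as imgs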
@@ -148,7 +149,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.AlignMTB.empty, cv.AlignMTB.load % @@ -158,11 +159,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.AlignMTB.clear, cv.AlignMTB.load % @@ -172,11 +173,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.AlignMTB.save, cv.AlignMTB.load % @@ -186,7 +187,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -202,21 +203,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). 
The previous diff --git a/+cv/BOWImgDescriptorExtractor.m b/+cv/BOWImgDescriptorExtractor.m index 305fac0f7..249dbd08b 100644 --- a/+cv/BOWImgDescriptorExtractor.m +++ b/+cv/BOWImgDescriptorExtractor.m @@ -13,14 +13,14 @@ % % ## Example % - % % create bag of visual words - % trainer = cv.BOWKMeansTrainer(K); - % dictionary = trainer.cluster(train_descs); + % % create bag of visual words + % trainer = cv.BOWKMeansTrainer(K); + % dictionary = trainer.cluster(train_descs); % - % % Compute histogram of visual word occurrences of an image - % extractor = cv.BOWImgDescriptorExtractor('SIFT','BruteForce'); - % extractor.Vocabulary = dictionary; - % descs = extractor.compute(im, keypoints); + % % Compute histogram of visual word occurrences of an image + % extractor = cv.BOWImgDescriptorExtractor('SIFT','BruteForce'); + % extractor.Vocabulary = dictionary; + % descs = extractor.compute(im, keypoints); % % See also: cv.BOWImgDescriptorExtractor.BOWImgDescriptorExtractor, % cv.BOWKMeansTrainer, bagOfFeatures, trainImageCategoryClassifier, @@ -44,46 +44,45 @@ function this = BOWImgDescriptorExtractor(dextractor, dmatcher) %BOWIMGDESCRIPTOREXTRACTOR The constructor % - % extractor = cv.BOWImgDescriptorExtractor(dextractor, dmatcher) - % extractor = cv.BOWImgDescriptorExtractor({dextractor, 'key',val,...}, {dmatcher, 'key',val,...}) + % extractor = cv.BOWImgDescriptorExtractor(dextractor, dmatcher) + % extractor = cv.BOWImgDescriptorExtractor({dextractor, 'key',val,...}, {dmatcher, 'key',val,...}) % % ## Input % * __dextractor__ Descriptor extractor that is used to compute - % descriptors for an input image and its keypoints. It can - % be specified by a string containing the type of - % descriptor extractor, such as 'SIFT' or 'SURF'. See - % cv.DescriptorExtractor.DescriptorExtractor for possible - % types. + % descriptors for an input image and its keypoints. It can be + % specified by a string containing the type of descriptor + % extractor, such as 'SIFT' or 'SURF'. See + % cv.DescriptorExtractor.DescriptorExtractor for possible types. % * __dmatcher__ Descriptor matcher that is used to find the - % nearest word of the trained vocabulary for each keypoint - % descriptor of the image. It can be spacified by a string - % specifying the type of descriptor extractor, such as - % 'BruteForce' or 'BruteForce-L1'. See - % cv.DescriptorMatcher.DescriptorMatcher for possible types. - % default 'BruteForce' + % nearest word of the trained vocabulary for each keypoint + % descriptor of the image. It can be specified by a string + % specifying the type of descriptor matcher, such as + % 'BruteForce' or 'BruteForce-L1'. See + % cv.DescriptorMatcher.DescriptorMatcher for possible types. + % default 'BruteForce' % - % In the first variant, it creates descriptor extractor/matcher - % of the given types using default parameters (by calling the - % default constructors). + % In the first variant, it creates descriptor extractor/matcher of + % the given types using default parameters (by calling the default + % constructors). % % In the second variant, it creates descriptor extractor/matcher - % of the given types using the specified options. - % Each algorithm type takes optional arguments. Each of the - % extractor/matcher are specified by a cell-array that starts - % with the type name followed by option arguments, as in: - % `{'Type', 'OptionName',optionValue, ...}`. - % Refer to the - % individual extractor/matcher functions to see a - % list of possible options of each algorithm.
+ % of the given types using the specified options. Each algorithm + % type takes optional arguments. Each of the extractor/matcher is + % specified by a cell-array that starts with the type name + % followed by option arguments, as in: + % `{'Type', 'OptionName',optionValue, ...}`. Refer to the + % individual extractor/matcher functions to see a list of possible + % options of each algorithm. % % ## Example % - % % first variant - % extractor = cv.BOWImgDescriptorExtractor('ORB', 'BruteForce'); + % % first variant + % extractor = cv.BOWImgDescriptorExtractor('ORB', 'BruteForce'); % - % % second variant - % extractor = cv.BOWImgDescriptorExtractor(... - % {'FastFeatureDetector', 'Threshold',10}, ... - % {'BFMatcher', 'NormType','L2'}); + % % second variant + % extractor = cv.BOWImgDescriptorExtractor(... + % {'FastFeatureDetector', 'Threshold',10}, ... + % {'BFMatcher', 'NormType','L2'}); % % See also: cv.DescriptorExtractor, cv.DescriptorMatcher % @@ -94,7 +93,7 @@ function delete(this) %DELETE Destructor % - % extractor.delete() + % extractor.delete() % % See also: cv.BOWImgDescriptorExtractor % @@ -105,11 +104,11 @@ function delete(this) function sz = descriptorSize(this) %DESCRIPTORSIZE Returns image discriptor size % - % sz = extractor.descriptorSize() + % sz = extractor.descriptorSize() % % ## Output - % * __sz__ Returns an image discriptor size if the vocabulary - % is set. Otherwise, it returns 0. + % * __sz__ Returns an image descriptor size if the vocabulary is + % set. Otherwise, it returns 0. % % This is basically `size(Vocabulary,1)` (i.e number of clusters). % @@ -121,11 +120,11 @@ function delete(this) function dtype = descriptorType(this) %DESCRIPTORTYPE Returns image descriptor type % - % dtype = extractor.descriptorType() + % dtype = extractor.descriptorType() % % ## Output - % * __dtype__ Returns an image descriptor type, one of - % numeric MATLAB class names. + % * __dtype__ Returns an image descriptor type, one of numeric + % MATLAB class names. % % Always `single` for BOWImgDescriptorExtractor. % @@ -138,23 +137,22 @@ function delete(this) function [bow,idx,kptDescs] = compute(this, img, keypoints) %COMPUTE Computes an image descriptor using the set visual vocabulary % - % [bow,idx,kptDescs] = extractor.compute(img, keypoints) + % [bow,idx,kptDescs] = extractor.compute(img, keypoints) % % ## Input % * __img__ Image, for which the descriptor is computed. - % * __keypoints__ Keypoints detected in the input image. It is - % a struct array that is returned by - % cv.FeatureDetector.detect. + % * __keypoints__ Keypoints detected in the input image. It is a + % struct array that is returned by cv.FeatureDetector.detect. % % ## Output % * __bow__ Computed output image descriptor. A vector of the same - % length as the vocabulary dimension. + % length as the vocabulary dimension. % * __idx__ Indices of keypoints that belong to the cluster. A - % cell array of integer vectors. This means that `idx{i}` - % are keypoint indices that belong to the i-th cluster - % (word of vocabulary). + % cell array of integer vectors. This means that `idx{i}` are + % keypoint indices that belong to the i-th cluster (word of + % vocabulary). % * __kptDescs__ Descriptors of the image keypoints, as returned - % by cv.DescriptorExtractor.compute.
% % See also: cv.BOWImgDescriptorExtractor.compute2, % cv.BOWImgDescriptorExtractor.compute1 @@ -171,20 +169,20 @@ function delete(this) function [bow,idx] = compute1(this, kptDescs) %COMPUTE1 Computes an image descriptor using keypoint descriptors % - % [bow,idx] = extractor.compute1(kptDescs) + % [bow,idx] = extractor.compute1(kptDescs) % % ## Input - % * __kptDescs__ Computed descriptors to match with vocabulary. - % It is a numeric matrix that is returned by - % cv.DescriptorExtractor.compute. + % * __kptDescs__ Computed descriptors to match with vocabulary. It + % is a numeric matrix that is returned by + % cv.DescriptorExtractor.compute. % % ## Output % * __bow__ Computed output image descriptor. A vector of the same - % length as the vocabulary dimension. + % length as the vocabulary dimension. % * __idx__ Indices of keypoints that belong to the cluster. A - % cell array of integer vectors. This means that `idx{i}` - % are keypoint indices that belong to the i-th cluster - % (word of vocabulary). + % cell array of integer vectors. This means that `idx{i}` are + % keypoint indices that belong to the i-th cluster (word of + % vocabulary). % % See also: cv.BOWImgDescriptorExtractor.compute % @@ -198,17 +196,16 @@ function delete(this) function bow = compute2(this, img, keypoints) %COMPUTE2 Computes an image descriptor using the set visual vocabulary % - % bow = extrctor.compute2(img, keypoints) + % bow = extractor.compute2(img, keypoints) % % ## Input % * __img__ Image, for which the descriptor is computed. - % * __keypoints__ Keypoints detected in the input image. It is - % a struct array that is returned by - % cv.FeatureDetector.detect. + % * __keypoints__ Keypoints detected in the input image. It is a + % struct array that is returned by cv.FeatureDetector.detect. % % ## Output % * __bow__ Computed output image descriptor. A vector of the same - % length as the vocabulary dimension. + % length as the vocabulary dimension. % % See also: cv.BOWImgDescriptorExtractor.compute % diff --git a/+cv/BOWKMeansTrainer.m b/+cv/BOWKMeansTrainer.m index 3360fda51..2ee59adf0 100644 --- a/+cv/BOWKMeansTrainer.m +++ b/+cv/BOWKMeansTrainer.m @@ -6,14 +6,14 @@ % % ## Example % - % % create bag of visual words - % trainer = cv.BOWKMeansTrainer(K); - % dictionary = trainer.cluster(train_descs); + % % create bag of visual words + % trainer = cv.BOWKMeansTrainer(K); + % dictionary = trainer.cluster(train_descs); % - % % Compute histogram of visual word occurrences of an image - % extractor = cv.BOWImgDescriptorExtractor('SIFT','BruteForce'); - % extractor.Vocabulary = dictionary; - % descs = extractor.compute(im, keypoints); + % % Compute histogram of visual word occurrences of an image + % extractor = cv.BOWImgDescriptorExtractor('SIFT','BruteForce'); + % extractor.Vocabulary = dictionary; + % descs = extractor.compute(im, keypoints); % % ## References % > "Visual Categorization with Bags of Keypoints" by @@ -33,27 +33,27 @@ function this = BOWKMeansTrainer(dictionarySize, varargin) %BOWKMEANSTRAINER The constructor % - % trainer = cv.BOWKMeansTrainer(dictionarySize) - % [...] = cv.BOWKMeansTrainer(...,'OptionName', optionValue, ...) + % trainer = cv.BOWKMeansTrainer(dictionarySize) + % [...] = cv.BOWKMeansTrainer(...,'OptionName', optionValue, ...) % % ## Input % * __dictionarySize__ Number of clusters. % % ## Options % * __Criteria__ The algorithm termination criteria, that is, the - % maximum number of iterations and/or the desired accuracy. - % The accuracy is specified as `Criteria.epsilon`.
As soon - % as each of the cluster centers moves by less than - % `Criteria.epsilon` on some iteration, the algorithm stops. - % default `struct('type','Count+EPS', 'maxCount',100, 'epsilon',eps('float'))` + % maximum number of iterations and/or the desired accuracy. The + % accuracy is specified as `Criteria.epsilon`. As soon as each + % of the cluster centers moves by less than `Criteria.epsilon` + % on some iteration, the algorithm stops. default + % `struct('type','Count+EPS', 'maxCount',100, 'epsilon',eps('float'))` % * __Attempts__ The number of times the algorithm is executed - % using different initial labelings. The algorithm returns - % the labels that yield the best compactness. default 3. + % using different initial labelings. The algorithm returns the + % labels that yield the best compactness. default 3. % * __Initialization__ Method to initialize seeds, default 'PP'. - % One of the followings: - % * __Random__ Select random initial centers in each attempt. - % * __PP__ Use kmeans++ center initialization by - % Arthur and Vassilvitskii [Arthur2007]. + % One of the following: + % * __Random__ Select random initial centers in each attempt. + % * __PP__ Use kmeans++ center initialization by + % Arthur and Vassilvitskii [Arthur2007]. % % See also: cv.BOWKMeansTrainer, cv.kmeans % @@ -63,7 +63,7 @@ function delete(this) %DELETE Destructor % - % trainer.delete() + % trainer.delete() % % See also: cv.BOWKMeansTrainer % @@ -74,7 +74,7 @@ function delete(this) function descs = getDescriptors(this) %GETDESCRIPTORS Returns a training set of descriptors % - % descs = trainer.getDescriptors() + % descs = trainer.getDescriptors() % % ## Output % * __descs__ a cell array of descriptors @@ -87,7 +87,7 @@ function delete(this) function count = descriptorsCount(this) %DESCRIPTORSCOUNT Returns the count of all descriptors stored in the training set % - % count = trainer.descriptorsCount() + % count = trainer.descriptorsCount() % % ## Output % * __count__ is a numeric value @@ -100,11 +100,11 @@ function delete(this) function add(this, descs) %ADD Adds descriptors to a training set % - % trainer.add(descs) + % trainer.add(descs) % % ## Input % * __descs__ Descriptors to add to a training set. Each row of - % the descriptors matrix is a descriptor. + % the descriptors matrix is a descriptor. % % The training set is clustered using cluster method to construct % the vocabulary. @@ -117,7 +117,7 @@ function add(this, descs) function clear(this) %CLEAR Clear training descriptors % - % trainer.clear() + % trainer.clear() % % See also: cv.BOWKMeansTrainer.add % @@ -127,13 +127,13 @@ function clear(this) function centers = cluster(this, descs) %CLUSTER Clusters train descriptors % - % centers = trainer.cluster() - % centers = trainer.cluster(descs) + % centers = trainer.cluster() + % centers = trainer.cluster(descs) % % ## Input % * __descs__ Descriptors to cluster. Each row of the - % descriptors matrix is a descriptor. Descriptors are not - % added to the inner train descriptor set. + % descriptors matrix is a descriptor. Descriptors are not added + % to the inner train descriptor set. % % ## Output % * __centers__ Row vectors of vocabulary descriptors.
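The two classes above together form the bag-of-visual-words pipeline: BOWKMeansTrainer builds the vocabulary, BOWImgDescriptorExtractor turns images into word histograms. For reference, a minimal end-to-end sketch in MATLAB (not part of this patch), assuming `imgs` is a cell array of grayscale training images, `im` is a query image, and SIFT support is compiled in:

    K = 100;                                     % vocabulary size
    detector  = cv.FeatureDetector('SIFT');      % keypoint detector
    extractor = cv.DescriptorExtractor('SIFT');  % descriptor extractor

    % accumulate training descriptors, then cluster them into K visual words
    trainer = cv.BOWKMeansTrainer(K);
    for i = 1:numel(imgs)
        kpts = detector.detect(imgs{i});
        trainer.add(extractor.compute(imgs{i}, kpts));
    end
    dictionary = trainer.cluster();              % K-by-d matrix of centers

    % histogram of visual-word occurrences for a new image
    bowext = cv.BOWImgDescriptorExtractor('SIFT', 'BruteForce');
    bowext.Vocabulary = dictionary;
    bow = bowext.compute(im, detector.detect(im));  % 1-by-K, class single

The resulting `bow` vectors are fixed-length features that can be fed to any classifier, e.g. the cv.Boost model further down in this patch.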
diff --git a/+cv/BRISK.m b/+cv/BRISK.m index c6177655d..afab164c0 100644 --- a/+cv/BRISK.m +++ b/+cv/BRISK.m @@ -15,40 +15,44 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = BRISK(varargin) %BRISK The BRISK constructor % - % obj = cv.BRISK() - % obj = cv.BRISK(radiusList, numberList) - % obj = cv.BRISK(..., 'OptionName',optionValue, ...) + % obj = cv.BRISK() + % obj = cv.BRISK(radiusList, numberList) + % obj = cv.BRISK(..., 'OptionName',optionValue, ...) % % ## Input % * __radiusList__ defines the radii (in pixels) where the samples - % around a keypoint are taken (for keypoint scale 1). + % around a keypoint are taken (for keypoint scale 1). % * __numberList__ defines the number of sampling points on the - % sampling circle. Must be the same size as `radiusList`. + % sampling circle. Must be the same size as `radiusList`. % % ## Options % Options accepted by first variant: % % * __Threshold__ FAST/AGAST detection threshold score. default 30 % * __Octaves__ detection octaves. Use 0 to do single scale. - % default 3 + % default 3 % * __PatternScale__ apply this scale to the pattern used for - % sampling the neighbourhood of a keypoint. default 1.0 + % sampling the neighbourhood of a keypoint. default 1.0 % % Options accepted by second variant for a custom pattern: % + % * __Threshold__ FAST/AGAST detection threshold score. default 20 + % * __Octaves__ detection octaves. Use 0 to do single scale. + % default 3 % * __DMax__ threshold for the short pairings used for descriptor - % formation (in pixels for keypoint scale 1). default 5.85 + % formation (in pixels for keypoint scale 1). default 5.85 % * __DMin__ threshold for the long pairings used for orientation - % determination (in pixels for keypoint scale 1). default 8.2 + % determination (in pixels for keypoint scale 1). default 8.2 % * __IndexChange__ index remapping of the bits. - % default empty vector. + % default empty vector. % % See also: cv.BRISK.detectAndCompute % @@ -58,7 +62,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.BRISK % @@ -69,7 +73,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -83,7 +87,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.BRISK.empty, cv.BRISK.load % @@ -93,11 +97,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.BRISK.clear, cv.BRISK.load % @@ -107,7 +111,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -123,21 +127,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. 
% * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -151,11 +155,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.BRISK.save, cv.BRISK.load % @@ -168,16 +172,16 @@ function load(this, fname_or_str, varargin) function ntype = defaultNorm(this) %DEFAULTNORM Returns the default norm type % - % ntype = obj.defaultNorm() + % ntype = obj.defaultNorm() % % ## Output % * __ntype__ Norm type. One of `cv::NormTypes`: - % * __Inf__ - % * __L1__ - % * __L2__ - % * __L2Sqr__ - % * __Hamming__ - % * __Hamming2__ + % * __Inf__ + % * __L1__ + % * __L2__ + % * __L2Sqr__ + % * __Hamming__ + % * __Hamming2__ % % Always `Hamming` for BRISK. % @@ -189,7 +193,7 @@ function load(this, fname_or_str, varargin) function sz = descriptorSize(this) %DESCRIPTORSIZE Returns the descriptor size in bytes % - % sz = obj.descriptorSize() + % sz = obj.descriptorSize() % % ## Output % * __sz__ Descriptor size. @@ -202,7 +206,7 @@ function load(this, fname_or_str, varargin) function dtype = descriptorType(this) %DESCRIPTORTYPE Returns the descriptor type % - % dtype = obj.descriptorType() + % dtype = obj.descriptorType() % % ## Output % * __dtype__ Descriptor type, one of numeric MATLAB class names. @@ -217,27 +221,25 @@ function load(this, fname_or_str, varargin) function keypoints = detect(this, img, varargin) %DETECT Detects keypoints in an image or image set % - % keypoints = obj.detect(img) - % keypoints = obj.detect(imgs) - % [...] = obj.detect(..., 'OptionName',optionValue, ...) + % keypoints = obj.detect(img) + % keypoints = obj.detect(imgs) + % [...] = obj.detect(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Image (first variant), 8-bit grayscale image. % * __imgs__ Image set (second variant), cell array of images. % % ## Output - % * __keypoints__ The detected keypoints. In the first variant, - % a 1-by-N structure array. In the second variant of the - % method, `keypoints{i}` is a set of keypoints detected in - % `imgs{i}`. + % * __keypoints__ The detected keypoints. In the first variant, a + % 1-by-N structure array. In the second variant of the method, + % `keypoints{i}` is a set of keypoints detected in `imgs{i}`. % % ## Options % * __Mask__ A mask specifying where to look for keypoints - % (optional). It must be a logical or 8-bit integer matrix - % with non-zero values in the region of interest. In the - % second variant, it is a cell-array of masks for each input - % image, `masks{i}` is a mask for `imgs{i}`. - % Not set by default. + % (optional). 
It must be a logical or 8-bit integer matrix with + % non-zero values in the region of interest. In the second + % variant, it is a cell-array of masks for each input image, + % `masks{i}` is a mask for `imgs{i}`. Not set by default. % % See also: cv.BRISK.compute, cv.BRISK.detectAndCompute % @@ -247,26 +249,26 @@ function load(this, fname_or_str, varargin) function [descriptors, keypoints] = compute(this, img, keypoints) %COMPUTE Computes the descriptors for a set of keypoints detected in an image or image set % - % [descriptors, keypoints] = obj.compute(img, keypoints) - % [descriptors, keypoints] = obj.compute(imgs, keypoints) + % [descriptors, keypoints] = obj.compute(img, keypoints) + % [descriptors, keypoints] = obj.compute(imgs, keypoints) % % ## Input % * __img__ Image (first variant), 8-bit grayscale image. % * __imgs__ Image set (second variant), cell array of images. % * __keypoints__ Input collection of keypoints. Keypoints for - % which a descriptor cannot be computed are removed. - % Sometimes new keypoints can be added, for example: cv.SIFT - % duplicates keypoint with several dominant orientations - % (for each orientation). In the first variant, this is a - % struct-array of detected keypoints. In the second variant, - % it is a cell-array, where `keypoints{i}` is a set of keypoints - % detected in `imgs{i}` (a struct-array like before). + % which a descriptor cannot be computed are removed. Sometimes + % new keypoints can be added, for example: cv.SIFT duplicates + % keypoint with several dominant orientations (for each + % orientation). In the first variant, this is a struct-array of + % detected keypoints. In the second variant, it is a cell-array, + % where `keypoints{i}` is a set of keypoints detected in + % `imgs{i}` (a struct-array like before). % % ## Output % * __descriptors__ Computed descriptors. In the second variant of - % the method `descriptors{i}` are descriptors computed for a - % `keypoints{i}`. Row `j` in `descriptors` (or - % `descriptors{i}`) is the descriptor for `j`-th keypoint. + % the method `descriptors{i}` are descriptors computed for a + % `keypoints{i}`. Row `j` in `descriptors` (or `descriptors{i}`) + % is the descriptor for `j`-th keypoint. % * __keypoints__ Optional output with possibly updated keypoints. % % See also: cv.BRISK.detect, cv.BRISK.detectAndCompute @@ -277,41 +279,40 @@ function load(this, fname_or_str, varargin) function [keypoints, descriptors] = detectAndCompute(this, img, varargin) %DETECTANDCOMPUTE Detects keypoints and computes their descriptors % - % [keypoints, descriptors] = obj.detectAndCompute(img) - % [...] = obj.detectAndCompute(..., 'OptionName',optionValue, ...) + % [keypoints, descriptors] = obj.detectAndCompute(img) + % [...] = obj.detectAndCompute(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Image, input 8-bit grayscale image. % % ## Output % * __keypoints__ The detected keypoints. A 1-by-N structure array - % with the following fields: - % * __pt__ coordinates of the keypoint `[x,y]` - % * __size__ diameter of the meaningful keypoint neighborhood - % * __angle__ computed orientation of the keypoint (-1 if not - % applicable); it's in [0,360) degrees and measured - % relative to image coordinate system (y-axis is - % directed downward), i.e in clockwise. - % * __response__ the response by which the most strong - % keypoints have been selected. Can be used for further - % sorting or subsampling. - % * __octave__ octave (pyramid layer) from which the keypoint - % has been extracted. 
- % * **class_id** object class (if the keypoints need to be - % clustered by an object they belong to). + % with the following fields: + % * __pt__ coordinates of the keypoint `[x,y]` + % * __size__ diameter of the meaningful keypoint neighborhood + % * __angle__ computed orientation of the keypoint (-1 if not + % applicable); it's in [0,360) degrees and measured relative + % to image coordinate system (y-axis is directed downward), + % i.e in clockwise. + % * __response__ the response by which the most strong keypoints + % have been selected. Can be used for further sorting or + % subsampling. + % * __octave__ octave (pyramid layer) from which the keypoint + % has been extracted. + % * **class_id** object class (if the keypoints need to be + % clustered by an object they belong to). % * __descriptors__ Computed descriptors. Output concatenated - % vectors of descriptors. Each descriptor is a vector of - % length cv.BRISK.descriptorSize, so the total size of - % descriptors will be `numel(keypoints) * obj.descriptorSize()`. - % A matrix of size N-by-sz of class `uint8`, one row per - % keypoint. + % vectors of descriptors. Each descriptor is a vector of length + % cv.BRISK.descriptorSize, so the total size of descriptors will + % be `numel(keypoints) * obj.descriptorSize()`. A matrix of size + % N-by-sz of class `uint8`, one row per keypoint. % % ## Options % * __Mask__ optional mask specifying where to look for keypoints. - % Not set by default. + % Not set by default. % * __Keypoints__ If passed, then the method will use the provided - % vector of keypoints instead of detecting them, and the - % algorithm just computes their descriptors. + % vector of keypoints instead of detecting them, and the + % algorithm just computes their descriptors. % % See also: cv.BRISK.detect, cv.BRISK.compute % diff --git a/+cv/BackgroundSubtractorKNN.m b/+cv/BackgroundSubtractorKNN.m index 6a2ae1f18..a4fd02fdd 100644 --- a/+cv/BackgroundSubtractorKNN.m +++ b/+cv/BackgroundSubtractorKNN.m @@ -22,7 +22,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -62,19 +63,18 @@ function this = BackgroundSubtractorKNN(varargin) %BACKGROUNDSUBTRACTORKNN Creates KNN Background Subtractor % - % bs = cv.BackgroundSubtractorKNN() - % bs = cv.BackgroundSubtractorKNN('OptionName', optionValue, ...) + % bs = cv.BackgroundSubtractorKNN() + % bs = cv.BackgroundSubtractorKNN('OptionName', optionValue, ...) % % ## Options % * __History__ Length of the history. default 500 % * __Dist2Threshold__ Threshold on the squared distance between - % the pixel and the sample to decide whether a pixel is - % close to that sample. This parameter does not affect the - % background update. default 400.0 + % the pixel and the sample to decide whether a pixel is close to + % that sample. This parameter does not affect the background + % update. default 400.0 % * __DetectShadows__ If true, the algorithm will detect shadows - % and mark them. It decreases the speed a bit, so if you do - % not need this feature, set the parameter to false. - % default true + % and mark them. It decreases the speed a bit, so if you do not + % need this feature, set the parameter to false. 
default true % % See also: cv.BackgroundSubtractorKNN % @@ -84,7 +84,7 @@ function delete(this) %DELETE Destructor % - % bs.delete() + % bs.delete() % % See also: cv.BackgroundSubtractorKNN % @@ -95,24 +95,24 @@ function delete(this) function fgmask = apply(this, im, varargin) %APPLY Updates the background model and computes the foreground mask % - % fgmask = bs.apply(im) - % fgmask = bs.apply(im, 'OptionName', optionValue, ...) + % fgmask = bs.apply(im) + % fgmask = bs.apply(im, 'OptionName', optionValue, ...) % % ## Input % * __im__ Next video frame. % % ## Output % * __fgmask__ The output foreground mask as an 8-bit binary image - % (0 for background, 255 for foregound, and `ShadowValue` - % for shadows if `DetectShadows` is true). + % (0 for background, 255 for foreground, and `ShadowValue` for + % shadows if `DetectShadows` is true). % % ## Options % * __LearningRate__ The value between 0 and 1 that indicates how - % fast the background model is learnt. Negative parameter - % value makes the algorithm to use some automatically chosen - % learning rate. 0 means that the background model is not - % updated at all, 1 means that the background model is - % completely reinitialized from the last frame. default -1 + % fast the background model is learnt. Negative parameter value + % makes the algorithm use some automatically chosen learning + % rate. 0 means that the background model is not updated at all, + % 1 means that the background model is completely reinitialized + % from the last frame. default -1 % % See also: cv.BackgroundSubtractorKNN.getBackgroundImage % @@ -122,13 +122,13 @@ function delete(this) function bgImg = getBackgroundImage(this) %GETBACKGROUNDIMAGE Computes a background image % - % bgImg = bs.getBackgroundImage() + % bgImg = bs.getBackgroundImage() % % ## Output % * __bgImg__ The output background image, which is the mean of - % all background Gaussians. + % all background Gaussians. % - % ## Note + % ### Note % Sometimes the background image can be very blurry, as it contain % the average background statistics. % @@ -143,7 +143,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.BackgroundSubtractorKNN.empty % @@ -153,11 +153,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.BackgroundSubtractorKNN.clear % @@ -167,11 +167,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.BackgroundSubtractorKNN.save, cv.BackgroundSubtractorKNN.load % @@ -181,7 +181,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -196,21 +196,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...)
+ % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from a file storage. % The previous model state is discarded. diff --git a/+cv/BackgroundSubtractorMOG2.m b/+cv/BackgroundSubtractorMOG2.m index b63806176..c151703fb 100644 --- a/+cv/BackgroundSubtractorMOG2.m +++ b/+cv/BackgroundSubtractorMOG2.m @@ -42,7 +42,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -107,19 +108,18 @@ function this = BackgroundSubtractorMOG2(varargin) %BACKGROUNDSUBTRACTORMOG2 Creates MOG2 Background Subtractor % - % bs = cv.BackgroundSubtractorMOG2() - % bs = cv.BackgroundSubtractorMOG2('OptionName', optionValue, ...) + % bs = cv.BackgroundSubtractorMOG2() + % bs = cv.BackgroundSubtractorMOG2('OptionName', optionValue, ...) % % ## Options % * __History__ Length of the history. default 500 % * __VarThreshold__ Threshold on the squared Mahalanobis distance - % between the pixel and the model to decide whether a pixel - % is well described by the background model. This parameter - % does not affect the background update. default 16 + % between the pixel and the model to decide whether a pixel is + % well described by the background model. This parameter does + % not affect the background update. default 16 % * __DetectShadows__ If true, the algorithm will detect shadows - % and mark them. It decreases the speed a bit, so if you do - % not need this feature, set the parameter to false. - % default true + % and mark them. It decreases the speed a bit, so if you do not + % need this feature, set the parameter to false. default true % % See also: cv.BackgroundSubtractorMOG2 % @@ -129,7 +129,7 @@ function delete(this) %DELETE Destructor % - % bs.delete() + % bs.delete() % % See also: cv.BackgroundSubtractorMOG2 % @@ -140,26 +140,25 @@ function delete(this) function fgmask = apply(this, im, varargin) %APPLY Updates the background model and computes the foreground mask % - % fgmask = bs.apply(im) - % fgmask = bs.apply(im, 'OptionName', optionValue, ...) + % fgmask = bs.apply(im) + % fgmask = bs.apply(im, 'OptionName', optionValue, ...) % % ## Input % * __im__ Next video frame, uint8 or single. Floating point frame - % will be used without scaling and should be in range - % [0,255]. + % will be used without scaling and should be in range [0,255]. % % ## Output % * __fgmask__ The output foreground mask as an 8-bit binary image - % (0 for background, 255 for foregound, and `ShadowValue` - % for shadows if `DetectShadows` is true). + % (0 for background, 255 for foreground, and `ShadowValue` for + % shadows if `DetectShadows` is true). % % ## Options % * __LearningRate__ The value between 0 and 1 that indicates how - % fast the background model is learnt. Negative parameter - % value makes the algorithm to use some automatically chosen - % learning rate.
0 means that the background model is not - % updated at all, 1 means that the background model is - % completely reinitialized from the last frame. default -1 + % fast the background model is learnt. Negative parameter value + % makes the algorithm use some automatically chosen learning + % rate. 0 means that the background model is not updated at all, + % 1 means that the background model is completely reinitialized + % from the last frame. default -1 % % See also: cv.BackgroundSubtractorMOG2.getBackgroundImage % @@ -169,13 +168,13 @@ function delete(this) function bgImg = getBackgroundImage(this) %GETBACKGROUNDIMAGE Computes a background image % - % bgImg = bs.getBackgroundImage() + % bgImg = bs.getBackgroundImage() % % ## Output % * __bgImg__ The output background image, which is the mean of - % all background Gaussians. + % all background Gaussians. % - % ## Note + % ### Note % Sometimes the background image can be very blurry, as it contain % the average background statistics. % @@ -190,7 +189,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.BackgroundSubtractorMOG2.empty % @@ -200,11 +199,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.BackgroundSubtractorMOG2.clear % @@ -214,11 +213,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.BackgroundSubtractorMOG2.save, cv.BackgroundSubtractorMOG2.load % @@ -228,7 +227,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -243,21 +242,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from a file storage. % The previous model state is discarded.
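Both subtractors above share the same workflow: call apply on each frame and, if needed, query getBackgroundImage afterwards. A rough sketch of that loop (not part of this patch), assuming an input file `video.avi` exists and using cv.VideoCapture for frame grabbing:

    cap = cv.VideoCapture('video.avi');
    bs = cv.BackgroundSubtractorMOG2('History',500, 'DetectShadows',true);
    while true
        im = cap.read();                 % next frame, empty when done
        if isempty(im), break; end
        % LearningRate=-1 lets the algorithm choose the rate automatically
        fgmask = bs.apply(im, 'LearningRate',-1);
        imshow(fgmask); drawnow;
    end
    bgImg = bs.getBackgroundImage();     % mean of the background model

Swapping cv.BackgroundSubtractorMOG2 for cv.BackgroundSubtractorKNN leaves the rest of the loop unchanged; only the constructor options differ.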
diff --git a/+cv/Blender.m b/+cv/Blender.m index 288903fc5..861e3b8bc 100644 --- a/+cv/Blender.m +++ b/+cv/Blender.m @@ -5,24 +5,25 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = Blender(blenderType, varargin) %BLENDER Constructor % - % obj = cv.Blender(blenderType) - % obj = cv.Blender(blenderType, 'OptionName',optionValue, ...) + % obj = cv.Blender(blenderType) + % obj = cv.Blender(blenderType, 'OptionName',optionValue, ...) % % ## Input % * __blenderType__ image blender type. One of: - % * __NoBlender__ Simple blender which puts one image over - % another. - % * __FeatherBlender__ Simple blender which mixes images at - % its borders. - % * __MultiBandBlender__ Blender which uses multi-band - % blending algorithm (see [BA83]). + % * __NoBlender__ Simple blender which puts one image over + % another. + % * __FeatherBlender__ Simple blender which mixes images at its + % borders. + % * __MultiBandBlender__ Blender which uses multi-band blending + % algorithm (see [BA83]). % % ## Options % The following are options for the various algorithms: @@ -34,8 +35,8 @@ % * __TryGPU__ default false % * __NumBands__ default 5 % * __WeightType__ One of: - % * __single__ (default) - % * __int16__ + % * __single__ (default) + % * __int16__ % % ## References % [BA83]: @@ -51,7 +52,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.Blender % @@ -62,7 +63,10 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() + % + % ## Output + % * __typename__ Name of C++ type % typename = Blender_(this.id, 'typeid'); end @@ -73,16 +77,16 @@ function delete(this) function prepare(this, varargin) %PREPARE Prepares the blender for blending % - % obj.prepare(corners, sizes) - % obj.prepare(dst_roi) + % obj.prepare(corners, sizes) + % obj.prepare(dst_roi) % % ## Input - % * __corners__ Source images top-left corners. - % Cell-array of 2D points `{[x,y],...}`. - % * __sizes__ Source image sizes. - % Cell-array of sizes `{[w,h],...}`. + % * __corners__ Source images top-left corners. Cell-array of 2D + % points `{[x,y],...}`. + % * __sizes__ Source image sizes. Cell-array of sizes + % `{[w,h],...}`. % * **dst_roi** Destination ROI rectangle `[x,y,w,h]`, same as - % obtained by calling `cv.Blender.resultRoi(corners,sizes)`. + % obtained by calling `cv.Blender.resultRoi(corners,sizes)`. % % See also: cv.Blender.feed % @@ -92,7 +96,7 @@ function prepare(this, varargin) function feed(this, img, mask, tl) %FEED Processes the image % - % obj.feed(img, mask, tl) + % obj.feed(img, mask, tl) % % ## Input % * __img__ Source image, 3-channel of type `int16`. @@ -107,8 +111,8 @@ function feed(this, img, mask, tl) function [dst, dst_mask] = blend(this, varargin) %BLEND Blends and returns the final pano % - % [dst, dst_mask] = obj.blend() - % [dst, dst_mask] = obj.blend('OptionName',optionValue, ...) + % [dst, dst_mask] = obj.blend() + % [dst, dst_mask] = obj.blend('OptionName',optionValue, ...) % % ## Output % * __dst__ Final pano, of type `int16`. 
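The prepare/feed/blend sequence documented above is used roughly as follows (a sketch, not part of this patch); it assumes two registered images `im1`/`im2` (3-channel, int16), their binary masks `mask1`/`mask2`, and top-left corners `tl1`/`tl2` already obtained from image registration:

    blender = cv.Blender('MultiBandBlender', 'NumBands',5);
    corners = {tl1, tl2};
    sizes = {[size(im1,2) size(im1,1)], [size(im2,2) size(im2,1)]};
    blender.prepare(corners, sizes);      % allocate the destination pano
    blender.feed(im1, mask1, tl1);        % feed each warped image in turn
    blender.feed(im2, mask2, tl2);
    [pano, pano_mask] = blender.blend();  % final pano, of type int16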
@@ -129,7 +133,7 @@ function feed(this, img, mask, tl) function [weight_maps, dst_roi] = createWeightMaps(this, masks, corners) %CREATEWEIGHTMAPS Creates weight maps for fixed set of source images by their masks and top-left corners % - % [weight_maps, dst_roi] = obj.createWeightMaps(masks, corners) + % [weight_maps, dst_roi] = obj.createWeightMaps(masks, corners) % % ## Input % * __masks__ Cell-array of image masks. @@ -151,8 +155,8 @@ function feed(this, img, mask, tl) function pyr = createLaplacePyr(img, num_levels, varargin) %CREATELAPLACEPYR Create Laplace pyramid % - % pyr = cv.Blender.createLaplacePyr(img, num_levels) - % pyr = cv.Blender.createLaplacePyr(..., 'OptionName',optionValue, ...) + % pyr = cv.Blender.createLaplacePyr(img, num_levels) + % pyr = cv.Blender.createLaplacePyr(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Input image. @@ -172,8 +176,8 @@ function feed(this, img, mask, tl) function img = restoreImageFromLaplacePyr(pyr, varargin) %RESTOREIMAGEFROMLAPLACEPYR Restore source image from Laplace pyramid % - % img = cv.Blender.restoreImageFromLaplacePyr(pyr) - % img = cv.Blender.restoreImageFromLaplacePyr(pyr, 'OptionName',optionValue, ...) + % img = cv.Blender.restoreImageFromLaplacePyr(pyr) + % img = cv.Blender.restoreImageFromLaplacePyr(pyr, 'OptionName',optionValue, ...) % % ## Input % * __pyr__ Laplace pyramid, cell-array of matrices. @@ -192,7 +196,7 @@ function feed(this, img, mask, tl) function [roi,success] = overlapRoi(tl1, tl2, sz1, sz2) %OVERLAPROI Overlap ROI % - % [roi,success] = cv.Blender.overlapRoi(tl1, tl2, sz1, sz2) + % [roi,success] = cv.Blender.overlapRoi(tl1, tl2, sz1, sz2) % % ## Input % * __tl1__ First top-left corner `[x1,y1]`. @@ -212,7 +216,7 @@ function feed(this, img, mask, tl) function roi = resultRoi(corners, sizes) %RESULTROI Result ROI % - % roi = cv.Blender.resultRoi(corners, sizes) + % roi = cv.Blender.resultRoi(corners, sizes) % % ## Input % * __corners__ Cell-array of top-left corners `{[x,y],...}`. @@ -229,11 +233,11 @@ function feed(this, img, mask, tl) function roi = resultRoiIntersection(corners, sizes) %RESULTROIINTERSECTION Result ROI intersection % - % roi = cv.Blender.resultRoiIntersection(corners, sizes) + % roi = cv.Blender.resultRoiIntersection(corners, sizes) % % ## Input % * __corners__ Cell-array of top-left corners `{[x,y],...}`. - % * __sizes__ Cell-array of corresponding sizes `{[w,h],...}`. + % * __sizes__ Cell-array of corresponding sizes `{[w,h],...}`. % % ## Output % * __roi__ Output rectangle `[x,y,w,h]`. @@ -246,7 +250,7 @@ function feed(this, img, mask, tl) function tl = resultTl(corners) %RESULTTL Result top-left corner % - % tl = cv.Blender.resultTl(corners) + % tl = cv.Blender.resultTl(corners) % % ## Input % * __corners__ Cell-array of top-left corners `{[x,y],...}`. diff --git a/+cv/Boost.m b/+cv/Boost.m index fd1c74019..f83ef452d 100644 --- a/+cv/Boost.m +++ b/+cv/Boost.m @@ -74,7 +74,7 @@ % a particular iteration may be used again for learning some of the weak % classifiers further [FHT98]. % - % ### Prediction with Boost + % ### Prediction with Boost % % The cv.Boost.predict method should be used. Pass `RawOutput=true` to get % the raw sum from Boost classifier. @@ -185,11 +185,11 @@ % % * __Discrete__ Discrete AdaBoost. % * __Real__ Real AdaBoost. It is a technique that utilizes - % confidence-rated predictions and works well with categorical - % data. This is the default. + % confidence-rated predictions and works well with categorical data. + % This is the default. 
% * __Logit__ LogitBoost. It can produce good regression fits. % * __Gentle__ Gentle AdaBoost. It puts less weight on outlier data - % points and for that reason is often good with regression data. + % points and for that reason is often good with regression data. BoostType % The number of weak classifiers. % @@ -208,8 +208,8 @@ function this = Boost(varargin) %BOOST Creates/trains a new Boost model % - % model = cv.Boost() - % model = cv.Boost(...) + % model = cv.Boost() + % model = cv.Boost(...) % % The first variant creates an empty model. Use cv.Boost.train to % train the model, or cv.Boost.load to load a pre-trained model. @@ -228,7 +228,7 @@ function delete(this) %DELETE Destructor % - % model.delete() + % model.delete() % % See also: cv.Boost % @@ -242,7 +242,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % model.clear() + % model.clear() % % The method clear does the same job as the destructor: it % deallocates all the memory occupied by the class members. But @@ -258,11 +258,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = model.empty() + % b = model.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.Boost.clear, cv.Boost.load % @@ -272,17 +272,17 @@ function clear(this) function varargout = save(this, filename) %SAVE Saves the algorithm parameters to a file or a string % - % model.save(filename) - % str = model.save(filename) + % model.save(filename) + % str = model.save(filename) % % ## Input % * __filename__ Name of the file to save to. In case of string - % output, only the filename extension is used to determine - % the output format (XML or YAML). + % output, only the filename extension is used to determine the + % output format (XML or YAML). % % ## Output % * __str__ optional output. If requested, the model is persisted - % to a string in memory instead of writing to disk. + % to a string in memory instead of writing to disk. % % This method stores the complete model state to the specified % XML or YAML file (or to a string in memory, based on the number @@ -296,23 +296,22 @@ function clear(this) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % model.load(filename) - % model.load(str, 'FromString',true) - % model.load(..., 'OptionName',optionValue, ...) + % model.load(filename) + % model.load(str, 'FromString',true) + % model.load(..., 'OptionName',optionValue, ...) % % ## Input % * __filename__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model - % (switches between `Algorithm::load()` and - % `Algorithm::loadFromString()` C++ methods). - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model (switches + % between `Algorithm::load()` and + % `Algorithm::loadFromString()` C++ methods). default false % % This method loads the complete model state from the specified % XML or YAML file (either from disk or serialized string). 
The @@ -326,11 +325,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = model.getDefaultName() + % name = model.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.Boost.save, cv.Boost.load % @@ -343,7 +342,7 @@ function load(this, fname_or_str, varargin) function count = getVarCount(this) %GETVARCOUNT Returns the number of variables in training samples % - % count = model.getVarCount() + % count = model.getVarCount() % % ## Output % * __count__ number of variables in training samples. @@ -356,7 +355,7 @@ function load(this, fname_or_str, varargin) function b = isTrained(this) %ISTRAINED Returns true if the model is trained % - % b = model.isTrained() + % b = model.isTrained() % % ## Output % * __b__ Returns true if the model is trained, false otherwise. @@ -369,11 +368,11 @@ function load(this, fname_or_str, varargin) function b = isClassifier(this) %ISCLASSIFIER Returns true if the model is a classifier % - % b = model.isClassifier() + % b = model.isClassifier() % % ## Output % * __b__ Returns true if the model is a classifier, false if the - % model is a regressor. + % model is a regressor. % % If `BoostType='Discrete'` the problem is classification, and % regression otherwise. @@ -386,26 +385,26 @@ function load(this, fname_or_str, varargin) function status = train(this, samples, responses, varargin) %TRAIN Trains a boosted tree classifier % - % status = model.train(samples, responses) - % status = model.train(csvFilename, []) - % [...] = model.train(..., 'OptionName', optionValue, ...) + % status = model.train(samples, responses) + % status = model.train(csvFilename, []) + % [...] = model.train(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ Row vectors of feature. % * __responses__ Output of the corresponding feature vectors. % * __csvFilename__ The input CSV file name from which to load - % dataset. In this variant, you should set the second - % argument to an empty array. + % dataset. In this variant, you should set the second argument + % to an empty array. % % ## Output % * __status__ Success flag. % % ## Options % * __Data__ Training data options, specified as a cell array of - % key/value pairs of the form `{'key',val, ...}`. See below. + % key/value pairs of the form `{'key',val, ...}`. See below. % * __Flags__ The optional training flags, model-dependent. For - % convenience, you can set the individual flag options - % below, instead of directly setting bits here. default 0 + % convenience, you can set the individual flag options below, + % instead of directly setting bits here. default 0 % * __RawOutput__ See the predict method. default false % * __CompressedInput__ See the predict method. default false % * __PredictSum__ See the predict method. default false @@ -413,90 +412,85 @@ function load(this, fname_or_str, varargin) % % ### Options for `Data` (first variant with samples and reponses) % * __Layout__ Sample types. Default 'Row'. One of: - % * __Row__ each training sample is a row of samples. - % * __Col__ each training sample occupies a column of - % samples. + % * __Row__ each training sample is a row of samples. + % * __Col__ each training sample occupies a column of samples. % * __VarIdx__ vector specifying which variables to use for - % training. 
It can be an integer vector (`int32`) containing - % 0-based variable indices or logical vector (`uint8` or - % `logical`) containing a mask of active variables. Not set - % by default, which uses all variables in the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based variable indices or logical vector (`uint8` or + % `logical`) containing a mask of active variables. Not set by + % default, which uses all variables in the input data. % * __SampleIdx__ vector specifying which samples to use for - % training. It can be an integer vector (`int32`) containing - % 0-based sample indices or logical vector (`uint8` or - % `logical`) containing a mask of training samples of - % interest. Not set by default, which uses all samples in - % the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based sample indices or logical vector (`uint8` or + % `logical`) containing a mask of training samples of interest. + % Not set by default, which uses all samples in the input data. % * __SampleWeights__ optional floating-point vector with weights - % for each sample. Some samples may be more important than - % others for training. You may want to raise the weight of - % certain classes to find the right balance between hit-rate - % and false-alarm rate, and so on. Not set by default, which - % effectively assigns an equal weight of 1 for all samples. + % for each sample. Some samples may be more important than + % others for training. You may want to raise the weight of + % certain classes to find the right balance between hit-rate and + % false-alarm rate, and so on. Not set by default, which + % effectively assigns an equal weight of 1 for all samples. % * __VarType__ optional vector of type `uint8` and size - % ` + `, - % containing types of each input and output variable. By - % default considers all variables as numerical (both input - % and output variables). In case there is only one output - % variable of integer type, it is considered categorical. - % You can also specify a cell-array of strings (or as one - % string of single characters, e.g 'NNNC'). Possible values: - % * __Numerical__, __N__ same as 'Ordered' - % * __Ordered__, __O__ ordered variables - % * __Categorical__, __C__ categorical variables + % ` + `, + % containing types of each input and output variable. By default + % considers all variables as numerical (both input and output + % variables). In case there is only one output variable of + % integer type, it is considered categorical. You can also + % specify a cell-array of strings (or as one string of single + % characters, e.g 'NNNC'). Possible values: + % * __Numerical__, __N__ same as 'Ordered' + % * __Ordered__, __O__ ordered variables + % * __Categorical__, __C__ categorical variables % * __MissingMask__ Indicator mask for missing observation (not - % currently implemented). Not set by default + % currently implemented). Not set by default % * __TrainTestSplitCount__ divides the dataset into train/test - % sets, by specifying number of samples to use for the test - % set. By default all samples are used for the training set. + % sets, by specifying number of samples to use for the test set. + % By default all samples are used for the training set. % * __TrainTestSplitRatio__ divides the dataset into train/test - % sets, by specifying ratio of samples to use for the test - % set. By default all samples are used for the training set. + % sets, by specifying ratio of samples to use for the test set. 
+ % By default all samples are used for the training set. % * __TrainTestSplitShuffle__ when splitting dataset into - % train/test sets, specify whether to shuffle the samples. - % Otherwise samples are assigned sequentially (first train - % then test). default true + % train/test sets, specify whether to shuffle the samples. + % Otherwise samples are assigned sequentially (first train then + % test). default true % % ### Options for `Data` (second variant for loading CSV file) % * __HeaderLineCount__ The number of lines in the beginning to - % skip; besides the header, the function also skips empty - % lines and lines staring with '#'. default 1 + % skip; besides the header, the function also skips empty lines + % and lines starting with '#'. default 1 % * __ResponseStartIdx__ Index of the first output variable. If - % -1, the function considers the last variable as the - % response. If the dataset only contains input variables and - % no responses, use `ResponseStartIdx = -2` and - % `ResponseEndIdx = 0`, then the output variables vector - % will just contain zeros. default -1 + % -1, the function considers the last variable as the response. + % If the dataset only contains input variables and no responses, + % use `ResponseStartIdx = -2` and `ResponseEndIdx = 0`, then the + % output variables vector will just contain zeros. default -1 % * __ResponseEndIdx__ Index of the last output variable + 1. If - % -1, then there is single response variable at - % `ResponseStartIdx`. default -1 + % -1, then there is single response variable at + % `ResponseStartIdx`. default -1 % * __VarTypeSpec__ The optional text string that specifies the - % variables' types. It has the format - % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, - % variables from `n1` to `n2` (inclusive range), `n3`, `n4` - % to `n5` ... are considered ordered and `n6`, `n7` to - % `n8` ... are considered as categorical. The range - % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` - % should cover all the variables. If `VarTypeSpec` is not - % specified, then algorithm uses the following rules: - % * all input variables are considered ordered by default. - % If some column contains has non- numerical values, e.g. - % 'apple', 'pear', 'apple', 'apple', 'mango', the - % corresponding variable is considered categorical. - % * if there are several output variables, they are all - % considered as ordered. Errors are reported when - % non-numerical values are used. - % * if there is a single output variable, then if its values - % are non-numerical or are all integers, then it's - % considered categorical. Otherwise, it's considered - % ordered. + % variables' types. It has the format + % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, variables + % from `n1` to `n2` (inclusive range), `n3`, `n4` to `n5` ... + % are considered ordered and `n6`, `n7` to `n8` ... are + % considered as categorical. The range + % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` should + % cover all the variables. If `VarTypeSpec` is not specified, + % then the algorithm uses the following rules: + % * all input variables are considered ordered by default. If + % some column contains non-numerical values, e.g. + % 'apple', 'pear', 'apple', 'apple', 'mango', the + % corresponding variable is considered categorical. + % * if there are several output variables, they are all + % considered as ordered. Errors are reported when + % non-numerical values are used.
+ % * if there is a single output variable, then if its values are + % non-numerical or are all integers, then it's considered + % categorical. Otherwise, it's considered ordered. % * __Delimiter__ The character used to separate values in each - % line. default ',' + % line. default ',' % * __Missing__ The character used to specify missing - % measurements. It should not be a digit. Although it's a - % non-numerical value, it surely does not affect the - % decision of whether the variable ordered or categorical. - % default '?' + % measurements. It should not be a digit. Although it's a + % non-numerical value, it surely does not affect the decision of + % whether the variable is ordered or categorical. default '?' % * __TrainTestSplitCount__ same as above. % * __TrainTestSplitRatio__ same as above. % * __TrainTestSplitShuffle__ same as above. @@ -516,10 +510,10 @@ function load(this, fname_or_str, varargin) function [err,resp] = calcError(this, samples, responses, varargin) %CALCERROR Computes error on the training or test dataset % - % err = model.calcError(samples, responses) - % err = model.calcError(csvFilename, []) - % [err,resp] = model.calcError(...) - % [...] = model.calcError(..., 'OptionName', optionValue, ...) + % err = model.calcError(samples, responses) + % err = model.calcError(csvFilename, []) + % [err,resp] = model.calcError(...) + % [...] = model.calcError(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ See the train method. % * __responses__ See the train method. % * __csvFilename__ See the train method. % % ## Output % * __err__ computed error. % * __resp__ the optional output responses. % % ## Options % * __Data__ See the train method. % * __TestError__ if true, the error is computed over the test - % subset of the data, otherwise it's computed over the - % training subset of the data. Please note that if you - % loaded a completely different dataset to evaluate an - % already trained classifier, you will probably want not to - % set the test subset at all with `TrainTestSplitRatio` and - % specify `TestError=false`, so that the error is computed - % for the whole new set. Yes, this sounds a bit confusing. - % default false + % subset of the data, otherwise it's computed over the training + % subset of the data. Please note that if you loaded a + % completely different dataset to evaluate an already trained + % classifier, you will probably want not to set the test subset + % at all with `TrainTestSplitRatio` and specify + % `TestError=false`, so that the error is computed for the whole + % new set. Yes, this sounds a bit confusing. default false % % The method uses the predict method to compute the error. For % regression models the error is computed as RMS, for classifiers @@ -554,12 +547,12 @@ function load(this, fname_or_str, varargin) function [results,f] = predict(this, samples, varargin) %PREDICT Predicts response(s) for the provided sample(s) % - % [results,f] = model.predict(samples) - % [...] = model.predict(..., 'OptionName', optionValue, ...) + % [results,f] = model.predict(samples) + % [...] = model.predict(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ Input row vectors (one or more) stored as rows of - % a floating-point matrix. + % a floating-point matrix. % % ## Output % * __results__ Output labels or regression values. % * __f__ % % ## Options % * __Flags__ The optional predict flags, model-dependent. For - % convenience, you can set the individual flag options - % below, instead of directly setting bits here.
default 0 + % convenience, you can set the individual flag options below, + % instead of directly setting bits here. default 0 % * __RawOutput__ makes the method return the raw results (the - % sum), not the class label. default false + % sum), not the class label. default false % * __CompressedInput__ compressed data, containing only the - % active samples/variables. default false + % active samples/variables. default false % * __PreprocessedInput__ This parameter is normally set to false, - % implying a regular input. If it is true, the method - % assumes that all the values of the discrete input - % variables have been already normalized to 0..NCategories - % ranges since the decision tree uses such normalized - % representation internally. It is useful for faster - % prediction with tree ensembles. For ordered input - % variables, the flag is not used. Default false + % implying a regular input. If it is true, the method assumes + % that all the values of the discrete input variables have been + % already normalized to 0..NCategories ranges since the decision + % tree uses such normalized representation internally. It is + % useful for faster prediction with tree ensembles. For ordered + % input variables, the flag is not used. Default false % * __PredictAuto__ Setting this to true, overrides all of the - % other `Predict*` flags. It automatically chooses between - % `PredictSum` and `PredictMaxVote` (if the model is a - % regressor or the number of classes are 2 with `RawOutput` - % set then it picks `PredictSum`, otherwise it picks - % `PredictMaxVote` by default). default true + % other `Predict*` flags. It automatically chooses between + % `PredictSum` and `PredictMaxVote` (if the model is a regressor + % or the number of classes is 2 with `RawOutput` set then it + % picks `PredictSum`, otherwise it picks `PredictMaxVote` by + % default). default true % * __PredictSum__ If true then return sum of votes instead of the - % class label. default false + % class label. default false % * __PredictMaxVote__ If true then return the class label with - % the max vote. default false + % the max vote. default false % % The method runs the sample through the trees in the ensemble and % returns the output class label based on the weighted voting. @@ -606,7 +598,7 @@ function load(this, fname_or_str, varargin) function roots = getRoots(this) %GETROOTS Returns indices of root nodes % - % roots = classifier.getRoots() + % roots = classifier.getRoots() % % ## Output % * __roots__ vector of indices. @@ -619,24 +611,22 @@ function load(this, fname_or_str, varargin) function nodes = getNodes(this) %GETNODES Returns all the nodes % - % nodes = classifier.getNodes() + % nodes = classifier.getNodes() % % ## Output % * __nodes__ Struct-array with the following fields: - % * __value__ Value at the node: a class label in case of - % classification or estimated function value in case - % of regression. - % * __classIdx__ Class index normalized to `0..class_count-1` - % range and assigned to the node. It is used - % internally in classification trees and tree - % ensembles. - % * __parent__ Index of the parent node. - % * __left__ Index of the left child node. - % * __right__ Index of right child node. - % * __defaultDir__ Default direction where to go (-1: left - % or +1: right). It helps in the case of missing - % values. - % * __split__ Index of the first split. + % * __value__ Value at the node: a class label in case of + % classification or estimated function value in case of + % regression.
+ % * __classIdx__ Class index normalized to `0..class_count-1` + % range and assigned to the node. It is used internally in + % classification trees and tree ensembles. + % * __parent__ Index of the parent node. + % * __left__ Index of the left child node. + % * __right__ Index of right child node. + % * __defaultDir__ Default direction where to go (-1 left or +1 + % right). It helps in the case of missing values. + % * __split__ Index of the first split. % % all the node indices are zero-based indices in the returned % vector. @@ -649,26 +639,25 @@ function load(this, fname_or_str, varargin) function splits = getSplits(this) %GETSPLITS Returns all the splits % - % splits = classifier.getSplits() + % splits = classifier.getSplits() % % ## Output % * __splits__ Struct-array with the following fields: - % * __varIdx__ Index of variable on which the split is - % created. - % * __inversed__ If true, then the inverse split rule is - % used (i.e. left and right branches are exchanged in - % the rule expressions below). - % * __quality__ The split quality, a positive number. It is - % used to choose the best split. (It is also used to - % compute variable importance). - % * __next__ Index of the next split in the list of splits - % for the node (surrogate splits). - % * __c__ The threshold value in case of split on an ordered - % variable. The rule is: - % `if var_value < c, next_node = left; else next_node = right; end` - % * __subsetOfs__ Offset of the bitset used by the split on - % a categorical variable. The rule is: - % `if bitset(var_value) == 1, next_node = left; else next_node = right; end` + % * __varIdx__ Index of variable on which the split is created. + % * __inversed__ If true, then the inverse split rule is used + % (i.e. left and right branches are exchanged in the rule + % expressions below). + % * __quality__ The split quality, a positive number. It is used + % to choose the best split. (It is also used to compute + % variable importance). + % * __next__ Index of the next split in the list of splits for + % the node (surrogate splits). + % * __c__ The threshold value in case of split on an ordered + % variable. The rule is: + % `if var_value < c, next_node = left; else next_node = right; end` + % * __subsetOfs__ Offset of the bitset used by the split on a + % categorical variable. The rule is: + % `if bitset(var_value) == 1, next_node = left; else next_node = right; end` % % all the split indices are zero-based indices in the returned % vector. @@ -681,7 +670,7 @@ function load(this, fname_or_str, varargin) function subsets = getSubsets(this) %GETSUBSETS Returns all the bitsets for categorical splits % - % subsets = classifier.getSubsets() + % subsets = classifier.getSubsets() % % ## Output % * __subsets__ vector of indices. diff --git a/+cv/BundleAdjuster.m b/+cv/BundleAdjuster.m index d865277fc..0d62c1686 100644 --- a/+cv/BundleAdjuster.m +++ b/+cv/BundleAdjuster.m @@ -5,7 +5,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -22,54 +23,47 @@ function this = BundleAdjuster(adjusterType, varargin) %BUNDLEADJUSTER Construct a bundle adjuster instance % - % obj = cv.BundleAdjuster(adjusterType) - % obj = cv.BundleAdjuster(adjusterType, 'OptionName',optionValue, ...) + % obj = cv.BundleAdjuster(adjusterType) + % obj = cv.BundleAdjuster(adjusterType, 'OptionName',optionValue, ...) % % ## Input % * __adjusterType__ Bundle adjustment cost function. One of: - % * __NoBundleAdjuster__ Stub bundle adjuster that does - % nothing. 
- % * __BundleAdjusterRay__ Implementation of the camera - % parameters refinement algorithm which minimizes sum - % of the distances between the rays passing through - % the camera center and a feature. - % It can estimate focal length. It ignores the - % refinement mask for now. - % * __BundleAdjusterReproj__ Implementation of the camera - % parameters refinement algorithm which minimizes sum - % of the reprojection error squares. - % It can estimate focal length, aspect ratio, - % principal point. You can affect only on them via the - % refinement mask. - % * __BundleAdjusterAffine__ Bundle adjuster that expects - % affine transformation represented in homogeneous - % coordinates in R for each camera param. Implements - % camera parameters refinement algorithm which - % minimizes sum of the reprojection error squares. - % It estimates all transformation parameters. - % Refinement mask is ignored. See also - % cv.AffineBasedEstimator, - % `AffineBestOf2NearestMatcher`. - % * __BundleAdjusterAffinePartial__ Bundle adjuster that - % expects affine transformation with 4 DOF represented - % in homogeneous coordinates in R for each camera - % param. Implements camera parameters refinement - % algorithm which minimizes sum of the reprojection - % error squares. - % It estimates all transformation parameters. - % Refinement mask is ignored. + % * __NoBundleAdjuster__ Stub bundle adjuster that does nothing. + % * __BundleAdjusterRay__ Implementation of the camera + % parameters refinement algorithm which minimizes sum of the + % distances between the rays passing through the camera center + % and a feature. It can estimate focal length. It ignores the + % refinement mask for now. + % * __BundleAdjusterReproj__ Implementation of the camera + % parameters refinement algorithm which minimizes sum of the + % reprojection error squares. It can estimate focal length, + % aspect ratio, principal point. You can affect only these + % via the refinement mask. + % * __BundleAdjusterAffine__ Bundle adjuster that expects affine + % transformation represented in homogeneous coordinates in R + % for each camera param. Implements camera parameters + % refinement algorithm which minimizes sum of the reprojection + % error squares. It estimates all transformation parameters. + % Refinement mask is ignored. See also cv.AffineBasedEstimator + % and `AffineBestOf2NearestMatcher`. + % * __BundleAdjusterAffinePartial__ Bundle adjuster that expects + % affine transformation with 4 DOF represented in homogeneous + % coordinates in R for each camera param. Implements camera + % parameters refinement algorithm which minimizes sum of the + % reprojection error squares. It estimates all + % transformation parameters. Refinement mask is ignored. % % ## Options % The following are options accepted by all adjusters: % % * __ConfThresh__ Threshold to filter out poorly matched image - % pairs. default 1 + % pairs. default 1 % * __RefinementMask__ 3x3 8-bit mask, where 0 means don't refine - % respective parameter, non-zero means refine. - % default `eye(3,'uint8')` + % respective parameter, non-zero means refine. + % default `eye(3,'uint8')` % * __TermCriteria__ Levenberg-Marquardt algorithm termination - % criteria. default - % `struct('type','Count+EPS', 'maxCount',1000, 'epsilon',eps)` + % criteria.
default + % `struct('type','Count+EPS', 'maxCount',1000, 'epsilon',eps)` % % See also: cv.BundleAdjuster.estimate % @@ -79,7 +73,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.BundleAdjuster % @@ -90,7 +84,10 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() + % + % ## Output + % * __typename__ Name of C++ type % typename = BundleAdjuster_(this.id, 'typeid'); end @@ -101,15 +98,15 @@ function delete(this) function [cameras,success] = refine(this, features, pairwise_matches, cameras) %REFINE Refine camera parameters % - % cameras = obj.refine(features, pairwise_matches, cameras) - % [cameras,success] = obj.refine(...) + % cameras = obj.refine(features, pairwise_matches, cameras) + % [cameras,success] = obj.refine(...) % % ## Input % * __features__ Features of images. See cv.FeaturesFinder. - % * **pairwise_matches** Pairwise matches of images. - % See cv.FeaturesMatcher. - % * __cameras__ Initial camera parameters to refine. - % See cv.Estimator. + % * **pairwise_matches** Pairwise matches of images. See + % cv.FeaturesMatcher. + % * __cameras__ Initial camera parameters to refine. See + % cv.Estimator. % % ## Output % * __cameras__ Refined camera parameters. @@ -128,20 +125,20 @@ function delete(this) function rmats = waveCorrect(rmats, varargin) %WAVECORRECT Tries to make panorama more horizontal (or vertical) % - % rmats = cv.BundleAdjuster.waveCorrect(rmats) - % rmats = cv.BundleAdjuster.waveCorrect(rmats, 'OptionName',optionValue, ...) + % rmats = cv.BundleAdjuster.waveCorrect(rmats) + % rmats = cv.BundleAdjuster.waveCorrect(rmats, 'OptionName',optionValue, ...) % % ## Input % * __rmats__ Input camera rotation matrices. Cell-array of 3x3 - % matrices. + % matrices. % % ## Output % * __rmats__ Output camera rotation matrices, wave corrected. % % ## Options % * __Kind__ Wave correction kind, one of: - % * __Horiz__ horizontal (default) - % * __Vert__ vertical + % * __Horiz__ horizontal (default) + % * __Vert__ vertical % rmats = BundleAdjuster_(0, 'waveCorrect', rmats, varargin{:}); end diff --git a/+cv/CLAHE.m b/+cv/CLAHE.m index c55dd49fd..f13452590 100644 --- a/+cv/CLAHE.m +++ b/+cv/CLAHE.m @@ -1,7 +1,7 @@ %CLAHE Contrast Limited Adaptive Histogram Equalization % -% dst = cv.CLAHE(src) -% dst = cv.CLAHE(src, 'OptionName', optionValue, ...) +% dst = cv.CLAHE(src) +% dst = cv.CLAHE(src, 'OptionName', optionValue, ...) % % ## Input % * __src__ Input 8-bit or 16-bit single-channel image. @@ -13,5 +13,5 @@ % * __ClipLimit__ default 40.0 % * __TileGridSize__ default [8,8] % -% See also: adapthisteq +% See also: adapthisteq, cv.equalizeHist % diff --git a/+cv/CalibrateDebevec.m b/+cv/CalibrateDebevec.m index e035b5e17..ad875dab0 100644 --- a/+cv/CalibrateDebevec.m +++ b/+cv/CalibrateDebevec.m @@ -18,7 +18,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -38,8 +39,8 @@ function this = CalibrateDebevec(varargin) %CALIBRATEDEBEVEC Creates CalibrateDebevec object % - % obj = cv.CalibrateDebevec() - % obj = cv.CalibrateDebevec('OptionName',optionValue, ...) + % obj = cv.CalibrateDebevec() + % obj = cv.CalibrateDebevec('OptionName',optionValue, ...) 
% % ## Options % * __Samples__ default 70 @@ -54,7 +55,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.CalibrateDebevec % @@ -68,17 +69,17 @@ function delete(this) function dst = process(this, src, etimes) %PROCESS Recovers inverse camera response % - % dst = obj.process(src, etimes) + % dst = obj.process(src, etimes) % % ## Input % * __src__ cell array of input images, all of the same size and - % `uint8` type. + % `uint8` type. % * __etimes__ vector of exposure time values for each image. % % ## Output % * __dst__ 256x1xCN `single` matrix with inverse camera response - % function. It has the same number of channels as images - % `src{i}`. + % function. It has the same number of channels as images + % `src{i}`. % % See also: cv.CalibrateDebevec.CalibrateDebevec % @@ -91,7 +92,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.CalibrateDebevec.empty, cv.CalibrateDebevec.load % @@ -101,11 +102,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.CalibrateDebevec.clear, cv.CalibrateDebevec.load % @@ -115,11 +116,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.CalibrateDebevec.save, cv.CalibrateDebevec.load % @@ -129,7 +130,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -145,21 +146,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). 
The previous diff --git a/+cv/CalibrateRobertson.m b/+cv/CalibrateRobertson.m index 52b057bac..76da147a4 100644 --- a/+cv/CalibrateRobertson.m +++ b/+cv/CalibrateRobertson.m @@ -18,7 +18,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -34,8 +35,8 @@ function this = CalibrateRobertson(varargin) %CALIBRATEROBERTSON Creates CalibrateRobertson object % - % obj = cv.CalibrateRobertson() - % obj = cv.CalibrateRobertson('OptionName',optionValue, ...) + % obj = cv.CalibrateRobertson() + % obj = cv.CalibrateRobertson('OptionName',optionValue, ...) % % ## Options % * __MaxIter__ default 30 @@ -49,7 +50,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.CalibrateRobertson % @@ -60,11 +61,11 @@ function delete(this) function radiance = getRadiance(this) %GETRADIANCE Get Radiance matrix % - % radiance = obj.getRadiance() + % radiance = obj.getRadiance() % % ## Output % * __radiance__ radiance matrix, same size as input images and - % `single` type. + % `single` type. % radiance = CalibrateRobertson_(this.id, 'getRadiance'); end @@ -75,17 +76,17 @@ function delete(this) function dst = process(this, src, etimes) %PROCESS Recovers inverse camera response % - % dst = obj.process(src, etimes) + % dst = obj.process(src, etimes) % % ## Input % * __src__ cell array of input images, all of the same size and - % `uint8` type. + % `uint8` type. % * __etimes__ vector of exposure time values for each image. % % ## Output % * __dst__ 256x1xCN `single` matrix with inverse camera response - % function. It has the same number of channels as images - % `src{i}`. + % function. It has the same number of channels as images + % `src{i}`. % % See also: cv.CalibrateRobertson.CalibrateRobertson % @@ -98,7 +99,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.CalibrateRobertson.empty, cv.CalibrateRobertson.load % @@ -108,11 +109,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.CalibrateRobertson.clear, cv.CalibrateRobertson.load % @@ -122,11 +123,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.CalibrateRobertson.save, cv.CalibrateRobertson.load % @@ -136,7 +137,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -152,21 +153,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. 
+ % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous diff --git a/+cv/CamShift.m b/+cv/CamShift.m index de211c18a..b4ea828aa 100644 --- a/+cv/CamShift.m +++ b/+cv/CamShift.m @@ -1,23 +1,23 @@ %CAMSHIFT Finds an object center, size, and orientation % -% box = cv.CamShift(probImage, window) -% [box,window] = cv.CamShift(probImage, window) -% [...] = cv.CamShift(..., 'OptionName', optionValue, ...) +% box = cv.CamShift(probImage, window) +% [box,window] = cv.CamShift(probImage, window) +% [...] = cv.CamShift(..., 'OptionName', optionValue, ...) % % ## Input % * __probImage__ Back projection of the object histogram. See -% cv.calcBackProject for details. +% cv.calcBackProject for details. % * __window__ Initial search window `[x,y,w,h]`. % % ## Output % * __box__ Output rectangle with rotation. A scalar structure of the form: -% `struct('center',[x,y], 'size',[w,h], 'angle',a)` +% `struct('center',[x,y], 'size',[w,h], 'angle',a)` % * __window__ Converged CAMSHIFT window `[x,y,w,h]` % % ## Options % * __Criteria__ Stop criteria for the underlying cv.meanShift. Accepts a -% struct with 'type', 'maxCount', and 'epsilon' fields. -% Default `struct('type','Count+EPS', 'maxCount',100, 'epsilon',1.0)` +% struct with 'type', 'maxCount', and 'epsilon' fields. Default +% `struct('type','Count+EPS', 'maxCount',100, 'epsilon',1.0)` % % The function implements the CAMSHIFT object tracking algorithm % [Bradski98]. First, it finds an object center using cv.meanShift and then diff --git a/+cv/Canny.m b/+cv/Canny.m index a547be406..31b01d6a9 100644 --- a/+cv/Canny.m +++ b/+cv/Canny.m @@ -1,24 +1,23 @@ %CANNY Finds edges in an image using the Canny algorithm % -% edges = cv.Canny(image, thresh) -% edges = cv.Canny(image, thresh, 'OptionName', optionValue, ...) +% edges = cv.Canny(image, thresh) +% edges = cv.Canny(image, thresh, 'OptionName', optionValue, ...) % % ## Input % * __image__ 8-bit input image (grayscale or color image). % * __thresh__ Threshold for the hysteresis procedure. Scalar or 2-element -% vector `[low_thresh,high_thresh]`. +% vector `[low_thresh,high_thresh]`. % % ## Output -% * __edges__ Output edge map; single channels 8-bit image, which has the -% same size as `image`. +% * __edges__ Output edge map; single-channel 8-bit image, which has the same +% size as `image`. % % ## Options % * __ApertureSize__ Aperture size for the Sobel operator. Default 3 % * __L2Gradient__ Flag indicating whether a more accurate L2 norm -% `sqrt((dI/dx)^2 + (dI/dy)^2)` should be used to compute the image -% gradient magnitude (`L2gradient=true`), or whether the default L1 norm -% `abs(dI/dx) + abs(dI/dy)` is enough (`L2gradient=false`). -% Default false +% `sqrt((dI/dx)^2 + (dI/dy)^2)` should be used to compute the image gradient +% magnitude (`L2gradient=true`), or whether the default L1 norm +% `abs(dI/dx) + abs(dI/dy)` is enough (`L2gradient=false`).
Default false % % The function finds edges in the input image `image` and marks them in the % output map `edges` using the Canny algorithm. When `thresh` is 2-element % vector, the smallest value between them is used for edge linking. The % largest value is used to find initial segments of strong edges. When % `thresh` is a scalar, it is treated as a higher threshold value and % `0.4*thresh` is used for the lower threshold. See -% [Canny edge detector](http://en.wikipedia.org/wiki/Canny_edge_detector). +% [Canny edge detector](https://en.wikipedia.org/wiki/Canny_edge_detector). % % ## References % [Canny86]: diff --git a/+cv/Canny2.m b/+cv/Canny2.m index 029e92055..586cf3ddf 100644 --- a/+cv/Canny2.m +++ b/+cv/Canny2.m @@ -1,24 +1,23 @@ %CANNY2 Finds edges in an image using the Canny algorithm with custom image gradient % -% edges = cv.Canny2(dx, dy, thresh) -% edges = cv.Canny2(dx, dy, thresh, 'OptionName', optionValue, ...) +% edges = cv.Canny2(dx, dy, thresh) +% edges = cv.Canny2(dx, dy, thresh, 'OptionName', optionValue, ...) % % ## Input % * __dx__ 16-bit x derivative of input image (1 or 3 channels of type `int16`). % * __dy__ 16-bit y derivative of input image (same size and type as `dx`). % * __thresh__ Threshold for the hysteresis procedure. Scalar or 2-element -% vector `[low_thresh,high_thresh]`. +% vector `[low_thresh,high_thresh]`. % % ## Output -% * __edges__ Output edge map; single channels 8-bit image, which has the -% same size as the input image. +% * __edges__ Output edge map; single-channel 8-bit image, which has the same +% size as the input image. % % ## Options % * __L2Gradient__ Flag indicating whether a more accurate L2 norm -% `sqrt((dI/dx)^2 + (dI/dy)^2)` should be used to compute the image -% gradient magnitude (`L2gradient=true`), or whether the default L1 norm -% `abs(dI/dx) + abs(dI/dy)` is enough (`L2gradient=false`). -% Default false +% `sqrt((dI/dx)^2 + (dI/dy)^2)` should be used to compute the image gradient +% magnitude (`L2gradient=true`), or whether the default L1 norm +% `abs(dI/dx) + abs(dI/dy)` is enough (`L2gradient=false`). Default false % % See also: cv.Canny, cv.Sobel, cv.Scharr % diff --git a/+cv/CascadeClassifier.m b/+cv/CascadeClassifier.m index 0e062ac6e..bcbdc859a 100644 --- a/+cv/CascadeClassifier.m +++ b/+cv/CascadeClassifier.m @@ -40,7 +40,7 @@ % % See image: % - % ![image](http://docs.opencv.org/3.1.0/haarfeatures.png) + % ![image](https://docs.opencv.org/3.3.1/haarfeatures.png) % % The feature used in a particular classifier is specified by its shape % (1a, 2b etc.), position within the region of interest and the scale @@ -60,21 +60,21 @@ % cascade of boosted classifiers from a set of samples. This is not % included in mexopencv. % - % ## Note + % ### Note % In the new interface it is also possible to use LBP (local binary % pattern) features in addition to Haar-like features.
% % ## Example % The usage example is shown in the following: % - % xmlfile = fullfile(mexopencv.root(),'test','haarcascade_frontalface_alt2.xml'); - % cc = cv.CascadeClassifier(xmlfile); - % im = imread(fullfile(mexopencv.root(),'test','lena.jpg')); - % boxes = cc.detect(im); - % for i=1:numel(boxes) - % im = cv.rectangle(im, boxes{i}, 'Color',[0 255 0], 'Thickness',2); - % end - % imshow(im) + % xmlfile = fullfile(mexopencv.root(),'test','haarcascade_frontalface_alt2.xml'); + % cc = cv.CascadeClassifier(xmlfile); + % im = imread(fullfile(mexopencv.root(),'test','lena.jpg')); + % boxes = cc.detect(im); + % for i=1:numel(boxes) + % im = cv.rectangle(im, boxes{i}, 'Color',[0 255 0], 'Thickness',2); + % end + % imshow(im) % % ## References % [Viola01]: @@ -101,13 +101,13 @@ function this = CascadeClassifier(filename) %CASCADECLASSIFIER Creates a new cascade classifier object % - % classifier = cv.CascadeClassifier() - % classifier = cv.CascadeClassifier(filename) + % classifier = cv.CascadeClassifier() + % classifier = cv.CascadeClassifier(filename) % % ## Input % * __filename__ Name of the XML file from which the trained - % classifier is loaded. This is handled by the - % cv.CascadeClassifier.load method. + % classifier is loaded. This is handled by the + % cv.CascadeClassifier.load method. % % Supports HAAR and LBP cascades. % @@ -122,7 +122,7 @@ function delete(this) %DELETE Destructor % - % classifier.delete() + % classifier.delete() % % See also: cv.CascadeClassifier % @@ -133,7 +133,7 @@ function delete(this) function tf = empty(this) %EMPTY Checks whether the classifier has been loaded % - % tf = classifier.empty() + % tf = classifier.empty() % % ## Output % * __tf__ a logical value indicating empty object when true. @@ -146,14 +146,14 @@ function delete(this) function status = load(this, filename) %LOAD Loads a classifier from a file % - % classifier.load(filename) - % status = classifier.load(filename) + % classifier.load(filename) + % status = classifier.load(filename) % % ## Input % * __filename__ Name of the file from which the classifier is - % loaded. The file may contain an old HAAR classifier - % trained by the `haartraining` application or a new cascade - % classifier trained by the `traincascade` application. + % loaded. The file may contain an old HAAR classifier trained by + % the `haartraining` application or a new cascade classifier + % trained by the `traincascade` application. % % ## Output % * __status__ a logical value indicating success of load. 
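A minimal load-and-verify sketch for the constructor/load/empty workflow documented above (it reuses the test cascade shipped with mexopencv; any HAAR or LBP cascade path works):

    cc = cv.CascadeClassifier();
    xmlfile = fullfile(mexopencv.root(),'test','haarcascade_frontalface_alt2.xml');
    ok = cc.load(xmlfile);                      % status output of load
    assert(ok && ~cc.empty(), 'cascade failed to load');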
@@ -166,7 +166,7 @@ function delete(this) function tf = isOldFormatCascade(this) %ISOLDFORMATCASCADE Check if loaded classifier is from the old format % - % tf = classifier.isOldFormatCascade() + % tf = classifier.isOldFormatCascade() % % ## Output % * __tf__ true if old format, false if new format classifier @@ -179,7 +179,7 @@ function delete(this) function ftype = getFeatureType(this) %GETFEATURETYPE Get features type % - % ftype = classifier.getFeatureType() + % ftype = classifier.getFeatureType() % % ## Output % * __ftype__ @@ -191,7 +191,7 @@ function delete(this) function winsiz = getOriginalWindowSize(this) %GETORIGINALWINDOWSIZE Get original window size % - % winsiz = classifier.getOriginalWindowSize() + % winsiz = classifier.getOriginalWindowSize() % % ## Output % * __winsiz__ @@ -202,11 +202,11 @@ function delete(this) function S = getMaskGenerator(this) %GETMASKGENERATOR Get the current mask generator function % - % S = classifier.getMaskGenerator() + % S = classifier.getMaskGenerator() % % ## Output % * __S__ a struct with the following fields: - % * __fun__ the name of the mask generator MATLAB function. + % * __fun__ the name of the mask generator MATLAB function. % % See also: cv.CascadeClassifier.setMaskGenerator % @@ -216,12 +216,12 @@ function delete(this) function setMaskGenerator(this, maskgenFcn) %SETMASKGENERATOR Set the current mask generator function % - % classifier.setMaskGenerator(maskgenFcn) + % classifier.setMaskGenerator(maskgenFcn) % % ## Input % * __maskgenFcn__ name of a MATLAB M-function that generates - % mask. It also accepts the special name of - % 'FaceDetectionMaskGenerator'. + % a mask. It also accepts the special name of + % 'FaceDetectionMaskGenerator'. % % This only works with the new format cascade classifiers. % @@ -235,52 +235,52 @@ function setMaskGenerator(this, maskgenFcn) function [boxes, varargout] = detect(this, im, varargin) %DETECT Detects objects of different sizes in the input image % - % boxes = classifier.detect(im) - % [boxes, numDetections] = classifier.detect(im) - % [boxes, rejectLevels, levelWeights] = classifier.detect(im) - % [...] = classifier.detect(im, 'OptionName', optionValue, ...) + % boxes = classifier.detect(im) + % [boxes, numDetections] = classifier.detect(im) + % [boxes, rejectLevels, levelWeights] = classifier.detect(im) + % [...] = classifier.detect(im, 'OptionName', optionValue, ...) % % ## Input % * __im__ Matrix of the type `uint8` containing an image where - % objects are detected. + % objects are detected. % % ## Output % * __boxes__ Cell array of rectangles where each rectangle - % contains the detected object, the rectangles may be - % partially outside the original image. + % contains the detected object; the rectangles may be partially + % outside the original image. % * __numDetections__ optional vector of detection numbers for the - % corresponding objects. An object's number of detections is - % the number of neighboring positively classified rectangles - % that were joined together to form the object. + % corresponding objects. An object's number of detections is the + % number of neighboring positively classified rectangles that + % were joined together to form the object. % * __rejectLevels__ optional output vector of integers. Implies - % `OutputRejectLevels=true`. + % `OutputRejectLevels=true`. % * __levelWeights__ optional output vector of doubles. Implies - % `OutputRejectLevels=true`. + % `OutputRejectLevels=true`.
% % ## Options % * __ScaleFactor__ Parameter specifying how much the image size - % is reduced at each image scale. default 1.1 + % is reduced at each image scale. default 1.1 % * __MinNeighbors__ Parameter specifying how many neighbors each - % candiate rectangle should have to retain it. default 3 + % candidate rectangle should have to retain it. default 3 % * __MinSize__ Minimum possible object size. Objects smaller than - % that are ignored. Not set by default. + % that are ignored. Not set by default. % * __MaxSize__ Maximum possible object size. Objects larger than - % that are ignored. If `MaxSize == MinSize` model is - % evaluated on single scale. Not set by default. + % that are ignored. If `MaxSize == MinSize` the model is + % evaluated on a single scale. Not set by default. % * __OutputRejectLevels__ if true, returns `rejectLevels` and - % `levelWeights`. default false + % `levelWeights`. default false % * __DoCannyPruning__ Parameter with the same meaning for an old - % cascade as in the function `cvHaarDetectObjects`. It is - % not used for a new cascade. default false + % cascade as in the function `cvHaarDetectObjects`. It is not + % used for a new cascade. default false % * __ScaleImage__ Parameter with the same meaning for an old - % cascade as in the function `cvHaarDetectObjects`. It is - % not used for a new cascade. default false + % cascade as in the function `cvHaarDetectObjects`. It is not + % used for a new cascade. default false % * __FindBiggestObject__ Parameter with the same meaning for an - % old cascade as in the function `cvHaarDetectObjects`. It - % is not used for a new cascade. default false + % old cascade as in the function `cvHaarDetectObjects`. It is + % not used for a new cascade. default false % * __DoRoughSearch__ Parameter with the same meaning for an old - % cascade as in the function `cvHaarDetectObjects`. It is - % not used for a new cascade. default false + % cascade as in the function `cvHaarDetectObjects`. It is not + % used for a new cascade. default false % % The detected objects are returned as a cell array of rectangles. % Note that the function has three variants based on the number of @@ -297,9 +297,9 @@ function setMaskGenerator(this, maskgenFcn) % strong from weaker classifications. % A code sample on how to use it efficiently can be found below: % - % model = cv.CascadeClassifier('/path/to/your/model.xml'); - % [boxes, levels, weights] = model.detect(img, 'OutputRejectLevels',true); - % fprintf('Detection [%d,%d,%d,%d] with weight %f\n', boxes{1}, weights(1)); + % model = cv.CascadeClassifier('/path/to/your/model.xml'); + % [boxes, levels, weights] = model.detect(img, 'OutputRejectLevels',true); + % fprintf('Detection [%d,%d,%d,%d] with weight %f\n', boxes{1}, weights(1)); % % See also: cv.CascadeClassifier.CascadeClassifier % @@ -312,8 +312,8 @@ function setMaskGenerator(this, maskgenFcn) function status = convert(oldcascade, newcascade) %CONVERT Convert classifier file from the old format to the new format % - % cv.CascadeClassifier.convert(oldcascade, newcascade) - % status = cv.CascadeClassifier.convert(oldcascade, newcascade) + % cv.CascadeClassifier.convert(oldcascade, newcascade) + % status = cv.CascadeClassifier.convert(oldcascade, newcascade) % % ## Input % * __oldcascade__ name of classifier file to read in old format diff --git a/+cv/ConjGradSolver.m b/+cv/ConjGradSolver.m index ac375b766..3caf12c58 100644 --- a/+cv/ConjGradSolver.m +++ b/+cv/ConjGradSolver.m @@ -6,7 +6,7 @@ % beautifully clear explanatory article [1].
% % The method can be seen as an adaptation of a standard - % [Conjugate Gradient method](http://en.wikipedia.org/wiki/Conjugate_gradient_method) + % [Conjugate Gradient method](https://en.wikipedia.org/wiki/Conjugate_gradient_method) % for numerically solving the systems of linear equations. % % It should be noted, that this method, although deterministic, is rather @@ -41,7 +41,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -52,7 +53,7 @@ % * __fun__ name of M-file that evaluates the objective function. % * __gradfun__ name of M-file that evaluates the gradient. default '' % * __gradeps__ used by finite difference method, in case `gradfun` - % was not supplied. default 1e-3 + % was not supplied. default 1e-3 % % It should be set before the call to cv.ConjGradSolver.minimize, % as default value is not usable. @@ -74,29 +75,29 @@ function this = ConjGradSolver(varargin) %CONJGRADSOLVER Creates a ConjGradSolver object % - % solver = cv.ConjGradSolver() - % solver = cv.ConjGradSolver('OptionName', optionValue, ...) + % solver = cv.ConjGradSolver() + % solver = cv.ConjGradSolver('OptionName', optionValue, ...) % % ## Options % * __Function__ Objective function that will be minimized, - % specified as a structure with the following fields - % (`gradfun` and `gradeps` are optional fields): - % * __dims__ Number of dimensions - % * __fun__ string, name of M-file that implements the - % `calc` method. It should receive a vector of the - % specified dimension, and return a scalar value of - % the objective function evaluated at that point. - % * __gradfun__ string, name of M-file that implements the - % `getGradient` method. It should receive an `ndims` - % vector, and return a vector of partial derivatives. - % If an empty string is specified (default), the - % gradient is approximated using finite difference - % method as: `F'(x) = (F(x+h) - F(x-h)) / 2*h` (at the - % cost of exta function evaluations and less accuracy). - % * __gradeps__ gradient step `h` used in finite difference - % method. default 1e-3 + % specified as a structure with the following fields (`gradfun` + % and `gradeps` are optional fields): + % * __dims__ Number of dimensions + % * __fun__ string, name of M-file that implements the `calc` + % method. It should receive a vector of the specified + % dimension, and return a scalar value of the objective + % function evaluated at that point. + % * __gradfun__ string, name of M-file that implements the + % `getGradient` method. It should receive an `ndims` vector, + % and return a vector of partial derivatives. If an empty + % string is specified (default), the gradient is approximated + % using finite difference method as: + % `F'(x) = (F(x+h) - F(x-h)) / (2*h)` (at the cost of extra + % function evaluations and less accuracy). + % * __gradeps__ gradient step `h` used in finite difference + % method. default 1e-3 % * __TermCriteria__ Terminal criteria to the algorithm. default - % `struct('type','Count+EPS', 'maxCount',5000, 'epsilon',1e-6)` + % `struct('type','Count+EPS', 'maxCount',5000, 'epsilon',1e-6)` % % All the parameters are optional, so this procedure can be called % even without parameters at all.
In this case, the default values @@ -114,7 +115,7 @@ function delete(this) %DELETE Destructor % - % solver.delete() + % solver.delete() % % See also: cv.ConjGradSolver % @@ -125,16 +126,16 @@ function delete(this) function [x,fx] = minimize(this, x0) %MINIMIZE Runs the algorithm and performs the minimization % - % [x,fx] = solver.minimize(x0) + % [x,fx] = solver.minimize(x0) % % ## Input % * __x0__ The initial point, that will become a centroid of an - % initial simplex. + % initial simplex. % % ## Output % * __x__ After the algorithm terminates, it will be set - % to the point where the algorithm stops, the point of - % possible minimum. + % to the point where the algorithm stops, the point of possible + % minimum. % * __fx__ The value of a function at the point found. % % The sole input parameter determines the centroid of the starting @@ -152,7 +153,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % solver.clear() + % solver.clear() % % See also: cv.ConjGradSolver.empty % @@ -162,11 +163,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = solver.empty() + % b = solver.empty() % % ## Output - % * __b__ returns true of the algorithm is empty - % (e.g. in the very beginning or after unsuccessful read). + % * __b__ returns true if the algorithm is empty (e.g. in the very + % beginning or after unsuccessful read). % % See also: cv.ConjGradSolver.clear % @@ -176,11 +177,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = solver.getDefaultName() + % name = solver.getDefaultName() % % ## Output - % * __name__ This string is used as top level xml/yml node tag - % when the object is saved to a file or string. + % * __name__ This string is used as top level XML/YML node tag + % when the object is saved to a file or string. % % See also: cv.ConjGradSolver.save, cv.ConjGradSolver.load % @@ -190,7 +191,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -205,21 +206,21 @@ function save(this, filename) function load(this, fname_or_str) %LOAD Loads algorithm from a file or a string % - % obj.load(filename) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input - % * __filename__ Name of the file to read. + % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from a file storage. % The previous model state is discarded. diff --git a/+cv/DTrees.m b/+cv/DTrees.m index 4fe90085d..90dfc78c9 100644 --- a/+cv/DTrees.m +++ b/+cv/DTrees.m @@ -36,17 +36,17 @@ % following variables are possible: % % * **Ordered variables**.
The variable value is compared with a threshold - % that is also stored in the node. If the value is less than the - % threshold, the procedure goes to the left. Otherwise, it goes to the - % right. For example, if the weight is less than 1 kilogram, the procedure - % goes to the left, else to the right. + % that is also stored in the node. If the value is less than the + % threshold, the procedure goes to the left. Otherwise, it goes to the + % right. For example, if the weight is less than 1 kilogram, the + % procedure goes to the left, else to the right. % % * **Categorical variables**. A discrete variable value is tested to see - % whether it belongs to a certain subset of values (also stored in the - % node) from a limited set of values the variable could take. If it does, - % the procedure goes to the left. Otherwise, it goes to the right. For - % example, if the color is green or red, go to the left, else to the - % right. + % whether it belongs to a certain subset of values (also stored in the + % node) from a limited set of values the variable could take. If it + % does, the procedure goes to the left. Otherwise, it goes to the right. + % For example, if the color is green or red, go to the left, else to the + % right. % % So, in each node, a pair of entities (`variable_index`, % `decision_rule (threshold/subset)`) is used. This pair is called a split @@ -209,8 +209,8 @@ function this = DTrees(varargin) %DTREES Creates/trains a new decision tree model % - % model = cv.DTrees() - % model = cv.DTrees(...) + % model = cv.DTrees() + % model = cv.DTrees(...) % % The first variant creates an empty decision tree with the % default parameters. It should be then trained using the train @@ -236,7 +236,7 @@ function delete(this) %DELETE Destructor % - % model.delete() + % model.delete() % % See also: cv.DTrees % @@ -250,7 +250,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % model.clear() + % model.clear() % % The method clear does the same job as the destructor: it % deallocates all the memory occupied by the class members. But @@ -266,11 +266,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = model.empty() + % b = model.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.DTrees.clear, cv.DTrees.load % @@ -280,17 +280,17 @@ function clear(this) function varargout = save(this, filename) %SAVE Saves the algorithm parameters to a file or a string % - % model.save(filename) - % str = model.save(filename) + % model.save(filename) + % str = model.save(filename) % % ## Input % * __filename__ Name of the file to save to. In case of string - % output, only the filename extension is used to determine - % the output format (XML or YAML). + % output, only the filename extension is used to determine the + % output format (XML or YAML). % % ## Output % * __str__ optional output. If requested, the model is persisted - % to a string in memory instead of writing to disk. + % to a string in memory instead of writing to disk. 
% % This method stores the complete model state to the specified % XML or YAML file (or to a string in memory, based on the number @@ -304,23 +304,22 @@ function clear(this) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % model.load(filename) - % model.load(str, 'FromString',true) - % model.load(..., 'OptionName',optionValue, ...) + % model.load(filename) + % model.load(str, 'FromString',true) + % model.load(..., 'OptionName',optionValue, ...) % % ## Input % * __filename__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model - % (switches between `Algorithm::load()` and - % `Algorithm::loadFromString()` C++ methods). - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model (switches + % between `Algorithm::load()` and + % `Algorithm::loadFromString()` C++ methods). default false % % This method loads the complete model state from the specified % XML or YAML file (either from disk or serialized string). The @@ -334,11 +333,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = model.getDefaultName() + % name = model.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.DTrees.save, cv.DTrees.load % @@ -351,7 +350,7 @@ function load(this, fname_or_str, varargin) function count = getVarCount(this) %GETVARCOUNT Returns the number of variables in training samples % - % count = model.getVarCount() + % count = model.getVarCount() % % ## Output % * __count__ number of variables in training samples. @@ -364,7 +363,7 @@ function load(this, fname_or_str, varargin) function b = isTrained(this) %ISTRAINED Returns true if the model is trained % - % b = model.isTrained() + % b = model.isTrained() % % ## Output % * __b__ Returns true if the model is trained, false otherwise. @@ -377,11 +376,11 @@ function load(this, fname_or_str, varargin) function b = isClassifier(this) %ISCLASSIFIER Returns true if the model is a classifier % - % b = model.isClassifier() + % b = model.isClassifier() % % ## Output % * __b__ Returns true if the model is a classifier, false if the - % model is a regressor. + % model is a regressor. % % See also: cv.DTrees.isTrained % @@ -391,112 +390,107 @@ function load(this, fname_or_str, varargin) function status = train(this, samples, responses, varargin) %TRAIN Trains a decision tree % - % status = model.train(samples, responses) - % status = model.train(csvFilename, []) - % [...] = model.train(..., 'OptionName', optionValue, ...) + % status = model.train(samples, responses) + % status = model.train(csvFilename, []) + % [...] = model.train(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ Row vectors of feature. % * __responses__ Output of the corresponding feature vectors. % * __csvFilename__ The input CSV file name from which to load - % dataset. 
In this variant, you should set the second - % argument to an empty array. + % dataset. In this variant, you should set the second argument + % to an empty array. % % ## Output % * __status__ Success flag. % % ## Options % * __Data__ Training data options, specified as a cell array of - % key/value pairs of the form `{'key',val, ...}`. See below. + % key/value pairs of the form `{'key',val, ...}`. See below. % * __Flags__ The optional training flags, model-dependent. - % Not used. default 0 + % Not used. default 0 % % ### Options for `Data` (first variant with samples and responses) % * __Layout__ Sample types. Default 'Row'. One of: - % * __Row__ each training sample is a row of samples. - % * __Col__ each training sample occupies a column of - % samples. + % * __Row__ each training sample is a row of samples. + % * __Col__ each training sample occupies a column of samples. % * __VarIdx__ vector specifying which variables to use for - % training. It can be an integer vector (`int32`) containing - % 0-based variable indices or logical vector (`uint8` or - % `logical`) containing a mask of active variables. Not set - % by default, which uses all variables in the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based variable indices or logical vector (`uint8` or + % `logical`) containing a mask of active variables. Not set by + % default, which uses all variables in the input data. % * __SampleIdx__ vector specifying which samples to use for - % training. It can be an integer vector (`int32`) containing - % 0-based sample indices or logical vector (`uint8` or - % `logical`) containing a mask of training samples of - % interest. Not set by default, which uses all samples in - % the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based sample indices or logical vector (`uint8` or + % `logical`) containing a mask of training samples of interest. + % Not set by default, which uses all samples in the input data. % * __SampleWeights__ optional floating-point vector with weights - % for each sample. Some samples may be more important than - % others for training. You may want to raise the weight of - % certain classes to find the right balance between hit-rate - % and false-alarm rate, and so on. Not set by default, which - % effectively assigns an equal weight of 1 for all samples. + % for each sample. Some samples may be more important than + % others for training. You may want to raise the weight of + % certain classes to find the right balance between hit-rate and + % false-alarm rate, and so on. Not set by default, which + % effectively assigns an equal weight of 1 for all samples. % * __VarType__ optional vector of type `uint8` and size - % `<num_of_input_variables + num_of_output_variables>`, - % containing types of each input and output variable. By - % default considers all variables as numerical (both input - % and output variables). In case there is only one output - % variable of integer type, it is considered categorical. - % You can also specify a cell-array of strings (or as one - % string of single characters, e.g 'NNNC'). Possible values: - % * __Numerical__, __N__ same as 'Ordered' - % * __Ordered__, __O__ ordered variables - % * __Categorical__, __C__ categorical variables + % `<num_of_input_variables + num_of_output_variables>`, + % containing types of each input and output variable. By default + % considers all variables as numerical (both input and output + % variables). In case there is only one output variable of + % integer type, it is considered categorical.
You can also + % specify a cell-array of strings (or as one string of single + % characters, e.g 'NNNC'). Possible values: + % * __Numerical__, __N__ same as 'Ordered' + % * __Ordered__, __O__ ordered variables + % * __Categorical__, __C__ categorical variables % * __MissingMask__ Indicator mask for missing observation (not - % currently implemented). Not set by default + % currently implemented). Not set by default % * __TrainTestSplitCount__ divides the dataset into train/test - % sets, by specifying number of samples to use for the test - % set. By default all samples are used for the training set. + % sets, by specifying number of samples to use for the test set. + % By default all samples are used for the training set. % * __TrainTestSplitRatio__ divides the dataset into train/test - % sets, by specifying ratio of samples to use for the test - % set. By default all samples are used for the training set. + % sets, by specifying ratio of samples to use for the test set. + % By default all samples are used for the training set. % * __TrainTestSplitShuffle__ when splitting dataset into - % train/test sets, specify whether to shuffle the samples. - % Otherwise samples are assigned sequentially (first train - % then test). default true + % train/test sets, specify whether to shuffle the samples. + % Otherwise samples are assigned sequentially (first train then + % test). default true % % ### Options for `Data` (second variant for loading CSV file) % * __HeaderLineCount__ The number of lines in the beginning to - % skip; besides the header, the function also skips empty - % lines and lines staring with '#'. default 1 + % skip; besides the header, the function also skips empty lines + % and lines starting with '#'. default 1 % * __ResponseStartIdx__ Index of the first output variable. If - % -1, the function considers the last variable as the - % response. If the dataset only contains input variables and - % no responses, use `ResponseStartIdx = -2` and - % `ResponseEndIdx = 0`, then the output variables vector - % will just contain zeros. default -1 + % -1, the function considers the last variable as the response. + % If the dataset only contains input variables and no responses, + % use `ResponseStartIdx = -2` and `ResponseEndIdx = 0`, then the + % output variables vector will just contain zeros. default -1 % * __ResponseEndIdx__ Index of the last output variable + 1. If - % -1, then there is single response variable at - % `ResponseStartIdx`. default -1 + % -1, then there is a single response variable at + % `ResponseStartIdx`. default -1 % * __VarTypeSpec__ The optional text string that specifies the - % variables' types. It has the format - % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, - % variables from `n1` to `n2` (inclusive range), `n3`, `n4` - % to `n5` ... are considered ordered and `n6`, `n7` to - % `n8` ... are considered as categorical. The range - % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` - % should cover all the variables. If `VarTypeSpec` is not - % specified, then algorithm uses the following rules: - % * all input variables are considered ordered by default. - % If some column contains has non- numerical values, e.g. - % 'apple', 'pear', 'apple', 'apple', 'mango', the - % corresponding variable is considered categorical. - % * if there are several output variables, they are all - % considered as ordered. Errors are reported when - % non-numerical values are used.
- % * if there is a single output variable, then if its values - % are non-numerical or are all integers, then it's - % considered categorical. Otherwise, it's considered - % ordered. + % variables' types. It has the format + % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, variables + % from `n1` to `n2` (inclusive range), `n3`, `n4` to `n5` ... + % are considered ordered and `n6`, `n7` to `n8` ... are + % considered as categorical. The range + % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` should + % cover all the variables. If `VarTypeSpec` is not specified, + % then the algorithm uses the following rules: + % * all input variables are considered ordered by default. If + % some column contains non-numerical values, e.g. + % 'apple', 'pear', 'apple', 'apple', 'mango', the + % corresponding variable is considered categorical. + % * if there are several output variables, they are all + % considered as ordered. Errors are reported when + % non-numerical values are used. + % * if there is a single output variable, then if its values are + % non-numerical or are all integers, then it's considered + % categorical. Otherwise, it's considered ordered. % * __Delimiter__ The character used to separate values in each - % line. default ',' + % line. default ',' % * __Missing__ The character used to specify missing - % measurements. It should not be a digit. Although it's a - % non-numerical value, it surely does not affect the - % decision of whether the variable ordered or categorical. - % default '?' + % measurements. It should not be a digit. Although it's a + % non-numerical value, it surely does not affect the decision of + % whether the variable is ordered or categorical. default '?' % * __TrainTestSplitCount__ same as above. % * __TrainTestSplitRatio__ same as above. % * __TrainTestSplitShuffle__ same as above. @@ -523,10 +517,10 @@ function load(this, fname_or_str, varargin) function [err,resp] = calcError(this, samples, responses, varargin) %CALCERROR Computes error on the training or test dataset % - % err = model.calcError(samples, responses) - % err = model.calcError(csvFilename, []) - % [err,resp] = model.calcError(...) - % [...] = model.calcError(..., 'OptionName', optionValue, ...) + % err = model.calcError(samples, responses) + % err = model.calcError(csvFilename, []) + % [err,resp] = model.calcError(...) + % [...] = model.calcError(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ See the train method. @@ -540,14 +534,13 @@ function load(this, fname_or_str, varargin) % ## Options % * __Data__ See the train method. % * __TestError__ if true, the error is computed over the test - % subset of the data, otherwise it's computed over the - % training subset of the data. Please note that if you - % loaded a completely different dataset to evaluate an - % already trained classifier, you will probably want not to - % set the test subset at all with `TrainTestSplitRatio` and - % specify `TestError=false`, so that the error is computed - % for the whole new set. Yes, this sounds a bit confusing. - % default false + % subset of the data, otherwise it's computed over the training + % subset of the data. Please note that if you loaded a + % completely different dataset to evaluate an already trained + % classifier, you will probably not want to set the test subset + % at all with `TrainTestSplitRatio` and specify + % `TestError=false`, so that the error is computed for the whole + % new set. Yes, this sounds a bit confusing.
default false % % The method uses the predict method to compute the error. For % regression models the error is computed as RMS, for classifiers @@ -561,12 +554,12 @@ function load(this, fname_or_str, varargin) function [results,f] = predict(this, samples, varargin) %PREDICT Predicts response(s) for the provided sample(s) % - % [results,f] = model.predict(samples) - % [...] = model.predict(..., 'OptionName', optionValue, ...) + % [results,f] = model.predict(samples) + % [...] = model.predict(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ Input row vectors (one or more) stored as rows of - % a floating-point matrix. + % a floating-point matrix. % % ## Output % * __results__ Output labels or regression values. % * __f__ The same as the response of the first sample. % % ## Options % * __Flags__ The optional predict flags, model-dependent. For - % convenience, you can set the individual flag options - % below, instead of directly setting bits here. default 0 + % convenience, you can set the individual flag options below, + % instead of directly setting bits here. default 0 % * __RawOutput__ makes the method return the raw results (the - % sum), not the class label. default false + % sum), not the class label. default false % * __CompressedInput__ compressed data, containing only the - % active samples/variables. default false + % active samples/variables. default false % * __PreprocessedInput__ This parameter is normally set to false, - % implying a regular input. If it is true, the method - % assumes that all the values of the discrete input - % variables have been already normalized to 0..NCategories - % ranges since the decision tree uses such normalized - % representation internally. It is useful for faster - % prediction with tree ensembles. For ordered input - % variables, the flag is not used. Default false + % implying a regular input. If it is true, the method assumes + % that all the values of the discrete input variables have been + % already normalized to 0..NCategories ranges since the decision + % tree uses such normalized representation internally. It is + % useful for faster prediction with tree ensembles. For ordered + % input variables, the flag is not used. Default false % * __PredictAuto__ Setting this to true, overrides all of the - % other `Predict*` flags. It automatically chooses between - % `PredictSum` and `PredictMaxVote` (if the model is a - % regressor or the number of classes are 2 with `RawOutput` - % set then it picks `PredictSum`, otherwise it picks - % `PredictMaxVote` by default). default true + % other `Predict*` flags. It automatically chooses between + % `PredictSum` and `PredictMaxVote` (if the model is a regressor + % or the number of classes is 2 with `RawOutput` set then it + % picks `PredictSum`, otherwise it picks `PredictMaxVote` by + % default). default true % * __PredictSum__ If true then return sum of votes instead of the - % class label. default false + % class label. default false % * __PredictMaxVote__ If true then return the class label with - % the max vote. default false + % the max vote. default false % % The method traverses the decision tree and returns the % prediction result from the reached leaf node, either the class @@ -614,7 +606,7 @@ function load(this, fname_or_str, varargin) function roots = getRoots(this) %GETROOTS Returns indices of root nodes % - % roots = classifier.getRoots() + % roots = classifier.getRoots() % % ## Output % * __roots__ vector of indices.
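As a quick end-to-end illustration of the train/calcError/predict methods documented above, here is a sketch on synthetic data (it assumes the usual cv.DTrees training properties `MaxDepth` and `CVFolds` are available, as in stock mexopencv):

    X = [randn(50,2)+1; randn(50,2)-1];         % two Gaussian blobs
    Y = int32([zeros(50,1); ones(50,1)]);       % integer labels -> classification
    model = cv.DTrees();
    model.MaxDepth = 4;                         % keep the tree shallow
    model.CVFolds = 0;                          % skip built-in pruning
    model.train(X, Y);
    trainErr = model.calcError(X, Y, 'TestError',false);
    labels = model.predict([1 1; -1 -1]);       % expected classes: 0 then 1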
@@ -627,24 +619,22 @@ function load(this, fname_or_str, varargin) function nodes = getNodes(this) %GETNODES Returns all the nodes % - % nodes = classifier.getNodes() + % nodes = classifier.getNodes() % % ## Output % * __nodes__ Struct-array with the following fields: - % * __value__ Value at the node: a class label in case of - % classification or estimated function value in case - % of regression. - % * __classIdx__ Class index normalized to `0..class_count-1` - % range and assigned to the node. It is used - % internally in classification trees and tree - % ensembles. - % * __parent__ Index of the parent node. - % * __left__ Index of the left child node. - % * __right__ Index of right child node. - % * __defaultDir__ Default direction where to go (-1: left - % or +1: right). It helps in the case of missing - % values. - % * __split__ Index of the first split. + % * __value__ Value at the node: a class label in case of + % classification or estimated function value in case of + % regression. + % * __classIdx__ Class index normalized to `0..class_count-1` + % range and assigned to the node. It is used internally in + % classification trees and tree ensembles. + % * __parent__ Index of the parent node. + % * __left__ Index of the left child node. + % * __right__ Index of right child node. + % * __defaultDir__ Default direction where to go (-1 left or +1 + % right). It helps in the case of missing values. + % * __split__ Index of the first split. % % all the node indices are zero-based indices in the returned % vector. @@ -657,26 +647,25 @@ function load(this, fname_or_str, varargin) function splits = getSplits(this) %GETSPLITS Returns all the splits % - % splits = classifier.getSplits() + % splits = classifier.getSplits() % % ## Output % * __splits__ Struct-array with the following fields: - % * __varIdx__ Index of variable on which the split is - % created. - % * __inversed__ If true, then the inverse split rule is - % used (i.e. left and right branches are exchanged in - % the rule expressions below). - % * __quality__ The split quality, a positive number. It is - % used to choose the best split. (It is also used to - % compute variable importance). - % * __next__ Index of the next split in the list of splits - % for the node (surrogate splits). - % * __c__ The threshold value in case of split on an ordered - % variable. The rule is: - % `if var_value < c, next_node = left; else next_node = right; end` - % * __subsetOfs__ Offset of the bitset used by the split on - % a categorical variable. The rule is: - % `if bitset(var_value) == 1, next_node = left; else next_node = right; end` + % * __varIdx__ Index of variable on which the split is created. + % * __inversed__ If true, then the inverse split rule is used + % (i.e. left and right branches are exchanged in the rule + % expressions below). + % * __quality__ The split quality, a positive number. It is used + % to choose the best split. (It is also used to compute + % variable importance). + % * __next__ Index of the next split in the list of splits for + % the node (surrogate splits). + % * __c__ The threshold value in case of split on an ordered + % variable. The rule is: + % `if var_value < c, next_node = left; else next_node = right; end` + % * __subsetOfs__ Offset of the bitset used by the split on a + % categorical variable. The rule is: + % `if bitset(var_value) == 1, next_node = left; else next_node = right; end` % % all the split indices are zero-based indices in the returned % vector. 
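To make the node/split layout concrete, the following hedged sketch walks a single tree from its root to a leaf for one sample `x`, using the ordered-variable rule quoted above (it assumes a leaf is marked by a negative `split` index, and ignores `inversed` and categorical splits):

    nodes = classifier.getNodes();
    splits = classifier.getSplits();
    roots = classifier.getRoots();
    idx = roots(1) + 1;                   % zero-based root index -> MATLAB index
    while nodes(idx).split >= 0           % assumed leaf marker: split < 0
        s = splits(nodes(idx).split + 1);
        if x(s.varIdx + 1) < s.c          % rule for an ordered variable
            idx = nodes(idx).left + 1;
        else
            idx = nodes(idx).right + 1;
        end
    end
    prediction = nodes(idx).value;        % class label or regression value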
@@ -689,7 +678,7 @@ function load(this, fname_or_str, varargin) function subsets = getSubsets(this) %GETSUBSETS Returns all the bitsets for categorical splits % - % subsets = classifier.getSubsets() + % subsets = classifier.getSubsets() % % ## Output % * __subsets__ vector of indices. diff --git a/+cv/DescriptorExtractor.m b/+cv/DescriptorExtractor.m index eaf95e0a7..ddbb4f299 100644 --- a/+cv/DescriptorExtractor.m +++ b/+cv/DescriptorExtractor.m @@ -18,43 +18,45 @@ % % ## Example % - % detector = cv.FeatureDetector('ORB'); - % keypoints = detector.detect(img); - % extractor = cv.DescriptorExtractor('ORB'); - % descriptors = extractor.compute(img, keypoints); + % detector = cv.FeatureDetector('ORB'); + % keypoints = detector.detect(img); + % extractor = cv.DescriptorExtractor('ORB'); + % descriptors = extractor.compute(img, keypoints); % % See also: cv.FeatureDetector, cv.DescriptorMatcher, % cv.BOWImgDescriptorExtractor, extractFeatures, binaryFeatures % properties (SetAccess = private) - id % Object ID - Type % Type of the extractor + % Object ID + id + % Type of the extractor + Type end methods function this = DescriptorExtractor(extractorType, varargin) %DESCRIPTOREXTRACTOR Creates a descriptor extractor by name % - % extractor = cv.DescriptorExtractor(type) - % extractor = cv.DescriptorExtractor(type, 'OptionName',optionValue, ...) + % extractor = cv.DescriptorExtractor(type) + % extractor = cv.DescriptorExtractor(type, 'OptionName',optionValue, ...) % % ## Input % * __type__ The following extractor types are supported: - % * __BRISK__ see cv.BRISK - % * __ORB__ see cv.ORB (default) - % * __KAZE__ see cv.KAZE - % * __AKAZE__ see cv.AKAZE - % * __SIFT__ see cv.SIFT (requires `xfeatures2d` module) - % * __SURF__ see cv.SURF (requires `xfeatures2d` module) - % * __FREAK__ see cv.FREAK (requires `xfeatures2d` module) - % * __BriefDescriptorExtractor__ see cv.BriefDescriptorExtractor - % (requires `xfeatures2d` module) - % * __LUCID__ see cv.LUCID (requires `xfeatures2d` module) - % * __LATCH__ see cv.LATCH (requires `xfeatures2d` module) - % * __DAISY__ see cv.DAISY (requires `xfeatures2d` module) - % * __VGG__ see cv.VGG (requires `xfeatures2d` module) - % * __BoostDesc__ see cv.BoostDesc (requires `xfeatures2d` module) + % * __BRISK__ see cv.BRISK + % * __ORB__ see cv.ORB (default) + % * __KAZE__ see cv.KAZE + % * __AKAZE__ see cv.AKAZE + % * __SIFT__ see cv.SIFT (requires `xfeatures2d` module) + % * __SURF__ see cv.SURF (requires `xfeatures2d` module) + % * __FREAK__ see cv.FREAK (requires `xfeatures2d` module) + % * __BriefDescriptorExtractor__ see cv.BriefDescriptorExtractor + % (requires `xfeatures2d` module) + % * __LUCID__ see cv.LUCID (requires `xfeatures2d` module) + % * __LATCH__ see cv.LATCH (requires `xfeatures2d` module) + % * __DAISY__ see cv.DAISY (requires `xfeatures2d` module) + % * __VGG__ see cv.VGG (requires `xfeatures2d` module) + % * __BoostDesc__ see cv.BoostDesc (requires `xfeatures2d` module) % % ## Options % Refer to the constructors of each descriptor extractor for a @@ -70,7 +72,7 @@ function delete(this) %DELETE Destructor % - % extractor.delete() + % extractor.delete() % % See also: cv.DescriptorExtractor % @@ -81,7 +83,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = extractor.typeid() + % typename = extractor.typeid() % % ## Output % * __typename__ Name of C++ type @@ -95,7 +97,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % extractor.clear() + % 
extractor.clear() % % See also: cv.DescriptorExtractor.empty, % cv.DescriptorExtractor.load @@ -106,11 +108,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = extractor.empty() + % b = extractor.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.DescriptorExtractor.clear, % cv.DescriptorExtractor.load @@ -121,7 +123,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % extractor.save(filename) + % extractor.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -137,21 +139,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % extractor.load(fname) - % extractor.load(str, 'FromString',true) - % extractor.load(..., 'OptionName',optionValue, ...) + % extractor.load(fname) + % extractor.load(str, 'FromString',true) + % extractor.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -165,11 +167,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = extractor.getDefaultName() + % name = extractor.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.DescriptorExtractor.save, % cv.DescriptorExtractor.load @@ -183,16 +185,16 @@ function load(this, fname_or_str, varargin) function ntype = defaultNorm(this) %DEFAULTNORM Returns the default norm type % - % ntype = extractor.defaultNorm() + % ntype = extractor.defaultNorm() % % ## Output % * __ntype__ Norm type. One of `cv::NormTypes`: - % * __Inf__ - % * __L1__ - % * __L2__ - % * __L2Sqr__ - % * __Hamming__ - % * __Hamming2__ + % * __Inf__ + % * __L1__ + % * __L2__ + % * __L2Sqr__ + % * __Hamming__ + % * __Hamming2__ % % See also: cv.DescriptorExtractor.compute, cv.DescriptorMatcher % @@ -202,7 +204,7 @@ function load(this, fname_or_str, varargin) function sz = descriptorSize(this) %DESCRIPTORSIZE Returns the descriptor size in bytes % - % sz = extractor.descriptorSize() + % sz = extractor.descriptorSize() % % ## Output % * __sz__ Descriptor size. @@ -216,7 +218,7 @@ function load(this, fname_or_str, varargin) function dtype = descriptorType(this) %DESCRIPTORTYPE Returns the descriptor type % - % dtype = extractor.descriptorType() + % dtype = extractor.descriptorType() % % ## Output % * __dtype__ Descriptor type, one of numeric MATLAB class names. 
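For instance, these query methods can be combined to pick a matching norm that fits the extractor (a small sketch; the returned values depend on the extractor chosen):

    extractor = cv.DescriptorExtractor('ORB');
    ntype = extractor.defaultNorm();      % 'Hamming' for binary descriptors such as ORB
    sz = extractor.descriptorSize();      % descriptor size in bytes
    cls = extractor.descriptorType();     % e.g. 'uint8'
    matcher = cv.DescriptorMatcher('BFMatcher', 'NormType',ntype);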
@@ -230,41 +232,41 @@ function load(this, fname_or_str, varargin) function [descriptors, keypoints] = compute(this, img, keypoints) %COMPUTE Computes the descriptors for a set of keypoints detected in an image or image set % - %    [descriptors, keypoints] = extractor.compute(img, keypoints) - %    [descriptors, keypoints] = extractor.compute(imgs, keypoints) + %     [descriptors, keypoints] = extractor.compute(img, keypoints) + %     [descriptors, keypoints] = extractor.compute(imgs, keypoints) % % ## Input % * __img__ Image (first variant). % * __imgs__ Image set (second variant), cell array of images. % * __keypoints__ Input collection of keypoints. Keypoints for - %       which a descriptor cannot be computed are removed and the - %       remaining ones may be reordered. Sometimes new keypoints - %       can be added, for example: cv.SIFT duplicates keypoint - %       with several dominant orientations (for each orientation). - %       In the first variant, this is a struct-array of detected - %       keypoints. In the second variant, it is a cell-array, - %       where `keypoints{i}` is a set of keypoints detected in - %       `images{i}` (a struct-array like before). - %       Each keypoint is a struct with the following fields: - %       * __pt__ coordinates of the keypoint `[x,y]` - %       * __size__ diameter of the meaningful keypoint neighborhood - %       * __angle__ computed orientation of the keypoint (-1 if not - %             applicable); it's in [0,360) degrees and measured - %             relative to image coordinate system (y-axis is - %             directed downward), i.e in clockwise. - %       * __response__ the response by which the most strong - %             keypoints have been selected. Can be used for further - %             sorting or subsampling. - %       * __octave__ octave (pyramid layer) from which the keypoint - %             has been extracted. - %       * **class_id** object class (if the keypoints need to be - %             clustered by an object they belong to). + %   which a descriptor cannot be computed are removed and the + %   remaining ones may be reordered. Sometimes new keypoints can + %   be added, for example: cv.SIFT duplicates a keypoint with + %   several dominant orientations (for each orientation). In the + %   first variant, this is a struct-array of detected keypoints. In + %   the second variant, it is a cell-array, where `keypoints{i}` + %   is a set of keypoints detected in `images{i}` (a struct-array + %   like before). Each keypoint is a struct with the following + %   fields: + %   * __pt__ coordinates of the keypoint `[x,y]` + %   * __size__ diameter of the meaningful keypoint neighborhood + %   * __angle__ computed orientation of the keypoint (-1 if not + %     applicable); it's in [0,360) degrees and measured relative + %     to image coordinate system (y-axis is directed downward), + %     i.e. clockwise. + %   * __response__ the response by which the most strong keypoints + %     have been selected. Can be used for further sorting or + %     subsampling. + %   * __octave__ octave (pyramid layer) from which the keypoint + %     has been extracted. + %   * **class_id** object class (if the keypoints need to be + %     clustered by an object they belong to). % % ## Output % * __descriptors__ Computed descriptors. In the second variant of - %       the method `descriptors{i}` are descriptors computed for a - %       `keypoints{i}`. Row `j` in `descriptors` (or - %       `descriptors{i}`) is the descriptor for `j`-th keypoint. + %   the method `descriptors{i}` are descriptors computed for a + %   `keypoints{i}`. Row `j` in `descriptors` (or `descriptors{i}`) + %   is the descriptor for the `j`-th keypoint. % * __keypoints__ Optional output with possibly updated keypoints.
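A sketch of the second (image set) variant described above; `im1` and `im2` are placeholder grayscale images:

    imgs = {im1, im2};
    detector = cv.FeatureDetector('ORB');
    keypoints = cellfun(@(im) detector.detect(im), imgs, 'UniformOutput',false);
    extractor = cv.DescriptorExtractor('ORB');
    [descriptors, keypoints] = extractor.compute(imgs, keypoints);
    % descriptors{i}(j,:) is the descriptor of the j-th keypoint in imgs{i}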
% % See also: cv.DescriptorExtractor.DescriptorExtractor diff --git a/+cv/DescriptorMatcher.m b/+cv/DescriptorMatcher.m index a200e89e3..3785f1549 100644 --- a/+cv/DescriptorMatcher.m +++ b/+cv/DescriptorMatcher.m @@ -15,115 +15,115 @@ % % ## Example % - % X = rand(100,10); - % Y = rand(100,10); - % matcher = cv.DescriptorMatcher('BruteForce'); - % matcher.add(X); - % matcher.train(); % Optional for BruteForce matcher - % matches = matcher.match(Y); + % X = rand(100,10); + % Y = rand(100,10); + % matcher = cv.DescriptorMatcher('BruteForce'); + % matcher.add(X); + % matcher.train(); % Optional for BruteForce matcher + % matches = matcher.match(Y); % % See also: cv.DescriptorExtractor, cv.FeatureDetector, cv.drawMatches, % matchFeatures % properties (SetAccess = private) - id % Object ID - Type % Type of the matcher + % Object ID + id + % Type of the matcher + Type end methods function this = DescriptorMatcher(matcherType, varargin) %DESCRIPTORMATCHER Creates a descriptor matcher by name % - % matcher = cv.DescriptorMatcher(type) - % matcher = cv.DescriptorMatcher(type, 'OptionName',optionValue, ...) + % matcher = cv.DescriptorMatcher(type) + % matcher = cv.DescriptorMatcher(type, 'OptionName',optionValue, ...) % % ## Input % * __type__ In the first variant, it creates a descriptor matcher - % of a given type with the default parameters (using default - % constructor). The following types are recognized: - % - % * __BruteForce__ (default) L2 distance - % * __BruteForce-SL2__ L2SQR distance - % * __BruteForce-L1__ L1 distance - % * __BruteForce-Hamming__, __BruteForce-HammingLUT__ - % * __BruteForce-Hamming(2)__ - % * __FlannBased__ Flann-based indexing - % - % In the second variant, it creates a matcher of the given - % type using the specified parameters. The following - % descriptor matcher types are supported: - % - % * __BFMatcher__ Brute-force descriptor matcher. For each - % descriptor in the first set, this matcher finds the - % closest descriptor in the second set by trying each - % one. This descriptor matcher supports masking - % permissible matches of descriptor sets. - % * __FlannBasedMatcher__ Flann-based descriptor matcher. - % This matcher trains `flann::Index_` on a train - % descriptor collection and calls its nearest search - % methods to find the best matches. So, this matcher - % may be faster when matching a large train collection - % than the brute force matcher. `FlannBasedMatcher` - % does not support masking permissible matches of - % descriptor sets because `flann::Index` does not - % support this. + % of a given type with the default parameters (using default + % constructor). The following types are recognized: + % + % * __BruteForce__ (default) L2 distance + % * __BruteForce-SL2__ L2SQR distance + % * __BruteForce-L1__ L1 distance + % * __BruteForce-Hamming__, __BruteForce-HammingLUT__ + % * __BruteForce-Hamming(2)__ + % * __FlannBased__ Flann-based indexing + % + % In the second variant, it creates a matcher of the given type + % using the specified parameters. The following descriptor + % matcher types are supported: + % + % * __BFMatcher__ Brute-force descriptor matcher. For each + % descriptor in the first set, this matcher finds the closest + % descriptor in the second set by trying each one. This + % descriptor matcher supports masking permissible matches of + % descriptor sets. + % * __FlannBasedMatcher__ Flann-based descriptor matcher. 
This + %   matcher trains `flann::Index_` on a train descriptor + %   collection and calls its nearest search methods to find the + %   best matches. So, this matcher may be faster when matching a + %   large train collection than the brute force matcher. + %   `FlannBasedMatcher` does not support masking permissible + %   matches of descriptor sets because `flann::Index` does not + %   support this. % % ## Options % The Brute-force matcher constructor (`BFMatcher`) accepts the % following options: % % * __NormType__ See cv.DescriptorExtractor.defaultNorm, default - %       'L2'. One of: - %       * __L1__, __L2__ L1 and L2 norms are preferable choices - %             for cv.SIFT and cv.SURF descriptors. - %       * __Hamming__ should be used with cv.ORB, cv.BRISK and - %             cv.BriefDescriptorExtractor. - %       * __Hamming2__ should be used with cv.ORB when `WTA_K` - %             equals 3 or 4 (see cv.ORB.WTA_K description). + %   'L2'. One of: + %   * __L1__, __L2__ L1 and L2 norms are preferable choices for + %     cv.SIFT and cv.SURF descriptors. + %   * __Hamming__ should be used with cv.ORB, cv.BRISK and + %     cv.BriefDescriptorExtractor. + %   * __Hamming2__ should be used with cv.ORB when `WTA_K` equals + %     3 or 4 (see cv.ORB.WTA_K description). % * __CrossCheck__ If it is false, this will be the default - %       `BFMatcher` behaviour when it finds the `k` nearest - %       neighbors for each query descriptor. If `CrossCheck==true`, - %       then the cv.DescriptorMatcher.knnMatch method with `k=1` - %       will only return pairs `(i,j)` such that for i-th query - %       descriptor the j-th descriptor in the matcher's collection - %       is the nearest and vice versa, i.e. the `BFMatcher` will - %       only return consistent pairs. Such technique usually - %       produces best results with minimal number of outliers when - %       there are enough matches. This is alternative to the ratio - %       test, used by *D. Lowe* in SIFT paper. default false + %   `BFMatcher` behaviour when it finds the `k` nearest neighbors + %   for each query descriptor. If `CrossCheck==true`, then the + %   cv.DescriptorMatcher.knnMatch method with `k=1` will only + %   return pairs `(i,j)` such that for the i-th query descriptor the + %   j-th descriptor in the matcher's collection is the nearest and + %   vice versa, i.e. the `BFMatcher` will only return consistent + %   pairs. Such a technique usually produces the best results with + %   a minimal number of outliers when there are enough matches. This + %   is an alternative to the ratio test, used by *D. Lowe* in the SIFT + %   paper. default false % % The Flann-based matcher constructor (`FlannBasedMatcher`) takes % the following optional arguments: % % * __Index__ Type of indexer, default 'KDTree'. One of the below. - %       Each index type takes optional arguments (see IndexParams - %       options below). You can specify the indexer by a cell - %       array that starts from the type name followed by option - %       arguments: `{'Type', 'OptionName',optionValue, ...}`. - %       * __Linear__ Brute-force matching, linear search - %       * __KDTree__ Randomized kd-trees, parallel search - %       * __KMeans__ Hierarchical k-means tree - %       * __HierarchicalClustering__ Hierarchical index - %       * __Composite__ Combination of KDTree and KMeans - %       * __LSH__ Multi-probe LSH - %       * __Autotuned__ Automatic tuning to one of the above - %             (`Linear`, `KDTree`, `KMeans`) - %       * __Saved__ Load saved index from a file - % - % * __Search__ Option in matching operation. Takes a cell - %       array of option pairs: - %       * __Checks__ The number of times the tree(s) in the index - %             should be recursively traversed.
A higher value for - % this parameter would give better search precision, - % but also take more time. If automatic configuration - % was used when the index was created, the number of - % checks required to achieve the specified precision - % was also computed, in which case this parameter is - % ignored. -1 for unlimited. default 32 - % * __EPS__ search for eps-approximate neighbours. default 0 - % * __Sorted__ only for radius search, require neighbours - % sorted by distance. default true + % Each index type takes optional arguments (see IndexParams + % options below). You can specify the indexer by a cell array + % that starts from the type name followed by option arguments: + % `{'Type', 'OptionName',optionValue, ...}`. + % * __Linear__ Brute-force matching, linear search + % * __KDTree__ Randomized kd-trees, parallel search + % * __KMeans__ Hierarchical k-means tree + % * __HierarchicalClustering__ Hierarchical index + % * __Composite__ Combination of KDTree and KMeans + % * __LSH__ Multi-probe LSH + % * __Autotuned__ Automatic tuning to one of the above + % (`Linear`, `KDTree`, `KMeans`) + % * __Saved__ Load saved index from a file + % + % * __Search__ Option in matching operation. Takes a cell array of + % option pairs: + % * __Checks__ The number of times the tree(s) in the index + % should be recursively traversed. A higher value for this + % parameter would give better search precision, but also take + % more time. If automatic configuration was used when the + % index was created, the number of checks required to achieve + % the specified precision was also computed, in which case + % this parameter is ignored. -1 for unlimited. default 32 + % * __EPS__ search for eps-approximate neighbours. default 0 + % * __Sorted__ only for radius search, require neighbours sorted + % by distance. default true % % ## IndexParams Options for `FlannBasedMatcher` % @@ -138,31 +138,31 @@ % % ### `KDTree` and `Composite` % * __Trees__ The number of parallel kd-trees to use. Good values - % are in the range [1..16]. default 4 + % are in the range [1..16]. default 4 % % ### `KMeans` and `Composite` % * __Branching__ The branching factor to use for the hierarchical - % k-means tree. default 32 + % k-means tree. default 32 % * __Iterations__ The maximum number of iterations to use in the - % k-means clustering stage when building the k-means tree. - % A value of -1 used here means that the k-means clustering - % should be iterated until convergence. default 11 + % k-means clustering stage when building the k-means tree. A + % value of -1 used here means that the k-means clustering should + % be iterated until convergence. default 11 % * __CentersInit__ The algorithm to use for selecting the initial - % centers when performing a k-means clustering step. The - % possible values are (default is 'Random'): - % * __Random__ picks the initial cluster centers randomly - % * __Gonzales__ picks the initial centers using Gonzales - % algorithm - % * __KMeansPP__ picks the initial centers using the - % algorithm suggested in [ArthurKmeansPP2007] - % * __Groupwise__ chooses the initial centers in a way - % inspired by Gonzales (by Pierre-Emmanuel Viel). + % centers when performing a k-means clustering step. 
The + %   possible values are (default is 'Random'): + %   * __Random__ picks the initial cluster centers randomly + %   * __Gonzales__ picks the initial centers using Gonzales + %     algorithm + %   * __KMeansPP__ picks the initial centers using the algorithm + %     suggested in [ArthurKmeansPP2007] + %   * __Groupwise__ chooses the initial centers in a way inspired + %     by Gonzales (by Pierre-Emmanuel Viel). % * __CBIndex__ This parameter (cluster boundary index) influences - %       the way exploration is performed in the hierarchical - %       kmeans tree. When `CBIndex` is zero the next kmeans domain - %       to be explored is choosen to be the one with the closest - %       center. A value greater then zero also takes into account - %       the size of the domain. default 0.2 + %   the way exploration is performed in the hierarchical kmeans + %   tree. When `CBIndex` is zero the next kmeans domain to be + %   explored is chosen to be the one with the closest center. A + %   value greater than zero also takes into account the size of + %   the domain. default 0.2 % % ### `HierarchicalClustering` % * __Branching__ same as above. @@ -172,51 +172,51 @@ % % ### `LSH` % * __TableNumber__ The number of hash tables to use (usually - %       between 10 and 30). default 20 + %   between 10 and 30). default 20 % * __KeySize__ The length of the key in the hash tables (usually - %       between 10 and 20). default 15 + %   between 10 and 20). default 15 % * __MultiProbeLevel__ Number of levels to use in multi-probe - %       (0 is regular LSH, 2 is recommended). default 0 + %   (0 is regular LSH, 2 is recommended). default 0 % % ### `Autotuned` % * __TargetPrecision__ Is a number between 0 and 1 specifying the - %       percentage of the approximate nearest-neighbor searches - %       that return the exact nearest-neighbor. Using a higher - %       value for this parameter gives more accurate results, but - %       the search takes longer. The optimum value usually depends - %       on the application. default 0.8 + %   percentage of the approximate nearest-neighbor searches that + %   return the exact nearest-neighbor. Using a higher value for + %   this parameter gives more accurate results, but the search + %   takes longer. The optimum value usually depends on the + %   application. default 0.8 % * __BuildWeight__ Specifies the importance of the index build - %       time raported to the nearest-neighbor search time. In some - %       applications it is acceptable for the index build step to - %       take a long time if the subsequent searches in the index - %       can be performed very fast. In other applications it is - %       required that the index be build as fast as possible even - %       if that leads to slightly longer search times. default 0.01 + %   time relative to the nearest-neighbor search time. In some + %   applications it is acceptable for the index build step to take + %   a long time if the subsequent searches in the index can be + %   performed very fast. In other applications it is required that + %   the index be built as fast as possible even if that leads to + %   slightly longer search times. default 0.01 % * __MemoryWeight__ Is used to specify the tradeoff between time - %       (index build time and search time) and memory used by the - %       index. A value less than 1 gives more importance to the - %       time spent and a value greater than 1 gives more - %       importance to the memory usage. default 0 + %   (index build time and search time) and memory used by the + %   index. A value less than 1 gives more importance to the time + %   spent and a value greater than 1 gives more importance to the + %   memory usage.
default 0 % * __SampleFraction__ Is a number between 0 and 1 indicating what - %       fraction of the dataset to use in the automatic parameter - %       configuration algorithm. Running the algorithm on the full - %       dataset gives the most accurate results, but for very - %       large datasets can take longer than desired. In such case - %       using just a fraction of the data helps speeding up this - %       algorithm while still giving good approximations of the - %       optimum parameters. default 0.1 + %   fraction of the dataset to use in the automatic parameter + %   configuration algorithm. Running the algorithm on the full + %   dataset gives the most accurate results, but for very large + %   datasets it can take longer than desired. In such a case, using + %   just a fraction of the data helps speed up this algorithm while + %   still giving good approximations of the optimum parameters. + %   default 0.1 % % ## Example % For example, `KDTree` with tree size = 4 is specified by: % - %    matcher = cv.DescriptorMatcher('FlannBasedMatcher', ... - %        'Index',  {'KDTree', 'Trees', 4}, ... - %        'Search', {'Sorted', true}) + %     matcher = cv.DescriptorMatcher('FlannBasedMatcher', ... + %         'Index',  {'KDTree', 'Trees', 4}, ... + %         'Search', {'Sorted', true}) % % Here is an example for loading a saved index: % - %    matcher = cv.DescriptorMatcher('FlannBasedMatcher', ... - %        'Index', {'Saved', '/path/to/saved/index.xml'}) + %     matcher = cv.DescriptorMatcher('FlannBasedMatcher', ... + %         'Index', {'Saved', '/path/to/saved/index.xml'}) % % ## References % [ArthurKmeansPP2007]: @@ -241,7 +241,7 @@ function delete(this) %DELETE Destructor % - %    obj.delete() + %     matcher.delete() % % See also: cv.DescriptorMatcher % @@ -252,7 +252,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - %    typename = obj.typeid() + %     typename = matcher.typeid() % % ## Output % * __typename__ Name of C++ type @@ -266,7 +266,7 @@ function delete(this) function clear(this) %CLEAR Clears the train descriptor collection % - %    matcher.clear() + %     matcher.clear() % % See also: cv.DescriptorMatcher.empty % @@ -276,7 +276,7 @@ function clear(this) function status = empty(this) %EMPTY Returns true if there are no train descriptors in the collection % - %    status = matcher.empty() + %     status = matcher.empty() % % ## Output % * __status__ boolean status @@ -289,7 +289,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - %    obj.save(filename) + %     matcher.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -305,21 +305,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - %    obj.load(fname) - %    obj.load(str, 'FromString',true) - %    obj.load(..., 'OptionName',optionValue, ...) + %     matcher.load(fname) + %     matcher.load(str, 'FromString',true) + %     matcher.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - %       load. + %   load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - %       the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - %       a filename or a string containing the serialized model. - %       default false + %   the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + %   filename or a string containing the serialized model.
+ % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -333,11 +333,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = matcher.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.DescriptorMatcher.save, cv.DescriptorMatcher.load % @@ -350,7 +350,7 @@ function load(this, fname_or_str, varargin) function status = isMaskSupported(this) %ISMASKSUPPORTED Returns true if the descriptor matcher supports masking permissible matches % - % status = matcher.isMaskSupported() + % status = matcher.isMaskSupported() % % ## Output % * __status__ boolean status. @@ -366,11 +366,11 @@ function load(this, fname_or_str, varargin) function descriptors = getTrainDescriptors(this) %GETTRAINDESCRIPTORS Returns the train descriptor collection % - % descriptors = matcher.getTrainDescriptors() + % descriptors = matcher.getTrainDescriptors() % - % ## Outpt + % ## Output % * __descriptors__ Set of train descriptors. A cell array of - % matrices. + % matrices. % % See also: cv.DescriptorMatcher.add % @@ -380,16 +380,16 @@ function load(this, fname_or_str, varargin) function add(this, descriptors) %ADD Adds descriptors to train a descriptor collection % - % matcher.add(descriptors) + % matcher.add(descriptors) % % If the collection is not empty, the new descriptors are added % to existing train descriptors. % % ## Input - % * __descriptors__ Descriptors to add. Each `descriptors{i}` is - % a set of descriptors from the same train image. - % Can be either a matrix or a cell array of matrices - % (matrices of type `uint8` or `single`) + % * __descriptors__ Descriptors to add. Each `descriptors{i}` is a + % set of descriptors from the same train image. Can be either a + % matrix or a cell array of matrices (matrices of type `uint8` + % or `single`) % % See also: cv.DescriptorMatcher.getTrainDescriptors % @@ -399,7 +399,7 @@ function add(this, descriptors) function train(this) %TRAIN Trains a descriptor matcher % - % matcher.train() + % matcher.train() % % Trains a descriptor matcher (for example, the flann index). In all % methods to match, the method `train` is run every time before @@ -414,38 +414,38 @@ function train(this) function matches = match(this, queryDescriptors, varargin) %MATCH Finds the best match for each descriptor from a query set % - % matches = matcher.match(queryDescriptors, trainDescriptors) - % matches = matcher.match(queryDescriptors) - % [...] = matcher.match(..., 'OptionName', optionValue, ...) + % matches = matcher.match(queryDescriptors, trainDescriptors) + % matches = matcher.match(queryDescriptors) + % [...] = matcher.match(..., 'OptionName', optionValue, ...) % % ## Input % * __queryDescriptors__ Query set of descriptors. % * __trainDescriptors__ Train set of descriptors. This set is not - % added to the train descriptors collection stored in the - % class object. + % added to the train descriptors collection stored in the class + % object. % % ## Output % * __matches__ Matches. If a query descriptor is masked out in - % `Mask`, no match is added for this descriptor. So, - % `matches` size may be smaller than the query descriptors - % count. 
A 1-by-N structure array with the following fields: - % * __queryIdx__ query descriptor index (zero-based index) - % * __trainIdx__ train descriptor index (zero-based index) - % * __imgIdx__ train image index (zero-based index) - % * __distance__ distance between descriptors (scalar) + % `Mask`, no match is added for this descriptor. So, + % `matches` size may be smaller than the query descriptors + % count. A 1-by-N structure array with the following fields: + % * __queryIdx__ query descriptor index (zero-based index) + % * __trainIdx__ train descriptor index (zero-based index) + % * __imgIdx__ train image index (zero-based index) + % * __distance__ distance between descriptors (scalar) % % ## Options % * __Mask__ default empty - % * In the first form, mask specifying permissible matches - % between an input query and train matrices of descriptors. - % Matrix of size - % `[size(queryDescriptors,1),size(trainDescriptors,1)]`. - % * In the second form, set of masks. Each `masks{i}` - % specifies permissible matches between the input query - % descriptors and stored train descriptors from the i-th - % image `trainDescCollection{i}`. Cell array of length - % `length(trainDescriptors)`, each a matrix of size - % `[size(queryDescriptors,1),size(trainDescriptors{i},1)]`. + % * In the first form, mask specifying permissible matches + % between an input query and train matrices of descriptors. + % Matrix of size + % `[size(queryDescriptors,1),size(trainDescriptors,1)]`. + % * In the second form, set of masks. Each `masks{i}` specifies + % permissible matches between the input query descriptors and + % stored train descriptors from the i-th image + % `trainDescCollection{i}`. Cell array of length + % `length(trainDescriptors)`, each a matrix of size + % `[size(queryDescriptors,1),size(trainDescriptors{i},1)]`. % % In the first variant of this method, the train descriptors are % passed as an input argument. In the second variant of the @@ -464,53 +464,52 @@ function train(this) function matches = knnMatch(this, queryDescriptors, varargin) %KNNMATCH Finds the k best matches for each descriptor from a query set % - % matches = matcher.knnMatch(queryDescriptors, trainDescriptors, k) - % matches = matcher.knnMatch(queryDescriptors, k) - % [...] = matcher.knnMatch(..., 'OptionName', optionValue, ...) + % matches = matcher.knnMatch(queryDescriptors, trainDescriptors, k) + % matches = matcher.knnMatch(queryDescriptors, k) + % [...] = matcher.knnMatch(..., 'OptionName', optionValue, ...) % % ## Input % * __queryDescriptors__ Query set of descriptors. % * __trainDescriptors__ Train set of descriptors. This set is not - % added to the train descriptors collection stored in the - % class object. + % added to the train descriptors collection stored in the class + % object. % * __k__ Count of best matches found per each query descriptor or - % less if a query descriptor has less than `k` possible - % matches in total. + % less if a query descriptor has less than `k` possible matches + % in total. % % ## Output % * __matches__ Matches. Each `matches{i}` is `k` or less matches - % for the same query descriptor. A cell array of length - % `size(queryDescriptors,1)`, each cell is a 1-by-(k or less) - % structure array that has the following fields: - % * __queryIdx__ query descriptor index (zero-based index) - % * __trainIdx__ train descriptor index (zero-based index) - % * __imgIdx__ train image index (zero-based index) - % * __distance__ distance between descriptors (scalar) + % for the same query descriptor. 
A cell array of length + %   `size(queryDescriptors,1)`, each cell is a 1-by-(k or less) + %   structure array that has the following fields: + %   * __queryIdx__ query descriptor index (zero-based index) + %   * __trainIdx__ train descriptor index (zero-based index) + %   * __imgIdx__ train image index (zero-based index) + %   * __distance__ distance between descriptors (scalar) % % ## Options % * __Mask__ default empty - %       * In the first form, mask specifying permissible matches - %         between an input query and train matrices of descriptors. - %         Matrix of size - %         `[size(queryDescriptors,1),size(trainDescriptors,1)]`. - %       * In the second form, set of masks. Each `masks{i}` - %         specifies permissible matches between the input query - %         descriptors and stored train descriptors from the i-th - %         image `trainDescCollection{i}`. Cell array of length - %         `length(trainDescriptors)`, each a matrix of size - %         `[size(queryDescriptors,1),size(trainDescriptors{i},1)]`. + %   * In the first form, mask specifying permissible matches + %     between an input query and train matrices of descriptors. + %     Matrix of size + %     `[size(queryDescriptors,1),size(trainDescriptors,1)]`. + %   * In the second form, set of masks. Each `masks{i}` specifies + %     permissible matches between the input query descriptors and + %     stored train descriptors from the i-th image + %     `trainDescCollection{i}`. Cell array of length + %     `length(trainDescriptors)`, each a matrix of size + %     `[size(queryDescriptors,1),size(trainDescriptors{i},1)]`. % * __CompactResult__ Parameter used when the mask (or masks) is - %       not empty. If `CompactResult` is false, the `matches` - %       vector has the same size as `queryDescriptors` rows. If - %       `CompactResult` is true, the matches vector does not - %       contain matches for fully masked-out query descriptors. - %       default false - % - % This extended variant of cv.DescriptorMatcher.match method - % finds several best matches for each query descriptor. The - % matches are returned in the distance increasing order. See - % cv.DescriptorMatcher.match for the details about query and - % train descriptors. + %   not empty. If `CompactResult` is false, the `matches` vector + %   has the same size as `queryDescriptors` rows. If + %   `CompactResult` is true, the matches vector does not contain + %   matches for fully masked-out query descriptors. default false + % + % This extended variant of the cv.DescriptorMatcher.match method + % finds several best matches for each query descriptor. The matches + % are returned in order of increasing distance. See + % cv.DescriptorMatcher.match for the details about query and train + % descriptors. % % See also: cv.DescriptorMatcher.match, % cv.DescriptorMatcher.radiusMatch, cv.batchDistance @@ -522,47 +521,46 @@ function train(this) function matches = radiusMatch(this, queryDescriptors, varargin) %RADIUSMATCH For each query descriptor, finds the training descriptors not farther than the specified distance % - %    matches = matcher.radiusMatch(queryDescriptors, trainDescriptors, maxDistance) - %    matches = matcher.radiusMatch(queryDescriptors, maxDistance) - %    [...] = matcher.radiusMatch(..., 'OptionName', optionValue, ...) + %     matches = matcher.radiusMatch(queryDescriptors, trainDescriptors, maxDistance) + %     matches = matcher.radiusMatch(queryDescriptors, maxDistance) + %     [...] = matcher.radiusMatch(..., 'OptionName', optionValue, ...) % % ## Input % * __queryDescriptors__ Query set of descriptors. % * __trainDescriptors__ Train set of descriptors.
This set is not - %       added to the train descriptors collection stored in the - %       class object. + %   added to the train descriptors collection stored in the class + %   object. % * __maxDistance__ Threshold for the distance between matched - %       descriptors. Distance here means metric distance (e.g. - %       Hamming distance), not the distance between coordinates - %       (which is measured in Pixels)! + %   descriptors. Distance here means metric distance (e.g. Hamming + %   distance), not the distance between coordinates (which is + %   measured in pixels)! % % ## Output % * __matches__ Found matches. A cell array of length - %       `size(queryDescriptors,1)`, each cell is a structure array - %       that has the following fields: - %       * __queryIdx__ query descriptor index (zero-based index) - %       * __trainIdx__ train descriptor index (zero-based index) - %       * __imgIdx__ train image index (zero-based index) - %       * __distance__ distance between descriptors (scalar) + %   `size(queryDescriptors,1)`, each cell is a structure array + %   that has the following fields: + %   * __queryIdx__ query descriptor index (zero-based index) + %   * __trainIdx__ train descriptor index (zero-based index) + %   * __imgIdx__ train image index (zero-based index) + %   * __distance__ distance between descriptors (scalar) % % ## Options % * __Mask__ default empty - %       * In the first form, mask specifying permissible matches - %         between an input query and train matrices of descriptors. - %         Matrix of size - %         `[size(queryDescriptors,1),size(trainDescriptors,1)]`. - %       * In the second form, set of masks. Each `masks{i}` - %         specifies permissible matches between the input query - %         descriptors and stored train descriptors from the i-th - %         image `trainDescCollection{i}`. Cell array of length - %         `length(trainDescriptors)`, each a matrix of size - %         `[size(queryDescriptors,1),size(trainDescriptors{i},1)]`. + %   * In the first form, mask specifying permissible matches + %     between an input query and train matrices of descriptors. + %     Matrix of size + %     `[size(queryDescriptors,1),size(trainDescriptors,1)]`. + %   * In the second form, set of masks. Each `masks{i}` specifies + %     permissible matches between the input query descriptors and + %     stored train descriptors from the i-th image + %     `trainDescCollection{i}`. Cell array of length + %     `length(trainDescriptors)`, each a matrix of size + %     `[size(queryDescriptors,1),size(trainDescriptors{i},1)]`. % * __CompactResult__ Parameter used when the mask (or masks) is - %       not empty. If `CompactResult` is false, the `matches` - %       vector has the same size as `queryDescriptors` rows. If - %       `CompactResult` is true, the matches vector does not - %       contain matches for fully masked-out query descriptors. - %       default false + %   not empty. If `CompactResult` is false, the `matches` vector + %   has the same size as `queryDescriptors` rows. If + %   `CompactResult` is true, the matches vector does not contain + %   matches for fully masked-out query descriptors.
default false % % For each query descriptor, the methods find such training % descriptors that the distance between the query descriptor and diff --git a/+cv/DetectionBasedTracker.m b/+cv/DetectionBasedTracker.m index e2708d938..29346b3e0 100644 --- a/+cv/DetectionBasedTracker.m +++ b/+cv/DetectionBasedTracker.m @@ -13,32 +13,31 @@ function this = DetectionBasedTracker(mainDetector, trackingDetector, varargin) %DETECTIONBASEDTRACKER Creates a new tracker object % - %    tracker = cv.DetectionBasedTracker(mainDetector, trackingDetector) - %    tracker = cv.DetectionBasedTracker(..., 'OptionName',optionValue, ...) + %     tracker = cv.DetectionBasedTracker(mainDetector, trackingDetector) + %     tracker = cv.DetectionBasedTracker(..., 'OptionName',optionValue, ...) % % ## Input % * __mainDetector__, __trackingDetector__ detector of the form - %       `{filename, 'key',val, ...}` where `filename` is the name - %       of the file from which the classifier is loaded. See - %       detection options below for valid key/value pairs. - %       Currently only one detector based on cv.CascadeClassifier - %       is supported. + %   `{filename, 'key',val, ...}` where `filename` is the name of + %   the file from which the classifier is loaded. See detection + %   options below for valid key/value pairs. Currently only one + %   detector based on cv.CascadeClassifier is supported. % % ## Options % * __MaxTrackLifetime__ must be non-negative. default 5 % * __MinDetectionPeriod__ the minimal time between runs of the big - %       object detector (on the whole frame) in msec (1000 mean - %       1 sec). default 0 + %   object detector (on the whole frame) in msec (1000 means 1 sec). + %   default 0 % % ## CascadeClassifier detection options % * __ScaleFactor__ Parameter specifying how much the image size - %       is reduced at each image scale. default 1.1 + %   is reduced at each image scale. default 1.1 % * __MinNeighbors__ Parameter specifying how many neighbors each - %       candiate rectangle should have to retain it. default 2 + %   candidate rectangle should have to retain it. default 2 % * __MinSize__ Minimum possible object size. Objects smaller than - %       that are ignored. default `[96,96]`. + %   that are ignored. default `[96,96]`. % * __MaxSize__ Maximum possible object size. Objects larger than - %       that are ignored. default `[intmax,intmax]`. + %   that are ignored. default `[intmax,intmax]`.
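A hedged end-to-end sketch of the detector spec and tracking loop described above (`xmlfile` is a placeholder path to a cascade classifier file, and `frames` a placeholder cell array of 8-bit grayscale frames):

    tracker = cv.DetectionBasedTracker({xmlfile}, ...
        {xmlfile, 'ScaleFactor',1.1, 'MinNeighbors',2}, 'MaxTrackLifetime',5);
    tracker.run();                             % start the background detector
    for i = 1:numel(frames)
        tracker.process(frames{i});            % feed one gray frame
        [boxes, ids] = tracker.getObjects();   % tracked rectangles and their IDs
    end
    tracker.stop();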
% % See also: cv.DetectionBasedTracker.process % @@ -48,7 +47,7 @@ function delete(this) %DELETE Destructor % - %    tracker.delete() + %     tracker.delete() % % See also: cv.DetectionBasedTracker % @@ -59,7 +58,7 @@ function delete(this) function success = run(this) %RUN Run tracker % - %    success = tracker.run() + %     success = tracker.run() % % ## Output % * __success__ success logical flag @@ -72,7 +71,7 @@ function delete(this) function stop(this) %STOP Stop tracker % - %    tracker.stop() + %     tracker.stop() % % See also: cv.DetectionBasedTracker.run % @@ -82,7 +81,7 @@ function stop(this) function resetTracking(this) %RESETTRACKING Reset tracker % - %    tracker.resetTracking() + %     tracker.resetTracking() % % See also: cv.DetectionBasedTracker.run % @@ -92,12 +91,12 @@ function resetTracking(this) function params = getParameters(this) %GETPARAMETERS Get current tracker parameters % - %    params = tracker.getParameters() + %     params = tracker.getParameters() % % ## Output % * __params__ a struct containing the following fields: - %       * __maxTrackLifetime__ - %       * __minDetectionPeriod__ + %   * __maxTrackLifetime__ + %   * __minDetectionPeriod__ % % See also: cv.DetectionBasedTracker.setParameters % @@ -107,7 +106,7 @@ function resetTracking(this) function success = setParameters(this, varargin) %SETPARAMETERS Set current tracker parameters % - %    success = tracker.setParameters('OptionName',optionValue, ...) + %     success = tracker.setParameters('OptionName',optionValue, ...) % % ## Output % * __success__ success logical flag. @@ -115,8 +114,8 @@ function resetTracking(this) % ## Options % * __MaxTrackLifetime__ must be non-negative. default 5 % * __MinDetectionPeriod__ the minimal time between runs of the big - %       object detector (on the whole frame) in msec (1000 mean - %       1 sec). default 0 + %   object detector (on the whole frame) in msec (1000 means 1 sec). + %   default 0 % % See also: cv.DetectionBasedTracker.getParameters % @@ -126,7 +125,7 @@ function resetTracking(this) function process(this, imageGray) %PROCESS Process new frame % - %    tracker.process(imageGray) + %     tracker.process(imageGray) % % ## Input % * __imageGray__ 8-bit 1-channel gray image. @@ -139,7 +138,7 @@ function process(this, imageGray) function id = addObject(this, location) %ADDOBJECT Track new object % - %    tracker.addObject(location) + %     tracker.addObject(location) % % ## Input % * __location__ rectangle containing object `[x,y,w,h]`. @@ -155,12 +154,12 @@ function process(this, imageGray) function [boxes, ids] = getObjects(this) %GETOBJECTS Return tracked objects % - %    boxes = tracker.getObjects() - %    [boxes, ids] = tracker.getObjects() + %     boxes = tracker.getObjects() + %     [boxes, ids] = tracker.getObjects() % % ## Output % * __boxes__ Cell array of rectangles where each rectangle - %       contains the detected object `{[x,y,w,h], ...}`. + %   contains the detected object `{[x,y,w,h], ...}`. % * __ids__ optional vector of corresponding tracked object IDs. % % See also: cv.DetectionBasedTracker.getObjectsExtended @@ -175,19 +174,19 @@ function process(this, imageGray) function objects = getObjectsExtended(this) %GETOBJECTSEXTENDED Return tracked objects (extended) % - %    objects = tracker.getObjectsExtended() + %     objects = tracker.getObjectsExtended() % % ## Output % * __objects__ a struct-array of tracked objects with the - %       following fields: - %       * __id__ tracked object ID - %       * __location__ rectangle contains the detected object - %             `[x,y,w,h]`. - %       * __status__ object status.
One of the following strings: - %             * __DetectedNotShownYet__ - %             * __Detected__ - %             * __DetectedTemporaryLost__ - %             * __WrongObject__ + %   following fields: + %   * __id__ tracked object ID + %   * __location__ rectangle contains the detected object + %     `[x,y,w,h]`. + %   * __status__ object status. One of the following strings: + %     * __DetectedNotShownYet__ + %     * __Detected__ + %     * __DetectedTemporaryLost__ + %     * __WrongObject__ % % See also: cv.DetectionBasedTracker.getObjects % diff --git a/+cv/DownhillSolver.m b/+cv/DownhillSolver.m index 0f31dbe8c..0a41f51f2 100644 --- a/+cv/DownhillSolver.m +++ b/+cv/DownhillSolver.m @@ -4,7 +4,7 @@ % defined on an `n`-dimensional Euclidean space, using the **Nelder-Mead** % method, also known as the downhill simplex method. The basic idea about the % method can be obtained from - % [Nelder-Mead method](http://en.wikipedia.org/wiki/Nelder-Mead_method). + % [Nelder-Mead method](https://en.wikipedia.org/wiki/Nelder-Mead_method). % % It should be noted that this method, although deterministic, is rather % a heuristic and therefore may converge to a local minimum, not necessarily @@ -33,7 +33,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -79,21 +80,21 @@ function this = DownhillSolver(varargin) %DOWNHILLSOLVER Creates a DownhillSolver object % - %    solver = cv.DownhillSolver() - %    solver = cv.DownhillSolver('OptionName', optionValue, ...) + %     solver = cv.DownhillSolver() + %     solver = cv.DownhillSolver('OptionName', optionValue, ...) % % ## Options % * __Function__ Objective function that will be minimized, - %       specified as a structure with the following fields: - %       * __dims__ Number of dimensions - %       * __fun__ string, name of M-file that implements the - %             `calc` method. It should receive a vector of the - %             specified dimension, and return a scalar value of - %             the objective function evaluated at that point. + %   specified as a structure with the following fields: + %   * __dims__ Number of dimensions + %   * __fun__ string, name of M-file that implements the `calc` + %     method. It should receive a vector of the specified + %     dimension, and return a scalar value of the objective + %     function evaluated at that point. % * __InitStep__ Initial step that will be used to construct the - %       initial simplex. default `[1, 1, 0.0]`. + %   initial simplex. default `[1, 1, 0.0]`. % * __TermCriteria__ Termination criteria for the algorithm. default - %       `struct('type','Count+EPS', 'maxCount',5000, 'epsilon',1e-6)` + %   `struct('type','Count+EPS', 'maxCount',5000, 'epsilon',1e-6)` % % All the parameters are optional, so this procedure can be called % even without parameters at all. In this case, the default values @@ -112,7 +113,7 @@ function delete(this) %DELETE Destructor % - %    solver.delete() + %     solver.delete() % % See also: cv.DownhillSolver % @@ -123,16 +124,16 @@ function delete(this) function [x,fx] = minimize(this, x0) %MINIMIZE Runs the algorithm and performs the minimization % - %    [x,fx] = solver.minimize(x0) + %     [x,fx] = solver.minimize(x0) % % ## Input % * __x0__ The initial point, which will become the centroid of the - %       initial simplex. + %   initial simplex. % % ## Output % * __x__ After the algorithm terminates, it will be set - %       to the point where the algorithm stops, the point of - %       possible minimum. + %   to the point where the algorithm stops, the point of possible + %   minimum. % * __fx__ The value of the function at the point found.
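A sketch tying the constructor's `Function` struct to minimize (it assumes an objective saved as `myObjective.m` on the MATLAB path; the Rosenbrock function here is purely illustrative):

    % contents of myObjective.m (hypothetical M-file):
    %     function f = myObjective(x)
    %         f = 100*(x(2) - x(1)^2)^2 + (1 - x(1))^2;
    %     end
    solver = cv.DownhillSolver('Function',struct('dims',2, 'fun','myObjective'), ...
        'InitStep',[0.5, 0.5]);
    [x, fx] = solver.minimize([-1.2, 1.0]);   % x0 becomes the centroid of the simplex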
% % The sole input parameter determines the centroid of the starting @@ -150,7 +151,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - %    solver.clear() + %     solver.clear() % % See also: cv.DownhillSolver.empty % @@ -160,11 +161,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - %    b = solver.empty() + %     b = solver.empty() % % ## Output - % * __b__ returns true of the algorithm is empty - %       (e.g. in the very beginning or after unsuccessful read). + % * __b__ returns true if the algorithm is empty (e.g. in the very + %   beginning or after unsuccessful read). % % See also: cv.DownhillSolver.clear % @@ -174,11 +175,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - %    name = solver.getDefaultName() + %     name = solver.getDefaultName() % % ## Output - % * __name__ This string is used as top level xml/yml node tag - %       when the object is saved to a file or string. + % * __name__ This string is used as top level XML/YML node tag + %   when the object is saved to a file or string. % % See also: cv.DownhillSolver.save, cv.DownhillSolver.load % @@ -188,7 +189,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm to a file % - %    obj.save(filename) + %     obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -203,21 +204,21 @@ function save(this, filename) function load(this, fname_or_str) %LOAD Loads algorithm from a file or a string % - %    obj.load(filename) - %    obj.load(str, 'FromString',true) - %    obj.load(..., 'OptionName',optionValue, ...) + %     obj.load(fname) + %     obj.load(str, 'FromString',true) + %     obj.load(..., 'OptionName',optionValue, ...) % % ## Input - % * __filename__ Name of the file to read. + % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - %       load. + %   load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - %       the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - %       a filename or a string containing the serialized model. - %       default false + %   the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + %   filename or a string containing the serialized model. + %   default false % % This method reads algorithm parameters from a file storage. % The previous model state is discarded. diff --git a/+cv/DualTVL1OpticalFlow.m b/+cv/DualTVL1OpticalFlow.m index e97ec022f..659225a63 100644 --- a/+cv/DualTVL1OpticalFlow.m +++ b/+cv/DualTVL1OpticalFlow.m @@ -45,7 +45,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -84,7 +85,7 @@ function this = DualTVL1OpticalFlow() %DUALTVL1OPTICALFLOW Creates instance of DualTVL1OpticalFlow % - %    obj = cv.DualTVL1OpticalFlow() + %     obj = cv.DualTVL1OpticalFlow() % % See also: cv.DualTVL1OpticalFlow.calc % @@ -94,7 +95,7 @@ function delete(this) %DELETE Destructor % - %    obj.delete() + %     obj.delete() % % See also: cv.DualTVL1OpticalFlow % @@ -108,17 +109,17 @@ function delete(this) function flow = calc(this, I0, I1, varargin) %CALC Calculates an optical flow % - %    flow = obj.calc(I0, I1) - %    flow = obj.calc(I0, I1, 'OptionName',optionValue, ...) + %     flow = obj.calc(I0, I1) + %     flow = obj.calc(I0, I1, 'OptionName',optionValue, ...) % % ## Input % * __I0__ first 8-bit single-channel input image.
% * __I1__ second input image of the same size and the same type - % as `I0`. + % as `I0`. % % ## Output % * __flow__ computed flow image that has the same size as `I0` - % and type `single` (2-channels). + % and type `single` (2-channels). % % ## Options % * __InitialFlow__ specify the initial flow. Not set by default. @@ -131,7 +132,7 @@ function delete(this) function collectGarbage(this) %COLLECTGARBAGE Releases all inner buffers % - % obj.collectGarbage() + % obj.collectGarbage() % DualTVL1OpticalFlow_(this.id, 'collectGarbage'); end @@ -142,7 +143,7 @@ function collectGarbage(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.DualTVL1OpticalFlow.empty % @@ -152,11 +153,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.DualTVL1OpticalFlow.clear % @@ -166,11 +167,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.DualTVL1OpticalFlow.save, cv.DualTVL1OpticalFlow.load % @@ -180,7 +181,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -195,21 +196,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from a file storage. % The previous model state is discarded. 
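Putting the calc and collectGarbage methods above together, a minimal sketch (the two frame file names are hypothetical; any pair of same-size 8-bit grayscale images works):

    % estimate dense optical flow between two consecutive grayscale frames
    I0 = imread('frame1.png');    % assumed 8-bit single-channel
    I1 = imread('frame2.png');
    obj = cv.DualTVL1OpticalFlow();
    flow = obj.calc(I0, I1);      % same size as I0, 2-channel single (dx,dy)
    obj.collectGarbage();         % release inner buffers when done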
diff --git a/+cv/EM.m b/+cv/EM.m index a22022703..8ef691352 100644 --- a/+cv/EM.m +++ b/+cv/EM.m @@ -13,12 +13,12 @@ % Consider the set of the `N` feature vectors `{x1,x2,...,xN}` from a % `d`-dimensional Euclidean space drawn from a Gaussian mixture: % - % p(x;a_k,S_k,PI_k) = sum_{k=1..m} PI_k * p_k(x), - % PI_k>=0, sum_{k=1..m}(PI_k)=1 + % p(x;a_k,S_k,PI_k) = sum_{k=1..m} PI_k * p_k(x), + % PI_k>=0, sum_{k=1..m}(PI_k)=1 % - % p_k(x) = phi(x;a_k,S_k) - % = 1/((2*pi)^(d/2) * |Sk|^(1/2)) * - % exp(-0.5 * (x - a_k)' * inv(S_k) * (x - a_k)) + % p_k(x) = phi(x;a_k,S_k) + % = 1/((2*pi)^(d/2) * |Sk|^(1/2)) * + % exp(-0.5 * (x - a_k)' * inv(S_k) * (x - a_k)) % % where `m` is the number of mixtures, `p_k` is the normal distribution % density with the mean `a_k` and covariance matrix `S_k`, `PI_k` is the @@ -27,12 +27,12 @@ % estimates (MLE) of all the mixture parameters, that is, `a_k`, `S_k` and % `PI_k`: % - % L(x,theta) = logp(x,theta) - % = sum_{i=1..N} log(sum_{k=1..m} PI_k * p_k(x)) - % -> argmax_{theta IN THETA}, + % L(x,theta) = logp(x,theta) + % = sum_{i=1..N} log(sum_{k=1..m} PI_k * p_k(x)) + % -> argmax_{theta IN THETA}, % - % THETA = {(a_k,S_k,PI_k): a_k IN R^d, S_k=S_k'>0, S_k IN R^(dxd), - % PI_k>=0, sum_{k=1..m}(PI_k)=1} + % THETA = {(a_k,S_k,PI_k): a_k IN R^d, S_k=S_k'>0, S_k IN R^(dxd), + % PI_k>=0, sum_{k=1..m}(PI_k)=1} % % The EM algorithm is an iterative procedure. Each iteration includes two % steps. At the first step (Expectation step or E-step), you find a @@ -40,16 +40,16 @@ % sample `i` to belong to mixture `k` using the currently available % mixture parameter estimates: % - % alpha_{k,i} = ( PI_k * phi(x;a_k,S_k) ) / - % sum_{j=1..m} (PI_j * phi(x;a_j,S_j)) + % alpha_{k,i} = ( PI_k * phi(x;a_k,S_k) ) / + % sum_{j=1..m} (PI_j * phi(x;a_j,S_j)) % % At the second step (Maximization step or M-step), the mixture parameter % estimates are refined using the computed probabilities: % - % PI_k = (1/N) * sum_{i=1..N}(alpha_{k,i}) - % a_k = sum_{i=1..N}(alpha_{k,i}*x_i) / sum_{i=1..N}(alpha_{k,i}) - % S_k = sum_{i=1..N}(alpha_{k,i} * (x_i - a_k)*(x_i - a_k)') / - % sum_{i=1..N}(alpha_{k,i}) + % PI_k = (1/N) * sum_{i=1..N}(alpha_{k,i}) + % a_k = sum_{i=1..N}(alpha_{k,i}*x_i) / sum_{i=1..N}(alpha_{k,i}) + % S_k = sum_{i=1..N}(alpha_{k,i} * (x_i - a_k)*(x_i - a_k)') / + % sum_{i=1..N}(alpha_{k,i}) % % Alternatively, the algorithm may start with the M-step when the initial % values for `p_{i,k}` can be provided. Another alternative when `p_{i,k}` @@ -98,21 +98,20 @@ % Possible values: % % * __Spherical__ A scaled identity matrix `mu_k*I`. There is the only - % parameter `mu_k` to be estimated for each matrix. The option - % may be used in special cases, when the constraint is relevant, - % or as a first step in the optimization (for example in case - % when the data is preprocessed with cv.PCA). The results of - % such preliminary estimation may be passed again to the - % optimization procedure, this time with - % `CovarianceMatrixType='Diagonal'`. + % parameter `mu_k` to be estimated for each matrix. The option may + % be used in special cases, when the constraint is relevant, or as a + % first step in the optimization (for example in case when the data + % is preprocessed with cv.PCA). The results of such preliminary + % estimation may be passed again to the optimization procedure, this + % time with `CovarianceMatrixType='Diagonal'`. % * __Diagonal__ A diagonal matrix with positive diagonal elements. - % The number of free parameters is `d` for each matrix. 
This is - % most commonly used option yielding good estimation results. + % The number of free parameters is `d` for each matrix. This is most + % commonly used option yielding good estimation results. % * __Generic__ A symmetric positively defined matrix. The number of - % free parameters in each matrix is about `d^2/2`. It is not - % recommended to use this option, unless there is pretty - % accurate initial estimation of the parameters and/or a huge - % number of training samples. + % free parameters in each matrix is about `d^2/2`. It is not + % recommended to use this option, unless there is pretty accurate + % initial estimation of the parameters and/or a huge number of + % training samples. % * __Default__ Synonym for 'Diagonal'. This is the default. CovarianceMatrixType @@ -130,8 +129,8 @@ function this = EM(varargin) %EM EM constructor % - % model = cv.EM() - % model = cv.EM(filename) + % model = cv.EM() + % model = cv.EM(filename) % % Creates empty EM model. The model should be trained then using % train method. Alternatively, you can use one of the other train* @@ -143,8 +142,8 @@ % by some earlier created object. % % ## Input - % * __filename__ Name of XML or YML file, containing - % previously saved model + % * __filename__ Name of XML or YML file, containing previously + % saved model % % See also: cv.EM, cv.EM.train, cv.EM.load % @@ -157,7 +156,7 @@ function delete(this) %DELETE Destructor % - % model.delete() + % model.delete() % % See also: cv.EM % @@ -171,7 +170,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % model.clear() + % model.clear() % % The method clear does the same job as the destructor: it % deallocates all the memory occupied by the class members. But @@ -187,11 +186,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = model.empty() + % b = model.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.EM.clear, cv.EM.load % @@ -201,17 +200,17 @@ function clear(this) function varargout = save(this, filename) %SAVE Saves the algorithm parameters to a file or a string % - % model.save(filename) - % str = model.save(filename) + % model.save(filename) + % str = model.save(filename) % % ## Input % * __filename__ Name of the file to save to. In case of string - % output, only the filename extension is used to determine - % the output format (XML or YAML). + % output, only the filename extension is used to determine the + % output format (XML or YAML). % % ## Output % * __str__ optional output. If requested, the model is persisted - % to a string in memory instead of writing to disk. + % to a string in memory instead of writing to disk. % % This method stores the complete model state to the specified % XML or YAML file (or to a string in memory, based on the number @@ -225,23 +224,22 @@ function clear(this) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % model.load(filename) - % model.load(str, 'FromString',true) - % model.load(..., 'OptionName',optionValue, ...) + % model.load(filename) + % model.load(str, 'FromString',true) + % model.load(..., 'OptionName',optionValue, ...) % % ## Input % * __filename__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. 
% % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model - % (switches between `Algorithm::load()` and - % `Algorithm::loadFromString()` C++ methods). - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model (switches + % between `Algorithm::load()` and + % `Algorithm::loadFromString()` C++ methods). default false % % This method loads the complete model state from the specified % XML or YAML file (either from disk or serialized string). The @@ -255,11 +253,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = model.getDefaultName() + % name = model.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.EM.save, cv.EM.load % @@ -272,7 +270,7 @@ function load(this, fname_or_str, varargin) function count = getVarCount(this) %GETVARCOUNT Returns the number of variables in training samples % - % count = model.getVarCount() + % count = model.getVarCount() % % ## Output % * __count__ number of variables in training samples. @@ -285,7 +283,7 @@ function load(this, fname_or_str, varargin) function b = isTrained(this) %ISTRAINED Returns true if the model is trained % - % b = model.isTrained() + % b = model.isTrained() % % ## Output % * __b__ Returns true if the model is trained, false otherwise. @@ -298,11 +296,11 @@ function load(this, fname_or_str, varargin) function b = isClassifier(this) %ISCLASSIFIER Returns true if the model is a classifier % - % b = model.isClassifier() + % b = model.isClassifier() % % ## Output % * __b__ Returns true if the model is a classifier, false if the - % model is a regressor. + % model is a regressor. % % Always true for EM. % @@ -314,117 +312,110 @@ function load(this, fname_or_str, varargin) function status = train(this, samples, varargin) %TRAIN Estimates the Gaussian mixture parameters from a samples set % - % status = model.train(samples) - % status = model.train(csvFilename) - % [...] = model.train(..., 'OptionName', optionValue, ...) + % status = model.train(samples) + % status = model.train(csvFilename) + % [...] = model.train(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ Samples from which the Gaussian mixture model will - % be estimated. It should be a floating-point matrix, each - % row of which is a sample (see the `Layout` option). - % Internally the computations are performed in `double` - % precision type. + % be estimated. It should be a floating-point matrix, each row + % of which is a sample (see the `Layout` option). Internally the + % computations are performed in `double` precision type. % * __csvFilename__ The input CSV file name from which to load - % dataset. In this variant, you should set the second - % argument to an empty array. + % dataset. In this variant, you should set the second argument + % to an empty array. % % ## Output % * __status__ The method returns true if the Gaussian mixture - % model was trained successfully, otherwise it returns - % false. + % model was trained successfully, otherwise it returns false. 
% % ## Options % * __Data__ Training data options, specified as a cell array of - % key/value pairs of the form `{'key',val, ...}`. See below. + % key/value pairs of the form `{'key',val, ...}`. See below. % * __Flags__ The optional training flags, model-dependent. - % Not used. default 0 + % Not used. default 0 % % ### Options for `Data` (first variant with samples and responses) % * __Layout__ Sample types. Default 'Row'. One of: - % * __Row__ each training sample is a row of samples. - % * __Col__ each training sample occupies a column of - % samples. + % * __Row__ each training sample is a row of samples. + % * __Col__ each training sample occupies a column of samples. % * __VarIdx__ vector specifying which variables to use for - % training. It can be an integer vector (`int32`) containing - % 0-based variable indices or logical vector (`uint8` or - % `logical`) containing a mask of active variables. Not set - % by default, which uses all variables in the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based variable indices or logical vector (`uint8` or + % `logical`) containing a mask of active variables. Not set by + % default, which uses all variables in the input data. % * __SampleIdx__ vector specifying which samples to use for - % training. It can be an integer vector (`int32`) containing - % 0-based sample indices or logical vector (`uint8` or - % `logical`) containing a mask of training samples of - % interest. Not set by default, which uses all samples in - % the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based sample indices or logical vector (`uint8` or + % `logical`) containing a mask of training samples of interest. + % Not set by default, which uses all samples in the input data. % * __SampleWeights__ optional floating-point vector with weights - % for each sample. Some samples may be more important than - % others for training. You may want to raise the weight of - % certain classes to find the right balance between hit-rate - % and false-alarm rate, and so on. Not set by default, which - % effectively assigns an equal weight of 1 for all samples. + % for each sample. Some samples may be more important than + % others for training. You may want to raise the weight of + % certain classes to find the right balance between hit-rate and + % false-alarm rate, and so on. Not set by default, which + % effectively assigns an equal weight of 1 for all samples. % * __VarType__ optional vector of type `uint8` and size - % ` + `, - % containing types of each input and output variable. By - % default considers all variables as numerical (both input - % and output variables). In case there is only one output - % variable of integer type, it is considered categorical. - % You can also specify a cell-array of strings (or as one - % string of single characters, e.g 'NNNC'). Possible values: - % * __Numerical__, __N__ same as 'Ordered' - % * __Ordered__, __O__ ordered variables - % * __Categorical__, __C__ categorical variables + % ` + `, + % containing types of each input and output variable. By default + % considers all variables as numerical (both input and output + % variables). In case there is only one output variable of + % integer type, it is considered categorical. You can also + % specify a cell-array of strings (or as one string of single + % characters, e.g. 'NNNC'). Possible values:
+ % * __Numerical__, __N__ same as 'Ordered' + % * __Ordered__, __O__ ordered variables + % * __Categorical__, __C__ categorical variables % * __MissingMask__ Indicator mask for missing observation (not - % currently implemented). Not set by default + % currently implemented). Not set by default % * __TrainTestSplitCount__ divides the dataset into train/test - % sets, by specifying number of samples to use for the test - % set. By default all samples are used for the training set. + % sets, by specifying number of samples to use for the test set. + % By default all samples are used for the training set. % * __TrainTestSplitRatio__ divides the dataset into train/test - % sets, by specifying ratio of samples to use for the test - % set. By default all samples are used for the training set. + % sets, by specifying ratio of samples to use for the test set. + % By default all samples are used for the training set. % * __TrainTestSplitShuffle__ when splitting dataset into - % train/test sets, specify whether to shuffle the samples. - % Otherwise samples are assigned sequentially (first train - % then test). default true + % train/test sets, specify whether to shuffle the samples. + % Otherwise samples are assigned sequentially (first train then + % test). default true % % ### Options for `Data` (second variant for loading CSV file) % * __HeaderLineCount__ The number of lines in the beginning to - % skip; besides the header, the function also skips empty - % lines and lines staring with '#'. default 1 + % skip; besides the header, the function also skips empty lines + % and lines starting with '#'. default 1 % * __ResponseStartIdx__ Index of the first output variable. If - % -1, the function considers the last variable as the - % response. If the dataset only contains input variables and - % no responses, use `ResponseStartIdx = -2` and - % `ResponseEndIdx = 0`, then the output variables vector - % will just contain zeros. default -1 + % -1, the function considers the last variable as the response. + % If the dataset only contains input variables and no responses, + % use `ResponseStartIdx = -2` and `ResponseEndIdx = 0`, then the + % output variables vector will just contain zeros. default -1 % * __ResponseEndIdx__ Index of the last output variable + 1. If - % -1, then there is single response variable at - % `ResponseStartIdx`. default -1 + % -1, then there is a single response variable at + % `ResponseStartIdx`. default -1 % * __VarTypeSpec__ The optional text string that specifies the - % variables' types. It has the format - % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, - % variables from `n1` to `n2` (inclusive range), `n3`, `n4` - % to `n5` ... are considered ordered and `n6`, `n7` to - % `n8` ... are considered as categorical. The range - % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` - % should cover all the variables. If `VarTypeSpec` is not - % specified, then algorithm uses the following rules: - % * all input variables are considered ordered by default. - % If some column contains has non- numerical values, e.g. - % 'apple', 'pear', 'apple', 'apple', 'mango', the - % corresponding variable is considered categorical. - % * if there are several output variables, they are all - % considered as ordered. Errors are reported when - % non-numerical values are used. - % * if there is a single output variable, then if its values - % are non-numerical or are all integers, then it's - % considered categorical. Otherwise, it's considered - % ordered.
+ % variables' types. It has the format + % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, variables + % from `n1` to `n2` (inclusive range), `n3`, `n4` to `n5` ... + % are considered ordered and `n6`, `n7` to `n8` ... are + % considered as categorical. The range + % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` should + % cover all the variables. If `VarTypeSpec` is not specified, + % then the algorithm uses the following rules: + % * all input variables are considered ordered by default. If + % some column contains non-numerical values, e.g. + % 'apple', 'pear', 'apple', 'apple', 'mango', the + % corresponding variable is considered categorical. + % * if there are several output variables, they are all + % considered as ordered. Errors are reported when + % non-numerical values are used. + % * if there is a single output variable, then if its values are + % non-numerical or are all integers, then it's considered + % categorical. Otherwise, it's considered ordered. % * __Delimiter__ The character used to separate values in each - % line. default ',' + % line. default ',' % * __Missing__ The character used to specify missing - % measurements. It should not be a digit. Although it's a - % non-numerical value, it surely does not affect the - % decision of whether the variable ordered or categorical. - % default '?' + % measurements. It should not be a digit. Although it's a + % non-numerical value, it surely does not affect the decision of + % whether the variable is ordered or categorical. default '?' % * __TrainTestSplitCount__ same as above. % * __TrainTestSplitRatio__ same as above. % * __TrainTestSplitShuffle__ same as above. @@ -436,14 +427,14 @@ function load(this, fname_or_str, varargin) % start step: % % * __trainEM__ Starts with Expectation step. Initial values of - % the model parameters will be estimated by the k-means - % algorithm. + % the model parameters will be estimated by the k-means + % algorithm. % * __trainE__ Starts with Expectation step. You need to provide - % initial means `a_k` of mixture components. Optionally you - % can pass initial weights `PI_k` and covariance matrices - % `S_k` of mixture components. + % initial means `a_k` of mixture components. Optionally you can + % pass initial weights `PI_k` and covariance matrices `S_k` of + % mixture components. % * __trainM__ Starts with Maximization step. You need to provide - % initial probabilities `p_{i,k}` to use this option. + % initial probabilities `p_{i,k}` to use this option. % % See also: cv.EM.predict, cv.EM.trainE, cv.EM.trainM, cv.EM.trainEM % @@ -453,10 +444,10 @@ function load(this, fname_or_str, varargin) function [err,resp] = calcError(this, samples, responses, varargin) %CALCERROR Computes error on the training or test dataset % - % err = model.calcError(samples, responses) - % err = model.calcError(csvFilename, []) - % [err,resp] = model.calcError(...) - % [...] = model.calcError(..., 'OptionName', optionValue, ...) + % err = model.calcError(samples, responses) + % err = model.calcError(csvFilename, []) + % [err,resp] = model.calcError(...) + % [...] = model.calcError(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ See the train method. % * __responses__ See the train method. % * __csvFilename__ See the train method. % % ## Output % * __err__ computed error. % * __resp__ the optional output responses. % % ## Options % * __Data__ See the train method. % * __TestError__ if true, the error is computed over the test - % subset of the data, otherwise it's computed over the - % training subset of the data.
Please note that if you - % loaded a completely different dataset to evaluate an - % already trained classifier, you will probably want not to - % set the test subset at all with `TrainTestSplitRatio` and - % specify `TestError=false`, so that the error is computed - % for the whole new set. Yes, this sounds a bit confusing. - % default false + % subset of the data, otherwise it's computed over the training + % subset of the data. Please note that if you loaded a + % completely different dataset to evaluate an already trained + % classifier, you will probably want not to set the test subset + % at all with `TrainTestSplitRatio` and specify + % `TestError=false`, so that the error is computed for the whole + % new set. Yes, this sounds a bit confusing. default false % % The method uses the predict method to compute the error. For % regression models the error is computed as RMS, for classifiers @@ -491,24 +481,23 @@ function load(this, fname_or_str, varargin) function [results,f] = predict(this, samples, varargin) %PREDICT Predicts response(s) for the provided sample(s) % - % [results,f] = model.predict(samples) - % [...] = model.predict(..., 'OptionName', optionValue, ...) + % [results,f] = model.predict(samples) + % [...] = model.predict(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ The input samples, floating-point matrix. % % ## Output % * __results__ Output matrix of posterior probabilities of each - % component given a sample. A `double` matrix of size - % `nsamples-by-ClustersNumber`. + % component given a sample. A `double` matrix of size + % `nsamples-by-ClustersNumber`. % * __f__ If you pass one sample then this returns the zero-based - % index of the most probable mixture component for the given - % sample. Otherwise the result for the first sample is - % returned. + % index of the most probable mixture component for the given + % sample. Otherwise the result for the first sample is returned. % % ## Options % * __Flags__ The optional predict flags, model-dependent. - % Not used. default 0 + % Not used. default 0 % % Returns posterior probabilities for the provided samples. % @@ -523,7 +512,7 @@ function load(this, fname_or_str, varargin) function weights = getWeights(this) %GETWEIGHTS Returns weights of the mixtures % - % weights = model.getWeights() + % weights = model.getWeights() % % ## Output % * __weights__ vector of mixtures weights. @@ -539,7 +528,7 @@ function load(this, fname_or_str, varargin) function means = getMeans(this) %GETMEANS Returns the cluster centers (means of the Gaussian mixture) % - % means = model.getMeans() + % means = model.getMeans() % % ## Output % * __means__ matrix of cluster means. @@ -556,7 +545,7 @@ function load(this, fname_or_str, varargin) function covs = getCovs(this) %GETCOVS Returns covariation matrices % - % covs = model.getCovs() + % covs = model.getCovs() % % ## Output % * __covs__ cell array of covariance matrices. @@ -574,27 +563,27 @@ function load(this, fname_or_str, varargin) function [logLikelihoods, labels, probs] = trainEM(this, samples) %TRAINEM Estimate the Gaussian mixture parameters from a samples set % - % [logLikelihoods, labels, probs] = model.trainEM(samples) + % [logLikelihoods, labels, probs] = model.trainEM(samples) % % ## Input % * __samples__ Samples from which the Gaussian mixture model will - % be estimated. It should be a one-channel matrix, each row - % of which is a sample. 
If the matrix does not have `double` - % type it will be converted to the inner matrix of such type - % for the further computing. + % be estimated. It should be a one-channel matrix, each row of + % which is a sample. If the matrix does not have `double` type + % it will be converted to the inner matrix of such type for the + % further computing. % % ## Output % * __logLikelihoods__ The optional output matrix that contains a - % likelihood logarithm value for each sample. It has - % `nsamples-by-1` size and `double` type. + % likelihood logarithm value for each sample. It has + % `nsamples-by-1` size and `double` type. % * __labels__ The optional output "class label" for each sample: - % `labels_i = argmax_{k}(p_{i,k}), i=1..N` (indices of the - % most probable mixture component for each sample). It has - % `nsamples-by-1` size and `single` type. + % `labels_i = argmax_{k}(p_{i,k}), i=1..N` (indices of the most + % probable mixture component for each sample). It has + % `nsamples-by-1` size and `single` type. % * __probs__ The optional output matrix that contains posterior - % probabilities of each Gaussian mixture component given the - % each sample. It has `nsamples-by-ClustersNumber` size and - % `double` type. + % probabilities of each Gaussian mixture component given each + % sample. It has `nsamples-by-ClustersNumber` size and `double` + % type. % % This variation starts with Expectation step. Initial values of % the model parameters will be estimated by the k-means algorithm. @@ -622,19 +611,19 @@ function load(this, fname_or_str, varargin) function [logLikelihoods, labels, probs] = trainE(this, samples, means0, varargin) %TRAINE Estimate the Gaussian mixture parameters from a samples set, starting from the Expectation step % - % [logLikelihoods, labels, probs] = model.trainE(samples, means0) - % [...] = model.trainE(..., 'OptionName', optionValue, ...) + % [logLikelihoods, labels, probs] = model.trainE(samples, means0) + % [...] = model.trainE(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ Samples from which the Gaussian mixture model will - % be estimated. It should be a one-channel matrix, each row - % of which is a sample. If the matrix does not have `double` - % type it will be converted to the inner matrix of such type - % for the further computing. + % be estimated. It should be a one-channel matrix, each row of + % which is a sample. If the matrix does not have `double` type + % it will be converted to the inner matrix of such type for the + % further computing. % * __means0__ Initial means `a_k` of mixture components. It is a - % one-channel matrix of `ClustersNumber-by-dims` size. If the - % matrix does not have `double` type it will be converted to - % the inner matrix of such type for the further computing. + % one-channel matrix of `ClustersNumber-by-dims` size. If the + % matrix does not have `double` type it will be converted to the + % inner matrix of such type for the further computing. % % ## Output % * __logLikelihoods__ See the cv.EM.trainEM method. @@ -643,13 +632,13 @@ function load(this, fname_or_str, varargin) % % ## Options % * __Covs0__ The vector of initial covariance matrices `S_k` of - % mixture components. Each of covariance matrices is a - % one-channel matrix of `dims-by-dims` size. If the matrices - % do not have `double` type they will be converted to the - % inner matrices of such type for the further computing. + % mixture components. Each of covariance matrices is a + % one-channel matrix of `dims-by-dims` size. 
If the matrices do + % not have `double` type they will be converted to the inner + % matrices of such type for the further computing. % * __Weights0__ Initial weights `PI_k` of mixture components. It - % should be a one-channel floating-point vector of length - % `ClustersNumber`. + % should be a one-channel floating-point vector of length + % `ClustersNumber`. % % This variation starts with Expectation step. You need to provide % initial means `a_k` of mixture components. Optionally you can @@ -664,18 +653,17 @@ function load(this, fname_or_str, varargin) function [logLikelihoods, labels, probs] = trainM(this, samples, probs0) %TRAINM Estimate the Gaussian mixture parameters from a samples set, starting from the Maximization step % - % [logLikelihoods, labels, probs] = model.trainM(samples, probs0) + % [logLikelihoods, labels, probs] = model.trainM(samples, probs0) % % ## Input % * __samples__ Samples from which the Gaussian mixture model will - % be estimated. It should be a one-channel matrix, each row - % of which is a sample. If the matrix does not have `double` - % type it will be converted to the inner matrix of such type - % for the further computing. + % be estimated. It should be a one-channel matrix, each row of + % which is a sample. If the matrix does not have `double` type + % it will be converted to the inner matrix of such type for the + % further computing. % * __probs0__ Initial probabilities `p_{i,k}` of sample `i` to - % belong to mixture component `k`. It is a one-channel - % floating-point matrix of `nsamples-by-ClustersNumber` - % size. + % belong to mixture component `k`. It is a one-channel + % floating-point matrix of `nsamples-by-ClustersNumber` size. % % ## Output % * __logLikelihoods__ See the cv.EM.trainEM method. @@ -693,25 +681,24 @@ function load(this, fname_or_str, varargin) function [logLikelihoods, labels, probs] = predict2(this, samples) %PREDICT2 Returns log-likelihood values and indices of the most probable mixture component for given samples % - % [logLikelihoods, labels, probs] = model.predict2(sample) + % [logLikelihoods, labels, probs] = model.predict2(sample) % % ## Input % * __samples__ Samples for classification. It should be a - % one-channel matrix of size `nsamples-by-dims` with each - % row representing one sample. + % one-channel matrix of size `nsamples-by-dims` with each row + % representing one sample. % % ## Output - % * __logLikelihoods__ Output vector that contains a - % likelihood logarithm value for each sample. It has - % `nsamples-by-1` size and `double` type. + % * __logLikelihoods__ Output vector that contains a likelihood + % logarithm value for each sample. It has `nsamples-by-1` size + % and `double` type. % * __labels__ Output "class label" for each sample: - % `labels_i = argmax_{k}(p_{i,k}), i=1..N` (indices of the - % most probable mixture component for each sample). It has - % `nsamples-by-1` size and `double` type. + % `labels_i = argmax_{k}(p_{i,k}), i=1..N` (indices of the most + % probable mixture component for each sample). It has + % `nsamples-by-1` size and `double` type. % * __probs__ Optional output matrix that contains posterior - % probabilities of each mixture component given the sample. - % It has `nsamples-by-ClustersNumber` size and `double` - % type. + % probabilities of each mixture component given the sample. It + % has `nsamples-by-ClustersNumber` size and `double` type. 
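To tie the training and prediction methods above together, a minimal sketch on synthetic data (the `ClustersNumber` property is documented earlier in this class; its use here is an assumption about typical setup):

    % fit a two-component Gaussian mixture and classify two query points
    X = [randn(100,2); bsxfun(@plus, randn(100,2), [5 5])];
    model = cv.EM();
    model.ClustersNumber = 2;      % property from the class docs
    model.trainEM(X);              % k-means init, then EM iterations
    [logL, labels, probs] = model.predict2([0 0; 5 5]);
    % labels holds the zero-based index of the most probable component
    % per query row; labels(1) and labels(2) should differ here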
% % See also: cv.EM.predict % diff --git a/+cv/EMD.m b/+cv/EMD.m index 455343fbe..cea462e4e 100644 --- a/+cv/EMD.m +++ b/+cv/EMD.m @@ -1,51 +1,50 @@ %EMD Computes the "minimal work" distance between two weighted point configurations % -% d = cv.EMD(signature1, signature2) -% d = cv.EMD(signature1, signature2, 'OptionName', optionValue, ...) -% [d, lowerBound, flow] = cv.EMD(...) +% d = cv.EMD(signature1, signature2) +% d = cv.EMD(signature1, signature2, 'OptionName', optionValue, ...) +% [d, lowerBound, flow] = cv.EMD(...) % % ## Input % * __signature1__ First signature, a `size1-by-(dims+1)` floating-point -% matrix. Each row stores the point weight followed by the point -% coordinates `[w,x1,x2,...,xn]`. The matrix is allowed to have a single -% column (weights only) if the user-defined `Cost` matrix is used. -% The weights must be non-negative and have at least one non-zero value. +% matrix. Each row stores the point weight followed by the point coordinates +% `[w,x1,x2,...,xn]`. The matrix is allowed to have a single column (weights +% only) if the user-defined `Cost` matrix is used. The weights must be +% non-negative and have at least one non-zero value. % * __signature2__ Second signature `size2-by-(dims+1)` of the same format as -% `signature1`, though the number of rows may be different. The total -% weights may be different. In this case an extra "dummy" point is added -% to either `signature1` or `signature2`. The weights must be -% non-negative and have at least one non-zero value. +% `signature1`, though the number of rows may be different. The total +% weights may be different. In this case an extra "dummy" point is added to +% either `signature1` or `signature2`. The weights must be non-negative and +% have at least one non-zero value. % % ## Output % * __d__ Output distance value. % * __lowerBound__ Optional output lower boundary of a distance between the -% two signatures. See 'LowerBound' in options. +% two signatures. See 'LowerBound' in options. % * __flow__ Optional resultant `size1-by-size2` flow matrix of type `single`. -% `flow(i,j)` is a flow from i-th point of `signature1` to j-th point -% of `signature2`. +% `flow(i,j)` is a flow from i-th point of `signature1` to j-th point of +% `signature2`. % % ## Options % * __DistType__ Used metric, default 'L2'. One of: -% * __L1__ Manhattan distance: `d = |x1-x2| + |y1-y2|` -% * __L2__ Euclidean distance: `d = sqrt((x1-x2)^2 + (y1-y2)^2)` -% * __C__ Chebyshev distance: `d = max(|x1-x2|,|y1-y2|)` -% * __User__ User-defined distance, means that a pre-calculated cost -% matrix is used. Should be used when `Cost` is set. +% * __L1__ Manhattan distance: `d = |x1-x2| + |y1-y2|` +% * __L2__ Euclidean distance: `d = sqrt((x1-x2)^2 + (y1-y2)^2)` +% * __C__ Chebyshev distance: `d = max(|x1-x2|,|y1-y2|)` +% * __User__ User-defined distance, means that a pre-calculated cost matrix +% is used. Should be used when `Cost` is set. % * __Cost__ User-defined `size1-by-size2` cost matrix. Also, if a cost matrix -% is used, output lower boundary `lowerBound` cannot be calculated -% because it needs a metric function. Not set by default +% is used, output lower boundary `lowerBound` cannot be calculated because +% it needs a metric function. Not set by default % * __LowerBound__ Optional input/output parameter: lower boundary of a -% distance between the two signatures that is a distance between mass -% centers. 
The lower boundary may not be calculated if the user-defined -% cost matrix is used, the total weights of point configurations are not -% equal, or if the signatures consist of weights only (the signature -% matrices have a single column). If the calculated distance between -% mass centers is greater or equal to `LowerBound` (it means that the -% signatures are far enough), the function does not calculate EMD. In -% any case `LowerBound` is set to the calculated distance between mass -% centers on return. Thus, if you want to calculate both distance -% between mass centers and EMD, `LowerBound` should be set to 0. -% default 0. +% distance between the two signatures that is a distance between mass +% centers. The lower boundary may not be calculated if the user-defined cost +% matrix is used, the total weights of point configurations are not equal, +% or if the signatures consist of weights only (the signature matrices have +% a single column). If the calculated distance between mass centers is +% greater or equal to `LowerBound` (it means that the signatures are far +% enough), the function does not calculate EMD. In any case `LowerBound` is +% set to the calculated distance between mass centers on return. Thus, if +% you want to calculate both distance between mass centers and EMD, +% `LowerBound` should be set to 0. default 0. % % The function computes the earth mover distance and/or a lower boundary of % the distance between the two weighted point configurations. One of the diff --git a/+cv/EMDL1.m b/+cv/EMDL1.m index f2d41c554..e50eabd13 100644 --- a/+cv/EMDL1.m +++ b/+cv/EMDL1.m @@ -1,12 +1,12 @@ %EMDL1 Computes the "minimal work" distance between two weighted point configurations % -% dist = cv.EMDL1(signature1, signature2) +% dist = cv.EMDL1(signature1, signature2) % % ## Input % * __signature1__ First signature, a single column floating-point matrix. -% Each row is the value of the histogram in each bin. +% Each row is the value of the histogram in each bin. % * __signature2__ Second signature of the same format and size as -% `signature1`. +% `signature1`. % % ## Output % * __dist__ output distance. diff --git a/+cv/ERTrees.m_ b/+cv/ERTrees.m_ index 9271066bc..98b25d387 100644 --- a/+cv/ERTrees.m_ +++ b/+cv/ERTrees.m_ @@ -146,8 +146,8 @@ classdef ERTrees < handle function this = ERTrees(varargin) %ERTREES Creates/trains a new Extremely Random Trees model % - % model = cv.ERTrees() - % model = cv.ERTrees(...) + % model = cv.ERTrees() + % model = cv.ERTrees(...) % % The first variant creates an empty model. Use cv.ERTrees.train % to train the model, or cv.ERTrees.load to load a pre-trained @@ -167,7 +167,7 @@ classdef ERTrees < handle function delete(this) %DELETE Destructor % - % model.delete() + % model.delete() % % See also: cv.ERTrees % @@ -181,7 +181,7 @@ classdef ERTrees < handle function clear(this) %CLEAR Clears the algorithm state % - % model.clear() + % model.clear() % % The method clear does the same job as the destructor: it % deallocates all the memory occupied by the class members. But @@ -197,11 +197,11 @@ classdef ERTrees < handle function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = model.empty() + % b = model.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). 
% % See also: cv.ERTrees.clear, cv.ERTrees.load % @@ -211,17 +211,17 @@ classdef ERTrees < handle function varargout = save(this, filename) %SAVE Saves the algorithm parameters to a file or a string % - % model.save(filename) - % str = model.save(filename) + % model.save(filename) + % str = model.save(filename) % % ## Input % * __filename__ Name of the file to save to. In case of string - % output, only the filename extension is used to determine - % the output format (XML or YAML). + % output, only the filename extension is used to determine the + % output format (XML or YAML). % % ## Output % * __str__ optional output. If requested, the model is persisted - % to a string in memory instead of writing to disk. + % to a string in memory instead of writing to disk. % % This method stores the complete model state to the specified % XML or YAML file (or to a string in memory, based on the number @@ -235,23 +235,22 @@ classdef ERTrees < handle function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % model.load(filename) - % model.load(str, 'FromString',true) - % model.load(..., 'OptionName',optionValue, ...) + % model.load(filename) + % model.load(str, 'FromString',true) + % model.load(..., 'OptionName',optionValue, ...) % % ## Input % * __filename__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model - % (switches between `Algorithm::load()` and - % `Algorithm::loadFromString()` C++ methods). - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model (switches + % between `Algorithm::load()` and + % `Algorithm::loadFromString()` C++ methods). default false % % This method loads the complete model state from the specified % XML or YAML file (either from disk or serialized string). The @@ -265,11 +264,11 @@ classdef ERTrees < handle function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = model.getDefaultName() + % name = model.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.ERTrees.save, cv.ERTrees.load % @@ -282,7 +281,7 @@ classdef ERTrees < handle function count = getVarCount(this) %GETVARCOUNT Returns the number of variables in training samples % - % count = model.getVarCount() + % count = model.getVarCount() % % ## Output % * __count__ number of variables in training samples. @@ -295,7 +294,7 @@ classdef ERTrees < handle function b = isTrained(this) %ISTRAINED Returns true if the model is trained % - % b = model.isTrained() + % b = model.isTrained() % % ## Output % * __b__ Returns true if the model is trained, false otherwise. @@ -308,11 +307,11 @@ classdef ERTrees < handle function b = isClassifier(this) %ISCLASSIFIER Returns true if the model is a classifier % - % b = model.isClassifier() + % b = model.isClassifier() % % ## Output % * __b__ Returns true if the model is a classifier, false if the - % model is a regressor. + % model is a regressor. 
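As a quick illustration of the save/load round trip documented above (the file name is hypothetical):

    % persist a trained model, then restore it into a fresh object
    model.save('ertrees_model.yml');     % extension selects XML vs YAML
    model2 = cv.ERTrees();
    model2.load('ertrees_model.yml');    % previous state is discarded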
% % See also: cv.ERTrees.isTrained % @@ -322,116 +321,111 @@ function status = train(this, samples, responses, varargin) %TRAIN Trains the Extremely Random Trees model % - % status = model.train(samples, responses) - % status = model.train(csvFilename, []) - % [...] = model.train(..., 'OptionName', optionValue, ...) + % status = model.train(samples, responses) + % status = model.train(csvFilename, []) + % [...] = model.train(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ Row vectors of features. % * __responses__ Output of the corresponding feature vectors. % * __csvFilename__ The input CSV file name from which to load - % dataset. In this variant, you should set the second - % argument to an empty array. + % the dataset. In this variant, you should set the second + % argument to an empty array. % % ## Output % * __status__ Success flag. % % ## Options % * __Data__ Training data options, specified as a cell array of - % key/value pairs of the form `{'key',val, ...}`. See below. + % key/value pairs of the form `{'key',val, ...}`. See below. % * __Flags__ The optional training flags, model-dependent. For - % convenience, you can set the individual flag options - % below, instead of directly setting bits here. default 0 + % convenience, you can set the individual flag options below, + % instead of directly setting bits here. default 0 % * __RawOutput__ See the predict method. default false % * __PredictSum__ See the predict method. default false % * __PredictMaxVote__ See the predict method. default false % % ### Options for `Data` (first variant with samples and responses) % * __Layout__ Sample types. Default 'Row'. One of: - % * __Row__ each training sample is a row of samples. - % * __Col__ each training sample occupies a column of - % samples. + % * __Row__ each training sample is a row of samples. + % * __Col__ each training sample occupies a column of samples. % * __VarIdx__ vector specifying which variables to use for - % training. It can be an integer vector (`int32`) containing - % 0-based variable indices or logical vector (`uint8` or - % `logical`) containing a mask of active variables. Not set - % by default, which uses all variables in the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based variable indices or logical vector (`uint8` or + % `logical`) containing a mask of active variables. Not set by + % default, which uses all variables in the input data. % * __SampleIdx__ vector specifying which samples to use for - % training. It can be an integer vector (`int32`) containing - % 0-based sample indices or logical vector (`uint8` or - % `logical`) containing a mask of training samples of - % interest. Not set by default, which uses all samples in - % the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based sample indices or logical vector (`uint8` or + % `logical`) containing a mask of training samples of interest. + % Not set by default, which uses all samples in the input data. % * __SampleWeights__ optional floating-point vector with weights - % for each sample. Some samples may be more important than - % others for training. You may want to raise the weight of - % certain classes to find the right balance between hit-rate - % and false-alarm rate, and so on. Not set by default, which - % effectively assigns an equal weight of 1 for all samples.
+ % for each sample. Some samples may be more important than + % others for training. You may want to raise the weight of + % certain classes to find the right balance between hit-rate and + % false-alarm rate, and so on. Not set by default, which + % effectively assigns an equal weight of 1 for all samples. % * __VarType__ optional vector of type `uint8` and size - % ` + `, - % containing types of each input and output variable. By - % default considers all variables as numerical (both input - % and output variables). In case there is only one output - % variable of integer type, it is considered categorical. - % You can also specify a cell-array of strings (or as one - % string of single characters, e.g 'NNNC'). Possible values: - % * __Numerical__, __N__ same as 'Ordered' - % * __Ordered__, __O__ ordered variables - % * __Categorical__, __C__ categorical variables + % ` + `, + % containing types of each input and output variable. By default + % considers all variables as numerical (both input and output + % variables). In case there is only one output variable of + % integer type, it is considered categorical. You can also + % specify a cell-array of strings (or as one string of single + % characters, e.g. 'NNNC'). Possible values: + % * __Numerical__, __N__ same as 'Ordered' + % * __Ordered__, __O__ ordered variables + % * __Categorical__, __C__ categorical variables % * __MissingMask__ Indicator mask for missing observation (not - % currently implemented). Not set by default + % currently implemented). Not set by default % * __TrainTestSplitCount__ divides the dataset into train/test - % sets, by specifying number of samples to use for the test - % set. By default all samples are used for the training set. + % sets, by specifying number of samples to use for the test set. + % By default all samples are used for the training set. % * __TrainTestSplitRatio__ divides the dataset into train/test - % sets, by specifying ratio of samples to use for the test - % set. By default all samples are used for the training set. + % sets, by specifying ratio of samples to use for the test set. + % By default all samples are used for the training set. % * __TrainTestSplitShuffle__ when splitting dataset into - % train/test sets, specify whether to shuffle the samples. - % Otherwise samples are assigned sequentially (first train - % then test). default true + % train/test sets, specify whether to shuffle the samples. + % Otherwise samples are assigned sequentially (first train then + % test). default true % % ### Options for `Data` (second variant for loading CSV file) % * __HeaderLineCount__ The number of lines in the beginning to - % skip; besides the header, the function also skips empty - % lines and lines staring with '#'. default 1 + % skip; besides the header, the function also skips empty lines + % and lines starting with '#'. default 1 % * __ResponseStartIdx__ Index of the first output variable. If - % -1, the function considers the last variable as the - % response. If the dataset only contains input variables and - % no responses, use `ResponseStartIdx = -2` and - % `ResponseEndIdx = 0`, then the output variables vector - % will just contain zeros. default -1 + % -1, the function considers the last variable as the response. + % If the dataset only contains input variables and no responses, + % use `ResponseStartIdx = -2` and `ResponseEndIdx = 0`, then the + % output variables vector will just contain zeros. default -1 % * __ResponseEndIdx__ Index of the last output variable + 1. If - % -1, then there is single response variable at - % `ResponseStartIdx`. default -1
+ % -1, then there is a single response variable at + % `ResponseStartIdx`. default -1 % * __VarTypeSpec__ The optional text string that specifies the - % variables' types. It has the format - % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, - % variables from `n1` to `n2` (inclusive range), `n3`, `n4` - % to `n5` ... are considered ordered and `n6`, `n7` to - % `n8` ... are considered as categorical. The range - % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` - % should cover all the variables. If `VarTypeSpec` is not - % specified, then algorithm uses the following rules: - % * all input variables are considered ordered by default. - % If some column contains has non- numerical values, e.g. - % 'apple', 'pear', 'apple', 'apple', 'mango', the - % corresponding variable is considered categorical. - % * if there are several output variables, they are all - % considered as ordered. Errors are reported when - % non-numerical values are used. - % * if there is a single output variable, then if its values - % are non-numerical or are all integers, then it's - % considered categorical. Otherwise, it's considered - % ordered. + % variables' types. It has the format + % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, variables + % from `n1` to `n2` (inclusive range), `n3`, `n4` to `n5` ... + % are considered ordered and `n6`, `n7` to `n8` ... are + % considered as categorical. The range + % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` should + % cover all the variables. If `VarTypeSpec` is not specified, + % then the algorithm uses the following rules: + % * all input variables are considered ordered by default. If + % some column contains non-numerical values, e.g. + % 'apple', 'pear', 'apple', 'apple', 'mango', the + % corresponding variable is considered categorical. + % * if there are several output variables, they are all + % considered as ordered. Errors are reported when + % non-numerical values are used. + % * if there is a single output variable, then if its values are + % non-numerical or are all integers, then it's considered + % categorical. Otherwise, it's considered ordered. % * __Delimiter__ The character used to separate values in each - % line. default ',' + % line. default ',' % * __Missing__ The character used to specify missing - % measurements. It should not be a digit. Although it's a - % non-numerical value, it surely does not affect the - % decision of whether the variable ordered or categorical. - % default '?' + % measurements. It should not be a digit. Although it's a + % non-numerical value, it surely does not affect the decision of + % whether the variable is ordered or categorical. default '?' % * __TrainTestSplitCount__ same as above. % * __TrainTestSplitRatio__ same as above. % * __TrainTestSplitShuffle__ same as above. @@ -453,10 +447,10 @@ classdef ERTrees < handle function [err,resp] = calcError(this, samples, responses, varargin) %CALCERROR Computes error on the training or test dataset % - % err = model.calcError(samples, responses) - % err = model.calcError(csvFilename, []) - % [err,resp] = model.calcError(...) - % [...] = model.calcError(..., 'OptionName', optionValue, ...) + % err = model.calcError(samples, responses) + % err = model.calcError(csvFilename, []) + % [err,resp] = model.calcError(...) + % [...] = model.calcError(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ See the train method. % * __responses__ See the train method. % * __csvFilename__ See the train method. % % ## Output % * __err__ computed error. % * __resp__ the optional output responses. % % ## Options % * __Data__ See the train method.
% * __TestError__ if true, the error is computed over the test - % subset of the data, otherwise it's computed over the - % training subset of the data. Please note that if you - % loaded a completely different dataset to evaluate an - % already trained classifier, you will probably want not to - % set the test subset at all with `TrainTestSplitRatio` and - % specify `TestError=false`, so that the error is computed - % for the whole new set. Yes, this sounds a bit confusing. - % default false + % subset of the data, otherwise it's computed over the training + % subset of the data. Please note that if you loaded a + % completely different dataset to evaluate an already trained + % classifier, you will probably want not to set the test subset + % at all with `TrainTestSplitRatio` and specify + % `TestError=false`, so that the error is computed for the whole + % new set. Yes, this sounds a bit confusing. default false % % The method uses the predict method to compute the error. For % regression models the error is computed as RMS, for classifiers @@ -491,12 +484,12 @@ classdef ERTrees < handle function [results,f] = predict(this, samples, varargin) %PREDICT Predicts response(s) for the provided sample(s) % - % [results,f] = model.predict(samples) - % [...] = model.predict(..., 'OptionName', optionValue, ...) + % [results,f] = model.predict(samples) + % [...] = model.predict(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ Input row vectors (one or more) stored as rows of - % a floating-point matrix. + % a floating-point matrix. % % ## Output % * __results__ Output labels or regression values. @@ -504,30 +497,29 @@ classdef ERTrees < handle % % ## Options % * __Flags__ The optional predict flags, model-dependent. For - % convenience, you can set the individual flag options - % below, instead of directly setting bits here. default 0 + % convenience, you can set the individual flag options below, + % instead of directly setting bits here. default 0 % * __RawOutput__ makes the method return the raw results (the - % sum), not the class label. default false + % sum), not the class label. default false % * __CompressedInput__ compressed data, containing only the - % active samples/variables. default false + % active samples/variables. default false % * __PreprocessedInput__ This parameter is normally set to false, - % implying a regular input. If it is true, the method - % assumes that all the values of the discrete input - % variables have been already normalized to 0..NCategories - % ranges since the decision tree uses such normalized - % representation internally. It is useful for faster - % prediction with tree ensembles. For ordered input - % variables, the flag is not used. Default false + % implying a regular input. If it is true, the method assumes + % that all the values of the discrete input variables have been + % already normalized to 0..NCategories ranges since the decision + % tree uses such normalized representation internally. It is + % useful for faster prediction with tree ensembles. For ordered + % input variables, the flag is not used. Default false % * __PredictAuto__ Setting this to true, overrides all of the - % other `Predict*` flags. It automatically chooses between - % `PredictSum` and `PredictMaxVote` (if the model is a - % regressor or the number of classes are 2 with `RawOutput` - % set then it picks `PredictSum`, otherwise it picks - % `PredictMaxVote` by default). default true + % other `Predict*` flags. 
+ % `PredictSum` and `PredictMaxVote` (if the model is a regressor
+ % or the number of classes is 2 with `RawOutput` set, then it
+ % picks `PredictSum`, otherwise it picks `PredictMaxVote` by
+ % default). default true
% * __PredictSum__ If true then return sum of votes instead of the
- % class label. default false
+ % class label. default false
% * __PredictMaxVote__ If true then return the class label with
- % the max vote. default false
+ % the max vote. default false
%
% This method returns the cumulative result from all the trees in
% the forest (the class that receives the majority of voices, or
@@ -544,12 +536,12 @@ classdef ERTrees < handle
function v = getVarImportance(this)
%GETVARIMPORTANCE Returns the variable importance array
%
- % v = classifier.getVarImportance()
+ % v = model.getVarImportance()
%
% ## Output
% * __v__ the variable importance vector, computed at the training
- % stage when `CalculateVarImportance` is set to true. If
- % this flag was set to false, the empty matrix is returned.
+ % stage when `CalculateVarImportance` is set to true. If this
+ % flag was set to false, the empty matrix is returned.
%
% See also: cv.ERTrees.CalculateVarImportance
%
@@ -559,7 +551,7 @@ classdef ERTrees < handle
function roots = getRoots(this)
%GETROOTS Returns indices of root nodes
%
- % roots = classifier.getRoots()
+ % roots = model.getRoots()
%
% ## Output
% * __roots__ vector of indices.
@@ -572,24 +564,22 @@ classdef ERTrees < handle
function nodes = getNodes(this)
%GETNODES Returns all the nodes
%
- % nodes = classifier.getNodes()
+ % nodes = model.getNodes()
%
% ## Output
% * __nodes__ Struct-array with the following fields:
- % * __value__ Value at the node: a class label in case of
- % classification or estimated function value in case
- % of regression.
- % * __classIdx__ Class index normalized to `0..class_count-1`
- % range and assigned to the node. It is used
- % internally in classification trees and tree
- % ensembles.
- % * __parent__ Index of the parent node.
- % * __left__ Index of the left child node.
- % * __right__ Index of right child node.
- % * __defaultDir__ Default direction where to go (-1: left
- % or +1: right). It helps in the case of missing
- % values.
- % * __split__ Index of the first split.
+ % * __value__ Value at the node: a class label in case of
+ % classification or estimated function value in case of
+ % regression.
+ % * __classIdx__ Class index normalized to `0..class_count-1`
+ % range and assigned to the node. It is used internally in
+ % classification trees and tree ensembles.
+ % * __parent__ Index of the parent node.
+ % * __left__ Index of the left child node.
+ % * __right__ Index of the right child node.
+ % * __defaultDir__ Default direction where to go (-1 left or +1
+ % right). It helps in the case of missing values.
+ % * __split__ Index of the first split.
%
% all the node indices are zero-based indices in the returned
% vector.
@@ -602,26 +592,25 @@ classdef ERTrees < handle
function splits = getSplits(this)
%GETSPLITS Returns all the splits
%
- % splits = classifier.getSplits()
+ % splits = model.getSplits()
%
% ## Output
% * __splits__ Struct-array with the following fields:
- % * __varIdx__ Index of variable on which the split is
- % created.
- % * __inversed__ If true, then the inverse split rule is
- % used (i.e. left and right branches are exchanged in
- % the rule expressions below).
- % * __quality__ The split quality, a positive number. It is
- % used to choose the best split. (It is also used to
- % compute variable importance).
- % * __next__ Index of the next split in the list of splits
- % for the node (surrogate splits).
- % * __c__ The threshold value in case of split on an ordered
- % variable. The rule is:
- % `if var_value < c, next_node = left; else next_node = right; end`
- % * __subsetOfs__ Offset of the bitset used by the split on
- % a categorical variable. The rule is:
- % `if bitset(var_value) == 1, next_node = left; else next_node = right; end`
+ % * __varIdx__ Index of variable on which the split is created.
+ % * __inversed__ If true, then the inverse split rule is used
+ % (i.e. left and right branches are exchanged in the rule
+ % expressions below).
+ % * __quality__ The split quality, a positive number. It is used
+ % to choose the best split. (It is also used to compute
+ % variable importance).
+ % * __next__ Index of the next split in the list of splits for
+ % the node (surrogate splits).
+ % * __c__ The threshold value in case of split on an ordered
+ % variable. The rule is:
+ % `if var_value < c, next_node = left; else next_node = right; end`
+ % * __subsetOfs__ Offset of the bitset used by the split on a
+ % categorical variable. The rule is:
+ % `if bitset(var_value) == 1, next_node = left; else next_node = right; end`
%
% all the split indices are zero-based indices in the returned
% vector.
@@ -634,7 +623,7 @@ classdef ERTrees < handle
function subsets = getSubsets(this)
%GETSUBSETS Returns all the bitsets for categorical splits
%
- % subsets = classifier.getSubsets()
+ % subsets = model.getSubsets()
%
% ## Output
% * __subsets__ vector of indices.
diff --git a/+cv/Estimator.m b/+cv/Estimator.m
index abb4c4db8..bdeb86136 100644
--- a/+cv/Estimator.m
+++ b/+cv/Estimator.m
@@ -12,24 +12,25 @@
%
properties (SetAccess = private)
- id % Object ID
+ % Object ID
+ id
end
methods
function this = Estimator(estimatorType, varargin)
%ESTIMATOR Constructor
%
- % obj = cv.Estimator(estimatorType)
- % obj = cv.Estimator(..., 'OptionName',optionValue, ...)
+ % obj = cv.Estimator(estimatorType)
+ % obj = cv.Estimator(..., 'OptionName',optionValue, ...)
%
% ## Input
% * __estimatorType__ Estimator type. One of:
- % * __HomographyBasedEstimator__ Homography based rotation
- % estimator.
- % * __AffineBasedEstimator__ Affine transformation based
- % estimator. This estimator uses pairwise
- % tranformations estimated by matcher to estimate
- % final transformation for each camera.
+ % * __HomographyBasedEstimator__ Homography based rotation
+ % estimator.
+ % * __AffineBasedEstimator__ Affine transformation based
+ % estimator. This estimator uses pairwise transformations
+ % estimated by the matcher to estimate the final
+ % transformation for each camera.
%
% The following are options for the various algorithms:
%
@@ -44,7 +45,7 @@
function delete(this)
%DELETE Destructor
%
- % obj.delete()
+ % obj.delete()
%
% See also: cv.Estimator
%
@@ -55,7 +56,10 @@ function delete(this)
function typename = typeid(this)
%TYPEID Name of the C++ type (RTTI)
%
- % typename = obj.typeid()
+ % typename = obj.typeid()
+ %
+ % ## Output
+ % * __typename__ Name of C++ type
%
typename = Estimator_(this.id, 'typeid');
end
@@ -66,24 +70,24 @@ function delete(this)
function [cameras,success] = estimate(this, features, pairwise_matches)
%ESTIMATE Estimates camera parameters
%
- % cameras = obj.estimate(features, pairwise_matches)
- % [cameras,success] = obj.estimate(...)
+ % cameras = obj.estimate(features, pairwise_matches)
+ % [cameras, success] = obj.estimate(...)
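A minimal usage sketch for the estimate method above, before the parameter details below. It assumes `features` and `pairwise_matches` were computed beforehand with cv.FeaturesFinder and cv.FeaturesMatcher, and the choice of 'HomographyBasedEstimator' is illustrative:

```matlab
% rotation estimation for a set of overlapping images
estimator = cv.Estimator('HomographyBasedEstimator');
[cameras, success] = estimator.estimate(features, pairwise_matches);
if success
    R = cameras(1).R;       % 3x3 rotation matrix of the first camera
    f = cameras(1).focal;   % estimated focal length
end
```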
% % ## Input % * __features__ Features of images. See cv.FeaturesFinder. - % * **pairwise_matches** Pairwise matches of images. - % See cv.FeaturesMatcher. + % * **pairwise_matches** Pairwise matches of images. See + % cv.FeaturesMatcher. % % ## Output % * __cameras__ Estimated camera parameters. Structure that - % describes camera parameters with the following fields: - % * __aspect__ Aspect ratio. - % * __focal__ Focal length. - % * __ppx__ Principal point X. - % * __ppy__ Principal point Y. - % * __R__ 3x3 camera rotation matrix. - % * __t__ 3x1 camera translation vector. - % * __K__ 3x3 camera intrinsic parameters. + % describes camera parameters with the following fields: + % * __aspect__ Aspect ratio. + % * __focal__ Focal length. + % * __ppx__ Principal point X. + % * __ppy__ Principal point Y. + % * __R__ 3x3 camera rotation matrix. + % * __t__ 3x1 camera translation vector. + % * __K__ 3x3 camera intrinsic parameters. % * __success__ True in case of success, false otherwise. % % See also: cv.Estimator.Estimator @@ -97,8 +101,8 @@ function delete(this) function [K,success] = calibrateRotatingCamera(Hs) %CALIBRATEROTATINGCAMERA Calibrate rotating camera % - % K = cv.Estimator.calibrateRotatingCamera(Hs) - % [K,success] = cv.Estimator.calibrateRotatingCamera(Hs) + % K = cv.Estimator.calibrateRotatingCamera(Hs) + % [K,success] = cv.Estimator.calibrateRotatingCamera(Hs) % % ## Input % * __Hs__ Cell-array of 3x3 double matrices. @@ -113,7 +117,7 @@ function delete(this) function focals = estimateFocal(features, pairwise_matches) %ESTIMATEFOCAL Estimates focal lengths for each given camera % - % focals = cv.Estimator.estimateFocal(features, pairwise_matches) + % focals = cv.Estimator.estimateFocal(features, pairwise_matches) % % ## Input % * __features__ Features of images. @@ -121,7 +125,7 @@ function delete(this) % % ## Output % * __focals__ Estimated focal lengths for each camera, vector of - % doubles. + % doubles. % focals = Estimator_(0, 'estimateFocal', features, pairwise_matches); end @@ -129,8 +133,8 @@ function delete(this) function [f0, f1, f0_ok, f1_ok] = focalsFromHomography(H) %FOCALSFROMHOMOGRAPHY Tries to estimate focal lengths from the given homography under the assumption that the camera undergoes rotations around its centre only % - % [f0, f1] = cv.Estimator.focalsFromHomography(H) - % [f0, f1, f0_ok, f1_ok] = cv.Estimator.focalsFromHomography(H) + % [f0, f1] = cv.Estimator.focalsFromHomography(H) + % [f0, f1, f0_ok, f1_ok] = cv.Estimator.focalsFromHomography(H) % % ## Input % * __H__ Homography, 3x3 double matrix. @@ -139,9 +143,9 @@ function delete(this) % * __f0__ Estimated focal length along X axis. % * __f1__ Estimated focal length along Y axis. % * **f0_ok** True, if `f0` was estimated successfully, false - % otherwise. + % otherwise. % * **f1_ok** True, if `f1` was estimated successfully, false - % otherwise. + % otherwise. % % ## References % > Heung-Yeung Shum and Richard Szeliski. "Construction of diff --git a/+cv/ExposureCompensator.m b/+cv/ExposureCompensator.m index 6f5216969..b1ec32a6b 100644 --- a/+cv/ExposureCompensator.m +++ b/+cv/ExposureCompensator.m @@ -5,27 +5,27 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = ExposureCompensator(compensatorType, varargin) %EXPOSURECOMPENSATOR Constructor % - % obj = cv.ExposureCompensator(compensatorType) - % obj = cv.ExposureCompensator(compensatorType, 'OptionName',optionValue, ...) 
+ % obj = cv.ExposureCompensator(compensatorType) + % obj = cv.ExposureCompensator(compensatorType, 'OptionName',optionValue, ...) % % ## Input % * __compensatorType__ exposure compensator type. One of: - % * __NoExposureCompensator__ Stub exposure compensator - % which does nothing. - % * __GainCompensator__ Exposure compensator which tries to - % remove exposure related artifacts by adjusting image - % intensities, see [BL07] and [WJ10] for details. - % * __BlocksGainCompensator__ Exposure compensator which - % tries to remove exposure related artifacts by - % adjusting image block intensities, see [UES01] for - % details. + % * __NoExposureCompensator__ Stub exposure compensator which + % does nothing. + % * __GainCompensator__ Exposure compensator which tries to + % remove exposure related artifacts by adjusting image + % intensities, see [BL07] and [WJ10] for details. + % * __BlocksGainCompensator__ Exposure compensator which tries + % to remove exposure related artifacts by adjusting image + % block intensities, see [UES01] for details. % % ## Options % The following are options for the various algorithms: @@ -61,7 +61,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.ExposureCompensator % @@ -72,7 +72,10 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() + % + % ## Output + % * __typename__ Name of C++ type % typename = ExposureCompensator_(this.id, 'typeid'); end @@ -83,14 +86,14 @@ function delete(this) function feed(this, corners, images, masks) %FEED Processes the images % - % obj.feed(corners, images, masks) + % obj.feed(corners, images, masks) % % ## Input % * __corners__ Source image top-left corners. Cell-array of 2D - % points `{[x,y], ...}`. + % points `{[x,y], ...}`. % * __images__ Source cell-array of images. % * __masks__ Cell-array of image masks to update (the value 255 - % is used to detect where image is). + % is used to detect where image is). % % See also: cv.ExposureCompensator.apply % @@ -100,7 +103,7 @@ function feed(this, corners, images, masks) function image = apply(this, index, corner, image, mask) %APPLY Compensate exposure in the specified image % - % image = obj.apply(index, corner, image, mask) + % image = obj.apply(index, corner, image, mask) % % ## Input % * __index__ Image index. @@ -122,7 +125,7 @@ function feed(this, corners, images, masks) function g = gains(this) %GAINS Gains % - % g = obj.gains() + % g = obj.gains() % % ## Output % * __g__ double vector of gains. diff --git a/+cv/FAST.m b/+cv/FAST.m index 3e8e7a2e8..694e363fb 100644 --- a/+cv/FAST.m +++ b/+cv/FAST.m @@ -1,37 +1,37 @@ %FAST Detects corners using the FAST algorithm % -% keypoints = cv.FAST(im) -% keypoints = cv.FAST(im, 'OptionName', optionValue, ...) +% keypoints = cv.FAST(im) +% keypoints = cv.FAST(im, 'OptionName', optionValue, ...) % % ## Input % * __im__ 8-bit grayscale image where keypoints (corners) are to be detected. % % ## Output % * __keypoints__ Keypoints detected on the image. A 1-by-N structure array. -% It has the following fields: -% * __pt__ coordinates of the keypoint [x,y] -% * __size__ diameter of the meaningful keypoint neighborhood -% * __angle__ computed orientation of the keypoint (-1 if not applicable). -% Its possible values are in a range [0,360) degrees. It is measured -% relative to image coordinate system (y-axis is directed downward), -% ie in clockwise. 
-% * __response__ the response by which the most strong keypoints have been
-% selected. Can be used for further sorting or subsampling.
-% * __octave__ octave (pyramid layer) from which the keypoint has been
-% extracted.
-% * **class_id** object id that can be used to clustered keypoints by an
-% object they belong to.
+% It has the following fields:
+% * __pt__ coordinates of the keypoint [x,y]
+% * __size__ diameter of the meaningful keypoint neighborhood
+% * __angle__ computed orientation of the keypoint (-1 if not applicable).
+% Its possible values are in a range [0,360) degrees. It is measured
+% relative to image coordinate system (y-axis is directed downward), i.e.
+% clockwise.
+% * __response__ the response by which the strongest keypoints have been
+% selected. Can be used for further sorting or subsampling.
+% * __octave__ octave (pyramid layer) from which the keypoint has been
+% extracted.
+% * **class_id** object id that can be used to cluster keypoints by the
+% object they belong to.
%
% ## Options
% * __Threshold__ Threshold on difference between intensity of the central
-% pixel and pixels on a circle around this pixel. See the algorithm
-% description [Rosten06]. default 10.
-% * __NonmaxSuppression__ If it is true, non-maximum supression is applied
-% to detected corners (keypoints). default true.
+% pixel and pixels on a circle around this pixel. See the algorithm
+% description [Rosten06]. default 10.
+% * __NonmaxSuppression__ If it is true, non-maximum suppression is applied to
+% detected corners (keypoints). default true.
% * __Type__ one of the three neighborhoods as defined in the paper:
-% * **TYPE_9_16** (default)
-% * **TYPE_7_12**
-% * **TYPE_5_8**
+% * **TYPE_9_16** (default)
+% * **TYPE_7_12**
+% * **TYPE_5_8**
%
% Detects corners using the FAST algorithm by [Rosten06].
%
diff --git a/+cv/FarnebackOpticalFlow.m b/+cv/FarnebackOpticalFlow.m
index 7ed1e19c8..812456a21 100644
--- a/+cv/FarnebackOpticalFlow.m
+++ b/+cv/FarnebackOpticalFlow.m
@@ -8,7 +8,8 @@
%
properties (SetAccess = private)
- id % Object ID
+ % Object ID
+ id
end
properties (Dependent)
@@ -35,7 +36,7 @@
function this = FarnebackOpticalFlow()
%FARNEBACKOPTICALFLOW Creates instance of FarnebackOpticalFlow
%
- % obj = cv.FarnebackOpticalFlow()
+ % obj = cv.FarnebackOpticalFlow()
%
% See also: cv.FarnebackOpticalFlow.calc
%
@@ -45,7 +46,7 @@
function delete(this)
%DELETE Destructor
%
- % obj.delete()
+ % obj.delete()
%
% See also: cv.FarnebackOpticalFlow
%
@@ -59,17 +60,17 @@ function delete(this)
function flow = calc(this, I0, I1, varargin)
%CALC Calculates an optical flow
%
- % flow = obj.calc(I0, I1)
- % flow = obj.calc(I0, I1, 'OptionName',optionValue, ...)
+ % flow = obj.calc(I0, I1)
+ % flow = obj.calc(I0, I1, 'OptionName',optionValue, ...)
%
% ## Input
% * __I0__ first 8-bit single-channel input image.
% * __I1__ second input image of the same size and the same type
- % as `I0`.
+ % as `I0`.
%
% ## Output
% * __flow__ computed flow image that has the same size as `I0`
- % and type `single` (2-channels).
+ % and type `single` (2-channels).
%
% ## Options
% * __InitialFlow__ specify the initial flow. Not set by default.
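As a usage sketch for the calc method documented above (I0 and I1 are assumed to be two consecutive 8-bit single-channel frames):

```matlab
obj = cv.FarnebackOpticalFlow();
flow = obj.calc(I0, I1);   % same size as I0, 2-channel single
dx = flow(:,:,1);          % horizontal displacement per pixel
dy = flow(:,:,2);          % vertical displacement per pixel
```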
@@ -82,7 +83,7 @@ function delete(this) function collectGarbage(this) %COLLECTGARBAGE Releases all inner buffers % - % obj.collectGarbage() + % obj.collectGarbage() % FarnebackOpticalFlow_(this.id, 'collectGarbage'); end @@ -93,7 +94,7 @@ function collectGarbage(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.FarnebackOpticalFlow.empty % @@ -103,11 +104,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.FarnebackOpticalFlow.clear % @@ -117,11 +118,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.FarnebackOpticalFlow.save, cv.FarnebackOpticalFlow.load % @@ -131,7 +132,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -146,21 +147,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from a file storage. % The previous model state is discarded. diff --git a/+cv/FastFeatureDetector.m b/+cv/FastFeatureDetector.m index 2bf2248dd..ec845a4e0 100644 --- a/+cv/FastFeatureDetector.m +++ b/+cv/FastFeatureDetector.m @@ -13,7 +13,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -39,14 +40,13 @@ function this = FastFeatureDetector(varargin) %FASTFEATUREDETECTOR Constructor % - % obj = cv.FastFeatureDetector() - % obj = cv.FastFeatureDetector(..., 'OptionName',optionValue, ...) + % obj = cv.FastFeatureDetector() + % obj = cv.FastFeatureDetector(..., 'OptionName',optionValue, ...) 
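Ahead of the option details below, a minimal sketch of this constructor combined with the detect method (img is assumed to be an 8-bit grayscale image; the threshold value is illustrative):

```matlab
detector = cv.FastFeatureDetector('Threshold',20, 'NonmaxSuppression',true);
keypoints = detector.detect(img);   % 1-by-N struct array of corners
```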
% % ## Options - % * __Threshold__ See cv.FastFeatureDetector.Threshold, - % default 10 + % * __Threshold__ See cv.FastFeatureDetector.Threshold, default 10 % * __NonmaxSuppression__ See - % cv.FastFeatureDetector.NonmaxSuppression, default true + % cv.FastFeatureDetector.NonmaxSuppression, default true % * __Type__ See cv.FastFeatureDetector.Type, default `TYPE_9_16` % % See also: cv.FastFeatureDetector.detect @@ -57,7 +57,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.FastFeatureDetector % @@ -68,7 +68,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -82,7 +82,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.FastFeatureDetector.empty, % cv.FastFeatureDetector.load @@ -93,11 +93,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.FastFeatureDetector.clear, % cv.FastFeatureDetector.load @@ -108,7 +108,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -124,21 +124,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -152,11 +152,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.FastFeatureDetector.save, % cv.FastFeatureDetector.load @@ -170,28 +170,26 @@ function load(this, fname_or_str, varargin) function keypoints = detect(this, img, varargin) %DETECT Detects keypoints in an image or image set % - % keypoints = obj.detect(img) - % keypoints = obj.detect(imgs) - % [...] = obj.detect(..., 'OptionName',optionValue, ...) + % keypoints = obj.detect(img) + % keypoints = obj.detect(imgs) + % [...] 
= obj.detect(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Image (first variant), 8-bit grayscale image where - % keypoints (corners) are detected. + % keypoints (corners) are detected. % * __imgs__ Image set (second variant), cell array of images. % % ## Output - % * __keypoints__ The detected keypoints. In the first variant, - % a 1-by-N structure array. In the second variant of the - % method, `keypoints{i}` is a set of keypoints detected in - % `imgs{i}`. + % * __keypoints__ The detected keypoints. In the first variant, a + % 1-by-N structure array. In the second variant of the method, + % `keypoints{i}` is a set of keypoints detected in `imgs{i}`. % % ## Options % * __Mask__ A mask specifying where to look for keypoints - % (optional). It must be a logical or 8-bit integer matrix - % with non-zero values in the region of interest. In the - % second variant, it is a cell-array of masks for each input - % image, `masks{i}` is a mask for `imgs{i}`. - % Not set by default. + % (optional). It must be a logical or 8-bit integer matrix with + % non-zero values in the region of interest. In the second + % variant, it is a cell-array of masks for each input image, + % `masks{i}` is a mask for `imgs{i}`. Not set by default. % % See also: cv.FastFeatureDetector.FastFeatureDetector % diff --git a/+cv/FeatureDetector.m b/+cv/FeatureDetector.m index 86fc4bb18..0e44167f4 100644 --- a/+cv/FeatureDetector.m +++ b/+cv/FeatureDetector.m @@ -10,49 +10,50 @@ % % ## Example % - % detector = cv.FeatureDetector('ORB'); - % keypoints = detector.detect(img); + % detector = cv.FeatureDetector('ORB'); + % keypoints = detector.detect(img); % % See also: cv.DescriptorExtractor, cv.KeyPointsFilter, cv.drawKeypoints % properties (SetAccess = private) - id % Object ID - Type % Type of the detector + % Object ID + id + % Type of the detector + Type end methods function this = FeatureDetector(detectorType, varargin) %FEATUREDETECTOR Creates a feature detector by name % - % detector = cv.FeatureDetector(type) - % detector = cv.FeatureDetector(type, 'OptionName',optionValue, ...) + % detector = cv.FeatureDetector(type) + % detector = cv.FeatureDetector(type, 'OptionName',optionValue, ...) 
% % ## Input % * __type__ The following detector types are supported: - % * __BRISK__ see cv.BRISK - % * __ORB__ see cv.ORB - % * __MSER__ see cv.MSER - % * __FastFeatureDetector__ see cv.FastFeatureDetector - % (default) - % * __GFTTDetector__ see cv.GFTTDetector - % * __SimpleBlobDetector__ see cv.SimpleBlobDetector - % * __KAZE__ see cv.KAZE - % * __AKAZE__ see cv.AKAZE - % * __AgastFeatureDetector__ see cv.AgastFeatureDetector - % * __SIFT__ see cv.SIFT (requires `xfeatures2d` module) - % * __SURF__ see cv.SURF (requires `xfeatures2d` module) - % * __StarDetector__ see cv.StarDetector (requires - % `xfeatures2d` module) - % * __MSDDetector__ see cv.MSDDetector (requires - % `xfeatures2d` module) - % * __HarrisLaplaceFeatureDetector__ see - % cv.HarrisLaplaceFeatureDetector (requires - % `xfeatures2d` module) + % * __BRISK__ see cv.BRISK + % * __ORB__ see cv.ORB + % * __MSER__ see cv.MSER + % * __FastFeatureDetector__ see cv.FastFeatureDetector (default) + % * __GFTTDetector__ see cv.GFTTDetector + % * __SimpleBlobDetector__ see cv.SimpleBlobDetector + % * __KAZE__ see cv.KAZE + % * __AKAZE__ see cv.AKAZE + % * __AgastFeatureDetector__ see cv.AgastFeatureDetector + % * __SIFT__ see cv.SIFT (requires `xfeatures2d` module) + % * __SURF__ see cv.SURF (requires `xfeatures2d` module) + % * __StarDetector__ see cv.StarDetector (requires `xfeatures2d` + % module) + % * __MSDDetector__ see cv.MSDDetector (requires `xfeatures2d` + % module) + % * __HarrisLaplaceFeatureDetector__ see + % cv.HarrisLaplaceFeatureDetector (requires `xfeatures2d` + % module) % % ## Options - % Refer to the constructors of each feature detector for a - % list of supported options. + % Refer to the constructors of each feature detector for a list of + % supported options. % % See also: cv.FeatureDetector.detect % @@ -64,7 +65,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.FeatureDetector % @@ -75,7 +76,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -89,7 +90,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.FeatureDetector.empty, cv.FeatureDetector.load % @@ -99,11 +100,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.FeatureDetector.clear, cv.FeatureDetector.load % @@ -113,7 +114,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -129,21 +130,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. 
%
% ## Options
% * __ObjName__ The optional name of the node to read (if empty,
- % the first top-level node will be used). default empty
- % * __FromString__ Logical flag to indicate whether the input is
- % a filename or a string containing the serialized model.
- % default false
+ % the first top-level node will be used). default empty
+ % * __FromString__ Logical flag to indicate whether the input is a
+ % filename or a string containing the serialized model.
+ % default false
%
% This method reads algorithm parameters from the specified XML or
% YAML file (either from disk or serialized string). The previous
@@ -157,11 +158,11 @@ function load(this, fname_or_str, varargin)
function name = getDefaultName(this)
%GETDEFAULTNAME Returns the algorithm string identifier
%
- % name = obj.getDefaultName()
+ % name = obj.getDefaultName()
%
% ## Output
% * __name__ This string is used as top level XML/YML node tag
- % when the object is saved to a file or string.
+ % when the object is saved to a file or string.
%
% See also: cv.FeatureDetector.save, cv.FeatureDetector.load
%
@@ -174,41 +175,39 @@ function load(this, fname_or_str, varargin)
function keypoints = detect(this, img, varargin)
%DETECT Detects keypoints in an image or image set
%
- % keypoints = obj.detect(img)
- % keypoints = obj.detect(imgs)
- % [...] = obj.detect(..., 'OptionName',optionValue, ...)
+ % keypoints = obj.detect(img)
+ % keypoints = obj.detect(imgs)
+ % [...] = obj.detect(..., 'OptionName',optionValue, ...)
%
% ## Input
% * __img__ Image (first variant).
% * __imgs__ Image set (second variant), cell array of images.
%
% ## Output
- % * __keypoints__ The detected keypoints. In the first variant,
- % a 1-by-N structure array. In the second variant of the
- % method, `keypoints{i}` is a set of keypoints detected in
- % `imgs{i}`. Each keypoint is a struct with the following
- % fields:
- % * __pt__ coordinates of the keypoint `[x,y]`
- % * __size__ diameter of the meaningful keypoint neighborhood
- % * __angle__ computed orientation of the keypoint (-1 if not
- % applicable); it's in [0,360) degrees and measured
- % relative to image coordinate system (y-axis is
- % directed downward), i.e in clockwise.
- % * __response__ the response by which the most strong
- % keypoints have been selected. Can be used for further
- % sorting or subsampling.
- % * __octave__ octave (pyramid layer) from which the keypoint
- % has been extracted.
- % * **class_id** object class (if the keypoints need to be
- % clustered by an object they belong to).
+ % * __keypoints__ The detected keypoints. In the first variant, a
+ % 1-by-N structure array. In the second variant of the method,
+ % `keypoints{i}` is a set of keypoints detected in `imgs{i}`.
+ % Each keypoint is a struct with the following fields:
+ % * __pt__ coordinates of the keypoint `[x,y]`
+ % * __size__ diameter of the meaningful keypoint neighborhood
+ % * __angle__ computed orientation of the keypoint (-1 if not
+ % applicable); it's in [0,360) degrees and measured relative
+ % to image coordinate system (y-axis is directed downward),
+ % i.e. clockwise.
+ % * __response__ the response by which the strongest keypoints
+ % have been selected. Can be used for further sorting or
+ % subsampling.
+ % * __octave__ octave (pyramid layer) from which the keypoint
+ % has been extracted.
+ % * **class_id** object class (if the keypoints need to be
+ % clustered by an object they belong to).
%
% ## Options
% * __Mask__ A mask specifying where to look for keypoints
- % (optional).
It must be a logical or 8-bit integer matrix - % with non-zero values in the region of interest. In the - % second variant, it is a cell-array of masks for each input - % image, `masks{i}` is a mask for `imgs{i}`. - % Not set by default. + % (optional). It must be a logical or 8-bit integer matrix with + % non-zero values in the region of interest. In the second + % variant, it is a cell-array of masks for each input image, + % `masks{i}` is a mask for `imgs{i}`. Not set by default. % % See also: cv.FeatureDetector.FeatureDetector % diff --git a/+cv/FeaturesFinder.m b/+cv/FeaturesFinder.m index 9da313f84..32f41f976 100644 --- a/+cv/FeaturesFinder.m +++ b/+cv/FeaturesFinder.m @@ -5,25 +5,25 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = FeaturesFinder(finderType, varargin) %FEATURESFINDER Constructor % - % obj = cv.FeaturesFinder(finderType) - % obj = cv.FeaturesFinder(finderType, 'OptionName',optionValue, ...) + % obj = cv.FeaturesFinder(finderType) + % obj = cv.FeaturesFinder(finderType, 'OptionName',optionValue, ...) % % ## Input % * __finderType__ Feature finder type. One of: - % * __OrbFeaturesFinder__ ORB features finder. See cv.ORB - % * __AKAZEFeaturesFinder__ AKAZE features finder. See - % cv.AKAZE - % * __SurfFeaturesFinder__ SURF features finder. See cv.SURF - % (requires `xfeatures2d` module) - % * __SurfFeaturesFinderGpu__ (requires CUDA and - % `xfeatures2d` module) + % * __OrbFeaturesFinder__ ORB features finder. See cv.ORB + % * __AKAZEFeaturesFinder__ AKAZE features finder. See cv.AKAZE + % * __SurfFeaturesFinder__ SURF features finder. See cv.SURF + % (requires `xfeatures2d` module) + % * __SurfFeaturesFinderGpu__ (requires CUDA and `xfeatures2d` + % module) % % ## Options % The following are options for the various finders: @@ -58,7 +58,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.FeaturesFinder % @@ -69,7 +69,10 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() + % + % ## Output + % * __typename__ Name of C++ type % typename = FeaturesFinder_(this.id, 'typeid'); end @@ -80,7 +83,7 @@ function delete(this) function collectGarbage(this) %COLLECTGARBAGE Frees unused memory allocated before if there is any % - % obj.collectGarbage() + % obj.collectGarbage() % % See also: cv.FeaturesFinder.FeaturesFinder % @@ -91,11 +94,11 @@ function collectGarbage(this) function tf = isThreadSafe(this) %ISTHREADSAFE Determine thread-safety % - % tf = obj.isThreadSafe() + % tf = obj.isThreadSafe() % % ## Output % * __tf__ True, if it's possible to use the same finder instance - % in parallel, false otherwise. + % in parallel, false otherwise. % % See also: cv.FeaturesFinder.FeaturesFinder % @@ -106,21 +109,21 @@ function collectGarbage(this) function features = find(this, img, varargin) %FIND Finds features in the given image % - % features = obj.find(img) - % features = obj.find(img, rois) + % features = obj.find(img) + % features = obj.find(img, rois) % % ## Input % * __img__ Source image. % * __rois__ Regions of interest. A cell array of 4-element - % vectors `{[x y w h], ...}` + % vectors `{[x y w h], ...}` % % ## Output % * __features__ Found features. 
Structure containing image
- % keypoints and descriptors with the following fields:
- % * **img_idx**
- % * **img_size**
- % * __keypoints__
- % * __descriptors__
+ % keypoints and descriptors with the following fields:
+ % * **img_idx**
+ % * **img_size**
+ % * __keypoints__
+ % * __descriptors__
%
% See also: cv.FeaturesFinder.FeaturesFinder
%
@@ -130,22 +133,22 @@ function collectGarbage(this)
function features = findParallel(this, imgs, varargin)
%FINDPARALLEL Finds features in the given images in parallel
%
- % features = obj.findParallel(imgs)
- % features = obj.findParallel(imgs, rois)
+ % features = obj.findParallel(imgs)
+ % features = obj.findParallel(imgs, rois)
%
% ## Input
% * __imgs__ Source images.
% * __rois__ Regions of interest for each image. A cell array of
- % cell arrays of 4-element vectors `{{[x y w h], ...}, ...}`
+ % cell arrays of 4-element vectors `{{[x y w h], ...}, ...}`
%
% ## Output
% * __features__ Found features for each image. Structure array
- % containing image keypoints and descriptors with the
- % following fields:
- % * **img_idx**
- % * **img_size**
- % * __keypoints__
- % * __descriptors__
+ % containing image keypoints and descriptors with the following
+ % fields:
+ % * **img_idx**
+ % * **img_size**
+ % * __keypoints__
+ % * __descriptors__
%
% See also: cv.FeaturesFinder.FeaturesFinder
%
diff --git a/+cv/FeaturesMatcher.m b/+cv/FeaturesMatcher.m
index 5c8077910..cf6adfda4 100644
--- a/+cv/FeaturesMatcher.m
+++ b/+cv/FeaturesMatcher.m
@@ -5,47 +5,47 @@
%
properties (SetAccess = private)
- id % Object ID
+ % Object ID
+ id
end
methods
function this = FeaturesMatcher(matcherType, varargin)
%FEATURESMATCHER Constructor
%
- % obj = cv.FeaturesMatcher(matcherType)
- % obj = cv.FeaturesMatcher(matcherType, 'OptionName',optionValue, ...)
+ % obj = cv.FeaturesMatcher(matcherType)
+ % obj = cv.FeaturesMatcher(matcherType, 'OptionName',optionValue, ...)
%
% ## Input
% * __matcherType__ One of:
- % * __BestOf2NearestMatcher__ A "best of 2 nearest" matcher.
- % Features matcher which finds two best matches for
- % each feature and leaves the best one only if the
- % ratio between descriptor distances is greater than
- % the threshold `MatchConf`.
- % * __BestOf2NearestRangeMatcher__
- % * __AffineBestOf2NearestMatcher__ A "best of 2 nearest"
- % matcher that expects affine trasformation between
- % images. Features matcher similar to
- % `BestOf2NearestMatcher` which finds two best matches
- % for each feature and leaves the best one only if the
- % ratio between descriptor distances is greater than
- % the threshold `MatchConf`.
- % Unlike `BestOf2NearestMatcher` this matcher uses
- % affine transformation (affine trasformation estimate
- % will be placed in `matches_info`).
+ % * __BestOf2NearestMatcher__ A "best of 2 nearest" matcher.
+ % Features matcher which finds the two best matches for each
+ % feature and leaves the best one only if the ratio between
+ % descriptor distances is greater than the threshold
+ % `MatchConf`.
+ % * __BestOf2NearestRangeMatcher__
+ % * __AffineBestOf2NearestMatcher__ A "best of 2 nearest"
+ % matcher that expects an affine transformation between
+ % images. Features matcher similar to `BestOf2NearestMatcher`
+ % which finds the two best matches for each feature and leaves
+ % the best one only if the ratio between descriptor distances
+ % is greater than the threshold `MatchConf`. Unlike
+ % `BestOf2NearestMatcher`, this matcher uses an affine
+ % transformation (the affine transformation estimate will be
+ % placed in `matches_info`).
%
% ## Options
% The following are options accepted by all matchers:
%
% * __TryUseGPU__ Should try to use GPU or not. default false
% * __MatchConf__ Match distances ration threshold. Confidence for
- % feature matching step. default 0.3
+ % feature matching step. default 0.3
% * __NumMatchesThresh1__ Minimum number of matches required for
- % the 2D projective transform estimation used in the inliers
- % classification step. default 6
+ % the 2D projective transform estimation used in the inliers
+ % classification step. default 6
% * __NumMatchesThresh2__ Minimum number of matches required for
- % the 2D projective transform re-estimation on inliers.
- % default 6
+ % the 2D projective transform re-estimation on inliers.
+ % default 6
%
% The following are options for the various algorithms:
%
@@ -54,10 +54,10 @@
%
% ### `AffineBestOf2NearestMatcher`
% * __FullAffine__ whether to use full affine transformation with
- % 6 degress of freedom (cv.estimateAffine2D) or reduced
- % transformation with 4 degrees of freedom
- % (cv.estimateAffinePartial2D) using only rotation,
- % translation and uniform scaling. default false
+ % 6 degrees of freedom (cv.estimateAffine2D) or reduced
+ % transformation with 4 degrees of freedom
+ % (cv.estimateAffinePartial2D) using only rotation, translation
+ % and uniform scaling. default false
%
% See also: cv.FeaturesMatcher.match
%
@@ -67,7 +67,7 @@
function delete(this)
%DELETE Destructor
%
- % obj.delete()
+ % obj.delete()
%
% See also: cv.FeaturesMatcher
%
@@ -78,7 +78,10 @@ function delete(this)
function typename = typeid(this)
%TYPEID Name of the C++ type (RTTI)
%
- % typename = obj.typeid()
+ % typename = obj.typeid()
+ %
+ % ## Output
+ % * __typename__ Name of C++ type
%
typename = FeaturesMatcher_(this.id, 'typeid');
end
@@ -89,7 +92,7 @@ function delete(this)
function collectGarbage(this)
%COLLECTGARBAGE Frees unused memory allocated before if there is any
%
- % obj.collectGarbage()
+ % obj.collectGarbage()
%
% See also: cv.FeaturesMatcher.FeaturesMatcher
%
@@ -99,11 +102,11 @@ function collectGarbage(this)
function tf = isThreadSafe(this)
%ISTHREADSAFE Check if matcher is thread safe
%
- % tf = obj.isThreadSafe()
+ % tf = obj.isThreadSafe()
%
% ## Output
% * __tf__ True, if it's possible to use the same matcher instance
- % in parallel, false otherwise.
+ % in parallel, false otherwise.
%
% See also: cv.FeaturesMatcher.FeaturesMatcher
%
@@ -113,7 +116,7 @@ function collectGarbage(this)
function matches_info = match(this, features1, features2)
%MATCH Performs images matching
%
- % matches_info = obj.match(features1, features2)
+ % matches_info = obj.match(features1, features2)
%
% ## Input
% * __features1__ First image features. See cv.FeaturesFinder.
%
@@ -121,22 +124,20 @@ function collectGarbage(this)
%
% ## Output
% * **matches_info** Found matches. Structure containing
- % information about matches between two images.
- % It's assumed that there is a transformation between those
- % images. Transformation may be homography or affine
- % transformation based on selected matcher. Struct with the
- % following fields:
- % * **src_img_idx** Images indices (optional).
- % * **dst_img_idx** Images indices (optional).
- % * __matches__ Matches. A 1-by-N structure array with the
- % following fields:
- % `{'queryIdx', 'trainIdx', 'imgIdx', 'distance'}`
- % * **inliers_mask** Geometrically consistent matches mask.
- % * **num_inliers** Number of geometrically consistent
- % matches.
- % * __H__ Estimated transformation.
- % * __confidence__ Confidence two images are from the same - % panorama. + % information about matches between two images. It's assumed + % that there is a transformation between those images. + % Transformation may be homography or affine transformation + % based on selected matcher. Struct with the following fields: + % * **src_img_idx** Images indices (optional). + % * **dst_img_idx** Images indices (optional). + % * __matches__ Matches. A 1-by-N structure array with the + % following fields: + % `{'queryIdx', 'trainIdx', 'imgIdx', 'distance'}` + % * **inliers_mask** Geometrically consistent matches mask. + % * **num_inliers** Number of geometrically consistent matches. + % * __H__ Estimated transformation. + % * __confidence__ Confidence two images are from the same + % panorama. % % See also: cv.FeaturesMatcher.match_pairwise, % cv.FeaturesFinder.find @@ -147,19 +148,19 @@ function collectGarbage(this) function pairwise_matches = match_pairwise(this, features, varargin) %MATCH_PAIRWISE Performs images matching % - % pairwise_matches = obj.match_pairwise(features) - % pairwise_matches = obj.match_pairwise(features, 'OptionName',optionValue, ...) + % pairwise_matches = obj.match_pairwise(features) + % pairwise_matches = obj.match_pairwise(features, 'OptionName',optionValue, ...) % % ## Input % * __features__ Features of the source images. - % See cv.FeaturesFinder. + % See cv.FeaturesFinder. % % ## Output % * **pairwise_matches** Found pairwise matches. % % ## Options % * __Mask__ Mask indicating which image pairs must be matched. - % default empty + % default empty % % The function is parallelized with the TBB library. % @@ -174,17 +175,17 @@ function collectGarbage(this) function str = matchesGraphAsString(pairwise_matches, conf_threshold) %MATCHESGRAPHASSTRING Covert matches to graph % - % str = cv.FeaturesMatcher.matchesGraphAsString(pairwise_matches, conf_threshold) + % str = cv.FeaturesMatcher.matchesGraphAsString(pairwise_matches, conf_threshold) % % ## Input % * **pairwise_matches** Pairwise matches. % * **conf_threshold** Threshold for two images are from the same - % panorama confidence. + % panorama confidence. % % ## Output - % * __str__ matches graph represented in DOT language. - % Labels description: `Nm` is number of matches, `Ni` is - % number of inliers, `C` is confidence. + % * __str__ matches graph represented in DOT language. Labels + % description: `Nm` is number of matches, `Ni` is number of + % inliers, `C` is confidence. % % Returns matches graph representation in DOT language. % @@ -194,13 +195,13 @@ function collectGarbage(this) function indices = leaveBiggestComponent(features, pairwise_matches, conf_threshold) %LEAVEBIGGESTCOMPONENT Leave biggest component % - % indices = cv.FeaturesMatcher.leaveBiggestComponent(features, pairwise_matches, conf_threshold) + % indices = cv.FeaturesMatcher.leaveBiggestComponent(features, pairwise_matches, conf_threshold) % % ## Input % * __features__ Features of the source images. % * **pairwise_matches** Pairwise matches. % * **conf_threshold** Threshold for two images are from the same - % panorama confidence. + % panorama confidence. % % ## Output % * __indices__ array of image indices (0-based). 
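To make the flow between these matcher utilities concrete, a hedged end-to-end sketch (imgs is assumed to be a cell array of overlapping images; the confidence threshold of 1.0 is illustrative):

```matlab
finder = cv.FeaturesFinder('OrbFeaturesFinder');
features = finder.findParallel(imgs);
matcher = cv.FeaturesMatcher('BestOf2NearestMatcher');
pairwise_matches = matcher.match_pairwise(features);
% DOT representation of the match graph, and the largest consistent subset
str = cv.FeaturesMatcher.matchesGraphAsString(pairwise_matches, 1.0);
indices = cv.FeaturesMatcher.leaveBiggestComponent(features, pairwise_matches, 1.0);
```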
diff --git a/+cv/FileStorage.m b/+cv/FileStorage.m index 2e187743c..89ecefc65 100644 --- a/+cv/FileStorage.m +++ b/+cv/FileStorage.m @@ -1,37 +1,37 @@ %FILESTORAGE Reading from or writing to a XML/YAML/JSON file storage % -% S = cv.FileStorage(source) -% [S,~] = cv.FileStorage(source) +% S = cv.FileStorage(source) +% [S,~] = cv.FileStorage(source) % -% cv.FileStorage(source, S) -% cv.FileStorage(source, X1, X2, ...) -% str = cv.FileStorage(source, S) -% str = cv.FileStorage(source, X1, X2, ...) +% cv.FileStorage(source, S) +% cv.FileStorage(source, X1, X2, ...) +% str = cv.FileStorage(source, S) +% str = cv.FileStorage(source, X1, X2, ...) % % ## Input % * __source__ Name of the file to open or the text string to read the data -% from. Extension of the file (`.xml`, `.yml`/`.yaml`, or `.json`) -% determines its format (XML, YAML or JSON respectively). Also you can -% append `.gz` to work with gzip-compressed files, for example -% `myHugeMatrix.xml.gz`. When serializing to a string, `source` is used -% just to specify the output file format (e.g. `mydata.xml`, `.yml`). -% A file name can also contain parameters like `file.yml?base64` -% (case sensitive) in which case it outputs sequences in Base64 encoding -% rather than in plain text. +% from. Extension of the file (`.xml`, `.yml`/`.yaml`, or `.json`) +% determines its format (XML, YAML or JSON respectively). Also you can +% append `.gz` to work with gzip-compressed files, for example +% `myHugeMatrix.xml.gz`. When serializing to a string, `source` is used just +% to specify the output file format (e.g. `mydata.xml`, `.yml`). A file name +% can also contain parameters like `file.yml?base64` (case sensitive) in +% which case it outputs sequences in Base64 encoding rather than in plain +% text. % * __S__ Scalar struct to be written to file. Each field represents an object -% where the field name is the variable name, and the field value is the -% object value. +% where the field name is the variable name, and the field value is the +% object value. % * __X1__, __X2__, __...__ objects to be written to file. This is equivalent -% to the previous format with `S = struct('name',{{X1,X2,...}})` with -% a placeholder field name based on the filename. +% to the previous format with `S = struct('name',{{X1,X2,...}})` with a +% placeholder field name based on the filename. % % ## Output % * __S__ Scalar struct read from file. Each field represents a variable. % * __str__ optional output when writing. If requested, the data is persisted -% to a string in memory instead of writing to disk. +% to a string in memory instead of writing to disk. % % The function reads or writes a MATLAB object from/to a -% [XML](http://www.w3c.org/XML), [YAML](http://www.yaml.org), or +% [XML](http://www.w3.org/XML), [YAML](http://www.yaml.org), or % [JSON](http://www.json.org/) file. The file is compatible with OpenCV % formats. % @@ -44,12 +44,12 @@ % objects (`X1`, `X2`, ...) in that struct. In other words the following % forms are equivalent: % -% vars = {'hi', pi, magic(5)}; -% cv.FileStorage('mydata.xml', vars{:}); +% vars = {'hi', pi, magic(5)}; +% cv.FileStorage('mydata.xml', vars{:}); % -% S = struct(); -% S.(name) = vars; -% cv.FileStorage('mydata.xml', S); +% S = struct(); +% S.(name) = vars; +% cv.FileStorage('mydata.xml', S); % % where `name` is a default object name generated from the filename. 
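As a sketch of the in-memory variant described above: per the Output section, requesting the string output persists the data to a string instead of a file, and (assuming mexopencv's convention for the `[S,~]` signature) the ignored second output marks the input as a serialized string rather than a filename:

```matlab
% serialize a struct to a YAML-formatted string in memory
str = cv.FileStorage('mydata.yml', struct('A',magic(4), 'name','test'));
% parse the string back into a struct
[S,~] = cv.FileStorage(str);
```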
%
@@ -72,81 +72,87 @@
%
% Writing to a file:
%
-% % export two variables to a YAML file
-% % first is a 2x3 matrix named field1, second is a string named field2
-% S = struct('field1',randn(2,3), 'field2','this is the second field');
-% cv.FileStorage('my.yml', S);
+% % export two variables to a YAML file
+% % first is a 2x3 matrix named field1, second is a string named field2
+% S = struct('field1',randn(2,3), 'field2','this is the second field');
+% cv.FileStorage('my.yml', S);
%
% Reading from a file:
%
-% % import variables from YAML file
-% S = cv.FileStorage('my.yml');
-% S.field1 % matrix
-% S.field2 % string
+% % import variables from YAML file
+% S = cv.FileStorage('my.yml');
+% S.field1 % matrix
+% S.field2 % string
%
% Replace '.yml' with '.xml' to use XML format.
%
% ## Example
% Below is an example of four variables stored in XML, YAML and JSON files:
%
-% >> S = struct('var1',magic(3), 'var2','foo bar', 'var3',1, 'var4',{{2 3}});
-% >> cv.FileStorage('test.xml', S);
-% >> cv.FileStorage('test.yml', S);
-% >> cv.FileStorage('test.json', S);
-% >> S = cv.FileStorage('test.xml')
-% S =
-% var1: [3x3 double]
-% var2: 'foo bar'
-% var3: 1
-% var4: {[2] [3]}
-%
-% __XML__
-%
-% <?xml version="1.0"?>
-% <opencv_storage>
-% <var1 type_id="opencv-matrix">
-%   <rows>3</rows>
-%   <cols>3</cols>
-%   <dt>d</dt>
-%   <data>
-%     8. 1. 6. 3. 5. 7. 4. 9. 2.</data></var1>
-% <var2>"foo bar"</var2>
-% <var3>1.</var3>
-% <var4><_>2.</_><_>3.</_></var4>
-% </opencv_storage>
-%
-% __YAML__
-%
-% %YAML:1.0
-% ---
-% var1: !!opencv-matrix
-% rows: 3
-% cols: 3
-% dt: d
-% data: [ 8., 1., 6., 3., 5., 7., 4., 9., 2. ]
-% var2: foo bar
-% var3: 1.
-% var4:
-% - 2.
-% - 3.
-%
-% __JSON__
-%
-% {
-% "var1": {
-% "type_id": "opencv-matrix",
-% "rows": 3,
-% "cols": 3,
-% "dt": "d",
-% "data": [ 8., 1., 6., 3., 5., 7., 4., 9., 2. ]
-% },
-% "var2": "foo bar",
-% "var3": 1.,
-% "var4": [
-% 2.,
-% 3.
-% ]
-% }
+% >> S = struct('var1',magic(3), 'var2','foo bar', 'var3',1, 'var4',{{2 3}});
+% >> cv.FileStorage('test.xml', S);
+% >> cv.FileStorage('test.yml', S);
+% >> cv.FileStorage('test.json', S);
+% >> S = cv.FileStorage('test.xml')
+% S =
+% var1: [3x3 double]
+% var2: 'foo bar'
+% var3: 1
+% var4: {[2] [3]}
+%
+% * __XML__
+%
+% ```xml
+% <?xml version="1.0"?>
+% <opencv_storage>
+% <var1 type_id="opencv-matrix">
+%   <rows>3</rows>
+%   <cols>3</cols>
+%   <dt>d</dt>
+%   <data>
+%     8. 1. 6. 3. 5. 7. 4. 9. 2.</data></var1>
+% <var2>"foo bar"</var2>
+% <var3>1.</var3>
+% <var4><_>2.</_><_>3.</_></var4>
+% </opencv_storage>
+% ``` +% +% * __YAML__ +% +% ```yaml +% %YAML:1.0 +% --- +% var1: !!opencv-matrix +% rows: 3 +% cols: 3 +% dt: d +% data: [ 8., 1., 6., 3., 5., 7., 4., 9., 2. ] +% var2: foo bar +% var3: 1. +% var4: +% - 2. +% - 3. +% ``` +% +% * __JSON__ +% +% ```json +% { +% "var1": { +% "type_id": "opencv-matrix", +% "rows": 3, +% "cols": 3, +% "dt": "d", +% "data": [ 8., 1., 6., 3., 5., 7., 4., 9., 2. ] +% }, +% "var2": "foo bar", +% "var3": 1., +% "var4": [ +% 2., +% 3. +% ] +% } +% ``` % % See also: load, save, xmlread, xmlwrite, jsonencode, jsondecode, % netcdf, h5info, hdfinfo, hdftool, cdflib diff --git a/+cv/GBTrees.m_ b/+cv/GBTrees.m_ index 1c2821201..e1a067d6f 100644 --- a/+cv/GBTrees.m_ +++ b/+cv/GBTrees.m_ @@ -20,7 +20,7 @@ classdef GBTrees < handle % `sum_{i=1..N} L(y_i, F(x_i)) = L(F(x_1), F(x_2), ... , F(x_N)) = L(F)`. % And the `L(F)` gradient can be computed as follows: % - % grad(L(F)) = ( dL(y_1,F(x_1))/dF(x_1), ... , dL(y_N,F(x_N))/dF(x_N) ) + % grad(L(F)) = ( dL(y_1,F(x_1))/dF(x_1), ... , dL(y_N,F(x_N))/dF(x_N) ) % % At every training step, a single regression tree is built to predict an % antigradient vector components. Step length is computed corresponding to @@ -44,12 +44,12 @@ classdef GBTrees < handle % % * __Huber__ Huber loss: % - % { delta * (|y-f(x)| - delta/2) , |y-f(x)| > delta - % L(y,f(x)) = { - % { 0.5 * (y-f(x))^2 , |y-f(x)| <= delta + % { delta * (|y-f(x)| - delta/2) , |y-f(x)| > delta + % L(y,f(x)) = { + % { 0.5 * (y-f(x))^2 , |y-f(x)| <= delta % - % where `delta` is the `alpha`-quantile estimation of the `|y-f(x)|`. - % In the current implementation `alpha=0.2`. + % where `delta` is the `alpha`-quantile estimation of the `|y-f(x)|`. In + % the current implementation `alpha=0.2`. % % The following loss functions are implemented for classification % problems: @@ -62,7 +62,7 @@ classdef GBTrees < handle % % As a result, you get the following model: % - % f(x) = f_0 + nu * sum_{i=1..M} T_i(x) + % f(x) = f_0 + nu * sum_{i=1..M} T_i(x) % % where `f_0` is the initial guess (the best constant model) and `nu` is a % regularization parameter from the interval `(0,1]`, further called @@ -202,8 +202,8 @@ classdef GBTrees < handle function this = GBTrees(varargin) %GBTREES Creates/trains a new GBTrees model % - % model = cv.GBTrees - % model = cv.GBTrees(...) + % model = cv.GBTrees + % model = cv.GBTrees(...) % % The first variant creates an empty model. Use cv.GBTrees.train % to train the model, or cv.GBTrees.load to load a pre-trained @@ -223,7 +223,7 @@ classdef GBTrees < handle function delete(this) %DELETE Destructor % - % model.delete() + % model.delete() % % See also: cv.GBTrees % @@ -237,7 +237,7 @@ classdef GBTrees < handle function clear(this) %CLEAR Clears the algorithm state % - % model.clear() + % model.clear() % % The method clear does the same job as the destructor: it % deallocates all the memory occupied by the class members. But @@ -253,11 +253,11 @@ classdef GBTrees < handle function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = model.empty() + % b = model.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). 
% % See also: cv.GBTrees.clear, cv.GBTrees.load % @@ -267,17 +267,17 @@ classdef GBTrees < handle function varargout = save(this, filename) %SAVE Saves the algorithm parameters to a file or a string % - % model.save(filename) - % str = model.save(filename) + % model.save(filename) + % str = model.save(filename) % % ## Input % * __filename__ Name of the file to save to. In case of string - % output, only the filename extension is used to determine - % the output format (XML or YAML). + % output, only the filename extension is used to determine the + % output format (XML or YAML). % % ## Output % * __str__ optional output. If requested, the model is persisted - % to a string in memory instead of writing to disk. + % to a string in memory instead of writing to disk. % % This method stores the complete model state to the specified % XML or YAML file (or to a string in memory, based on the number @@ -291,23 +291,22 @@ classdef GBTrees < handle function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % model.load(filename) - % model.load(str, 'FromString',true) - % model.load(..., 'OptionName',optionValue, ...) + % model.load(filename) + % model.load(str, 'FromString',true) + % model.load(..., 'OptionName',optionValue, ...) % % ## Input % * __filename__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model - % (switches between `Algorithm::load()` and - % `Algorithm::loadFromString()` C++ methods). - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model (switches + % between `Algorithm::load()` and + % `Algorithm::loadFromString()` C++ methods). default false % % This method loads the complete model state from the specified % XML or YAML file (either from disk or serialized string). The @@ -321,11 +320,11 @@ classdef GBTrees < handle function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = model.getDefaultName() + % name = model.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.GBTrees.save, cv.GBTrees.load % @@ -338,7 +337,7 @@ classdef GBTrees < handle function count = getVarCount(this) %GETVARCOUNT Returns the number of variables in training samples % - % count = model.getVarCount() + % count = model.getVarCount() % % ## Output % * __count__ number of variables in training samples. @@ -351,7 +350,7 @@ classdef GBTrees < handle function b = isTrained(this) %ISTRAINED Returns true if the model is trained % - % b = model.isTrained() + % b = model.isTrained() % % ## Output % * __b__ Returns true if the model is trained, false otherwise. @@ -364,11 +363,11 @@ classdef GBTrees < handle function b = isClassifier(this) %ISCLASSIFIER Returns true if the model is a classifier % - % b = model.isClassifier() + % b = model.isClassifier() % % ## Output % * __b__ Returns true if the model is a classifier, false if the - % model is a regressor. + % model is a regressor. 
% % If `LossFunctionType='Deviance'` the problem is classification, % and regression otherwise. @@ -381,112 +380,107 @@ classdef GBTrees < handle function status = train(this, samples, responses, varargin) %TRAIN Trains a Gradient boosted tree model % - % status = model.train(samples, responses) - % status = model.train(csvFilename, []) - % [...] = model.train(..., 'OptionName', optionValue, ...) + % status = model.train(samples, responses) + % status = model.train(csvFilename, []) + % [...] = model.train(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ Row vectors of feature. % * __responses__ Output of the corresponding feature vectors. % * __csvFilename__ The input CSV file name from which to load - % dataset. In this variant, you should set the second - % argument to an empty array. + % dataset. In this variant, you should set the second argument + % to an empty array. % % ## Output % * __status__ Success flag. % % ## Options % * __Data__ Training data options, specified as a cell array of - % key/value pairs of the form `{'key',val, ...}`. See below. + % key/value pairs of the form `{'key',val, ...}`. See below. % * __Flags__ The optional training flags, model-dependent. - % Not used. default 0 + % Not used. default 0 % % ### Options for `Data` (first variant with samples and reponses) % * __Layout__ Sample types. Default 'Row'. One of: - % * __Row__ each training sample is a row of samples. - % * __Col__ each training sample occupies a column of - % samples. + % * __Row__ each training sample is a row of samples. + % * __Col__ each training sample occupies a column of samples. % * __VarIdx__ vector specifying which variables to use for - % training. It can be an integer vector (`int32`) containing - % 0-based variable indices or logical vector (`uint8` or - % `logical`) containing a mask of active variables. Not set - % by default, which uses all variables in the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based variable indices or logical vector (`uint8` or + % `logical`) containing a mask of active variables. Not set by + % default, which uses all variables in the input data. % * __SampleIdx__ vector specifying which samples to use for - % training. It can be an integer vector (`int32`) containing - % 0-based sample indices or logical vector (`uint8` or - % `logical`) containing a mask of training samples of - % interest. Not set by default, which uses all samples in - % the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based sample indices or logical vector (`uint8` or + % `logical`) containing a mask of training samples of interest. + % Not set by default, which uses all samples in the input data. % * __SampleWeights__ optional floating-point vector with weights - % for each sample. Some samples may be more important than - % others for training. You may want to raise the weight of - % certain classes to find the right balance between hit-rate - % and false-alarm rate, and so on. Not set by default, which - % effectively assigns an equal weight of 1 for all samples. + % for each sample. Some samples may be more important than + % others for training. You may want to raise the weight of + % certain classes to find the right balance between hit-rate and + % false-alarm rate, and so on. Not set by default, which + % effectively assigns an equal weight of 1 for all samples. % * __VarType__ optional vector of type `uint8` and size - % ` + `, - % containing types of each input and output variable. 
By
- % default considers all variables as numerical (both input
- % and output variables). In case there is only one output
- % variable of integer type, it is considered categorical.
- % You can also specify a cell-array of strings (or as one
- % string of single characters, e.g 'NNNC'). Possible values:
- % * __Numerical__, __N__ same as 'Ordered'
- % * __Ordered__, __O__ ordered variables
- % * __Categorical__, __C__ categorical variables
+ % `<number_of_variables_in_samples> + <number_of_variables_in_responses>`,
+ % containing types of each input and output variable. By default
+ % considers all variables as numerical (both input and output
+ % variables). In case there is only one output variable of
+ % integer type, it is considered categorical. You can also
+ % specify a cell-array of strings (or as one string of single
+ % characters, e.g. 'NNNC'). Possible values:
+ % * __Numerical__, __N__ same as 'Ordered'
+ % * __Ordered__, __O__ ordered variables
+ % * __Categorical__, __C__ categorical variables
 % * __MissingMask__ Indicator mask for missing observation (not
- % currently implemented). Not set by default
+ % currently implemented). Not set by default
 % * __TrainTestSplitCount__ divides the dataset into train/test
- % sets, by specifying number of samples to use for the test
- % set. By default all samples are used for the training set.
+ % sets, by specifying number of samples to use for the test set.
+ % By default all samples are used for the training set.
 % * __TrainTestSplitRatio__ divides the dataset into train/test
- % sets, by specifying ratio of samples to use for the test
- % set. By default all samples are used for the training set.
+ % sets, by specifying ratio of samples to use for the test set.
+ % By default all samples are used for the training set.
 % * __TrainTestSplitShuffle__ when splitting dataset into
- % train/test sets, specify whether to shuffle the samples.
- % Otherwise samples are assigned sequentially (first train
- % then test). default true
+ % train/test sets, specify whether to shuffle the samples.
+ % Otherwise samples are assigned sequentially (first train then
+ % test). default true
 %
 % ### Options for `Data` (second variant for loading CSV file)
 % * __HeaderLineCount__ The number of lines in the beginning to
- % skip; besides the header, the function also skips empty
- % lines and lines staring with '#'. default 1
+ % skip; besides the header, the function also skips empty lines
+ % and lines starting with '#'. default 1
 % * __ResponseStartIdx__ Index of the first output variable. If
- % -1, the function considers the last variable as the
- % response. If the dataset only contains input variables and
- % no responses, use `ResponseStartIdx = -2` and
- % `ResponseEndIdx = 0`, then the output variables vector
- % will just contain zeros. default -1
+ % -1, the function considers the last variable as the response.
+ % If the dataset only contains input variables and no responses,
+ % use `ResponseStartIdx = -2` and `ResponseEndIdx = 0`, then the
+ % output variables vector will just contain zeros. default -1
 % * __ResponseEndIdx__ Index of the last output variable + 1. If
- % -1, then there is single response variable at
- % `ResponseStartIdx`. default -1
+ % -1, then there is a single response variable at
+ % `ResponseStartIdx`. default -1
 % * __VarTypeSpec__ The optional text string that specifies the
- % variables' types. It has the format
- % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is,
- % variables from `n1` to `n2` (inclusive range), `n3`, `n4`
- % to `n5` ... are considered ordered and `n6`, `n7` to
- % `n8` ... are considered as categorical. The range
- % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]`
- % should cover all the variables. If `VarTypeSpec` is not
- % specified, then algorithm uses the following rules:
- % * all input variables are considered ordered by default.
- % If some column contains has non- numerical values, e.g.
- % 'apple', 'pear', 'apple', 'apple', 'mango', the
- % corresponding variable is considered categorical.
- % * if there are several output variables, they are all
- % considered as ordered. Errors are reported when
- % non-numerical values are used.
- % * if there is a single output variable, then if its values
- % are non-numerical or are all integers, then it's
- % considered categorical. Otherwise, it's considered
- % ordered.
+ % variables' types. It has the format
+ % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, variables
+ % from `n1` to `n2` (inclusive range), `n3`, `n4` to `n5` ...
+ % are considered ordered and `n6`, `n7` to `n8` ... are
+ % considered categorical. The range
+ % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` should
+ % cover all the variables. If `VarTypeSpec` is not specified,
+ % then the algorithm uses the following rules:
+ % * all input variables are considered ordered by default. If
+ % some column contains non-numerical values, e.g.
+ % 'apple', 'pear', 'apple', 'apple', 'mango', the
+ % corresponding variable is considered categorical.
+ % * if there are several output variables, they are all
+ % considered ordered. Errors are reported when
+ % non-numerical values are used.
+ % * if there is a single output variable, then if its values are
+ % non-numerical or are all integers, then it's considered
+ % categorical. Otherwise, it's considered ordered.
 % * __Delimiter__ The character used to separate values in each
- % line. default ','
+ % line. default ','
 % * __Missing__ The character used to specify missing
- % measurements. It should not be a digit. Although it's a
- % non-numerical value, it surely does not affect the
- % decision of whether the variable ordered or categorical.
- % default '?'
+ % measurements. It should not be a digit. Although it's a
+ % non-numerical value, it surely does not affect the decision of
+ % whether the variable is ordered or categorical. default '?'
 % * __TrainTestSplitCount__ same as above.
 % * __TrainTestSplitRatio__ same as above.
 % * __TrainTestSplitShuffle__ same as above.
@@ -508,10 +502,10 @@ classdef GBTrees < handle
 function [err,resp] = calcError(this, samples, responses, varargin)
 %CALCERROR Computes error on the training or test dataset
 %
- % err = model.calcError(samples, responses)
- % err = model.calcError(csvFilename, [])
- % [err,resp] = model.calcError(...)
- % [...] = model.calcError(..., 'OptionName', optionValue, ...)
+ % err = model.calcError(samples, responses)
+ % err = model.calcError(csvFilename, [])
+ % [err,resp] = model.calcError(...)
+ % [...] = model.calcError(..., 'OptionName', optionValue, ...)
 %
 % ## Input
 % * __samples__ See the train method.
@@ -525,14 +519,13 @@
 % ## Options
 % * __Data__ See the train method.
 % * __TestError__ if true, the error is computed over the test
- % subset of the data, otherwise it's computed over the
- % training subset of the data.
Please note that if you - % loaded a completely different dataset to evaluate an - % already trained classifier, you will probably want not to - % set the test subset at all with `TrainTestSplitRatio` and - % specify `TestError=false`, so that the error is computed - % for the whole new set. Yes, this sounds a bit confusing. - % default false + % subset of the data, otherwise it's computed over the training + % subset of the data. Please note that if you loaded a + % completely different dataset to evaluate an already trained + % classifier, you will probably want not to set the test subset + % at all with `TrainTestSplitRatio` and specify + % `TestError=false`, so that the error is computed for the whole + % new set. Yes, this sounds a bit confusing. default false % % The method uses the predict method to compute the error. For % regression models the error is computed as RMS, for classifiers @@ -546,14 +539,14 @@ classdef GBTrees < handle function [results,f] = predict(this, samples, varargin) %PREDICT Predicts response(s) for the provided sample(s) % - % [results,f] = model.predict(samples) - % [...] = model.predict(..., 'OptionName', optionValue, ...) + % [results,f] = model.predict(samples) + % [...] = model.predict(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ Input row vectors (one or more) stored as rows of - % a floating-point matrix. If not all the variables were - % actually used during training, `samples` contains forged - % values at the appropriate places. + % a floating-point matrix. If not all the variables were + % actually used during training, `samples` contains forged + % values at the appropriate places. % % ## Output % * __results__ Output labels or regression values. @@ -561,14 +554,14 @@ classdef GBTrees < handle % % ## Options % * __Flags__ The optional predict flags, model-dependent. - % Not used. default 0 + % Not used. default 0 % % The method predicts the response corresponding to the given % sample. The result is either the class label or the estimated % function value. The method enables using the parallel version of % the GBT model prediction if the OpenCV is built with the TBB - % library. In this case, predictions of single trees are computed in - % a parallel fashion. + % library. In this case, predictions of single trees are computed + % in a parallel fashion. % % See also: cv.GBTrees.train, cv.GBTrees.calcError % @@ -581,14 +574,13 @@ classdef GBTrees < handle function setK(this, K) %SETK Set number of tree ensembles built in case of the classification problem % - % model.setK(K) + % model.setK(K) % % ## Input % * __K__ Use this parameter to change the output to sum of the - % trees' predictions in the k-th ensemble only. To get the - % total GBT model prediction, `K` value must be -1. For - % regression problems, `K` is also equal to -1. Its value is - % -1 by default. + % trees' predictions in the k-th ensemble only. To get the total + % GBT model prediction, `K` value must be -1. For regression + % problems, `K` is also equal to -1. Its value is -1 by default. % % See Training the GBT model. % @@ -600,7 +592,7 @@ classdef GBTrees < handle function roots = getRoots(this) %GETROOTS Returns indices of root nodes % - % roots = classifier.getRoots() + % roots = model.getRoots() % % ## Output % * __roots__ vector of indices. 
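Before moving on to the tree-inspection methods, the train/predict/calcError signatures documented above can be tied together in a short sketch. This is a minimal illustration, not a definitive usage: it assumes cv.GBTrees is enabled in the build (the file ships here as `GBTrees.m_`, which suggests it may be disabled), and the data is synthetic.

```matlab
% Minimal regression sketch against the documented GBTrees interface;
% cv.GBTrees availability and the toy data are assumptions.
X = [randn(50,2)+1; randn(50,2)-1];   % 100 samples, one per row ('Row' layout)
Y = [ones(50,1); -ones(50,1)];        % regression targets

model = cv.GBTrees();
model.train(X, Y);                    % rows of X are feature vectors

Yhat = model.predict(X);              % estimated function values
err  = model.calcError(X, Y, 'TestError',false);  % RMS error over all samples
```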
@@ -613,24 +605,22 @@ classdef GBTrees < handle function nodes = getNodes(this) %GETNODES Returns all the nodes % - % nodes = classifier.getNodes() + % nodes = model.getNodes() % % ## Output % * __nodes__ Struct-array with the following fields: - % * __value__ Value at the node: a class label in case of - % classification or estimated function value in case - % of regression. - % * __classIdx__ Class index normalized to `0..class_count-1` - % range and assigned to the node. It is used - % internally in classification trees and tree - % ensembles. - % * __parent__ Index of the parent node. - % * __left__ Index of the left child node. - % * __right__ Index of right child node. - % * __defaultDir__ Default direction where to go (-1: left - % or +1: right). It helps in the case of missing - % values. - % * __split__ Index of the first split. + % * __value__ Value at the node: a class label in case of + % classification or estimated function value in case of + % regression. + % * __classIdx__ Class index normalized to `0..class_count-1` + % range and assigned to the node. It is used internally in + % classification trees and tree ensembles. + % * __parent__ Index of the parent node. + % * __left__ Index of the left child node. + % * __right__ Index of right child node. + % * __defaultDir__ Default direction where to go (-1 left or +1 + % right). It helps in the case of missing values. + % * __split__ Index of the first split. % % all the node indices are zero-based indices in the returned % vector. @@ -643,26 +633,25 @@ classdef GBTrees < handle function splits = getSplits(this) %GETSPLITS Returns all the splits % - % splits = classifier.getSplits() + % splits = model.getSplits() % % ## Output % * __splits__ Struct-array with the following fields: - % * __varIdx__ Index of variable on which the split is - % created. - % * __inversed__ If true, then the inverse split rule is - % used (i.e. left and right branches are exchanged in - % the rule expressions below). - % * __quality__ The split quality, a positive number. It is - % used to choose the best split. (It is also used to - % compute variable importance). - % * __next__ Index of the next split in the list of splits - % for the node (surrogate splits). - % * __c__ The threshold value in case of split on an ordered - % variable. The rule is: - % `if var_value < c, next_node = left; else next_node = right; end` - % * __subsetOfs__ Offset of the bitset used by the split on - % a categorical variable. The rule is: - % `if bitset(var_value) == 1, next_node = left; else next_node = right; end` + % * __varIdx__ Index of variable on which the split is created. + % * __inversed__ If true, then the inverse split rule is used + % (i.e. left and right branches are exchanged in the rule + % expressions below). + % * __quality__ The split quality, a positive number. It is used + % to choose the best split. (It is also used to compute + % variable importance). + % * __next__ Index of the next split in the list of splits for + % the node (surrogate splits). + % * __c__ The threshold value in case of split on an ordered + % variable. The rule is: + % `if var_value < c, next_node = left; else next_node = right; end` + % * __subsetOfs__ Offset of the bitset used by the split on a + % categorical variable. The rule is: + % `if bitset(var_value) == 1, next_node = left; else next_node = right; end` % % all the split indices are zero-based indices in the returned % vector. 
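The node and split layout documented above can be made concrete by walking a tree by hand. A sketch under stated assumptions: it reuses `model` and `X` from the previous sketch, handles ordered (non-`inversed`) splits only, ignores missing values and `defaultDir`, and assumes a leaf is marked by a negative `split` index, as in OpenCV's own tree predictor.

```matlab
% Follow one sample from a root node down to a leaf (illustrative only).
roots  = model.getRoots();
nodes  = model.getNodes();
splits = model.getSplits();
x = X(1,:);                        % one feature vector

idx = roots(1) + 1;                % stored indices are zero-based
while nodes(idx).split >= 0        % internal node; leaves assumed to use -1
    s = splits(nodes(idx).split + 1);
    if x(s.varIdx + 1) < s.c       % ordered-variable rule quoted above
        idx = nodes(idx).left + 1;
    else
        idx = nodes(idx).right + 1;
    end
end
leafValue = nodes(idx).value;      % class label or regression value
```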
@@ -675,7 +664,7 @@ classdef GBTrees < handle function subsets = getSubsets(this) %GETSUBSETS Returns all the bitsets for categorical splits % - % subsets = classifier.getSubsets() + % subsets = model.getSubsets() % % ## Output % * __subsets__ vector of indices. diff --git a/+cv/GFTTDetector.m b/+cv/GFTTDetector.m index cdfc6d208..2e36624c1 100644 --- a/+cv/GFTTDetector.m +++ b/+cv/GFTTDetector.m @@ -26,7 +26,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -70,17 +71,18 @@ function this = GFTTDetector(varargin) %GFTTDETECTOR Constructor % - % obj = cv.GFTTDetector() - % obj = cv.GFTTDetector(..., 'OptionName',optionValue, ...) + % obj = cv.GFTTDetector() + % obj = cv.GFTTDetector(..., 'OptionName',optionValue, ...) % % ## Options % * __MaxFeatures__ See cv.GFTTDetector.MaxFeatures, default 1000 % * __QualityLevel__ See cv.GFTTDetector.QualityLevel, - % default 0.01 + % default 0.01 % * __MinDistance__ See cv.GFTTDetector.MinDistance, default 1 % * __BlockSize__ See cv.GFTTDetector.BlockSize, default 3 + % * __GradientSize__ default 3 % * __HarrisDetector__ See cv.GFTTDetector.HarrisDetector, - % default false + % default false % * __K__ See cv.GFTTDetector.K, default 0.04 % % See also: cv.GFTTDetector.detect @@ -91,7 +93,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.GFTTDetector % @@ -102,7 +104,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -116,7 +118,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.GFTTDetector.empty, cv.GFTTDetector.load % @@ -126,11 +128,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.GFTTDetector.clear, cv.GFTTDetector.load % @@ -140,7 +142,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -156,21 +158,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). 
The previous @@ -184,11 +186,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.GFTTDetector.save, cv.GFTTDetector.load % @@ -201,28 +203,26 @@ function load(this, fname_or_str, varargin) function keypoints = detect(this, img, varargin) %DETECT Detects keypoints in an image or image set % - % keypoints = obj.detect(img) - % keypoints = obj.detect(imgs) - % [...] = obj.detect(..., 'OptionName',optionValue, ...) + % keypoints = obj.detect(img) + % keypoints = obj.detect(imgs) + % [...] = obj.detect(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Image (first variant), 8-bit grayscale image where - % keypoints (corners) are detected. + % keypoints (corners) are detected. % * __imgs__ Image set (second variant), cell array of images. % % ## Output - % * __keypoints__ The detected keypoints. In the first variant, - % a 1-by-N structure array. In the second variant of the - % method, `keypoints{i}` is a set of keypoints detected in - % `imgs{i}`. + % * __keypoints__ The detected keypoints. In the first variant, a + % 1-by-N structure array. In the second variant of the method, + % `keypoints{i}` is a set of keypoints detected in `imgs{i}`. % % ## Options % * __Mask__ A mask specifying where to look for keypoints - % (optional). It must be a logical or 8-bit integer matrix - % with non-zero values in the region of interest. In the - % second variant, it is a cell-array of masks for each input - % image, `masks{i}` is a mask for `imgs{i}`. - % Not set by default. + % (optional). It must be a logical or 8-bit integer matrix with + % non-zero values in the region of interest. In the second + % variant, it is a cell-array of masks for each input image, + % `masks{i}` is a mask for `imgs{i}`. Not set by default. % % See also: cv.GFTTDetector.GFTTDetector % diff --git a/+cv/GaussianBlur.m b/+cv/GaussianBlur.m index 741c19ad8..7e1f81c1a 100644 --- a/+cv/GaussianBlur.m +++ b/+cv/GaussianBlur.m @@ -1,30 +1,29 @@ %GAUSSIANBLUR Smoothes an image using a Gaussian filter % -% dst = cv.GaussianBlur(src) -% dst = cv.GaussianBlur(src, 'OptionName',optionValue, ...) +% dst = cv.GaussianBlur(src) +% dst = cv.GaussianBlur(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ input image; the image can have any number of channels, which are -% processed independently, but the depth should be `uint8`, `uint16`, -% `int16`, `single` or `double`. +% processed independently, but the depth should be `uint8`, `uint16`, +% `int16`, `single` or `double`. % % ## Output % * __dst__ output image of the same size and type as `src`. % % ## Options -% * __KSize__ Gaussian kernel size. `KSize(1)` and `KSize(2)` can differ -% but they both must be positive and odd. Or, they can be zeros and they -% are computed from `SigmaX` and `SigmaY`. default [5,5] +% * __KSize__ Gaussian kernel size. `KSize(1)` and `KSize(2)` can differ but +% they both must be positive and odd. Or, they can be zeros and they are +% computed from `SigmaX` and `SigmaY`. default [5,5] % * __SigmaX__ Gaussian kernel standard deviation in X direction. default 0 % * __SigmaY__ Gaussian kernel standard deviation in Y direction. If `SigmaY` -% is zero, it is set to be equal to `SigmaX`. 
If both sigmas are zeros, -% they are computed from `KSize(2)` and `KSize(1)`, respectively (see -% cv.getGaussianKernel for details). To fully control the result -% regardless of possible future modifications of all this semantics, it -% is recommended to specify all of `KSize`, `SigmaX`, and `SigmaY`. -% default 0 +% is zero, it is set to be equal to `SigmaX`. If both sigmas are zeros, they +% are computed from `KSize(2)` and `KSize(1)`, respectively (see +% cv.getGaussianKernel for details). To fully control the result regardless +% of possible future modifications of all this semantics, it is recommended +% to specify all of `KSize`, `SigmaX`, and `SigmaY`. default 0 % * __BorderType__ Pixel extrapolation method, see cv.copyMakeBorder. -% default 'Default' +% default 'Default' % % The function convolves the source image with the specified Gaussian kernel. % diff --git a/+cv/GeneralizedHoughBallard.m b/+cv/GeneralizedHoughBallard.m index 889d7f64c..5af9475d7 100644 --- a/+cv/GeneralizedHoughBallard.m +++ b/+cv/GeneralizedHoughBallard.m @@ -14,7 +14,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -44,7 +45,7 @@ function this = GeneralizedHoughBallard() %GENERALIZEDHOUGHBALLARD Constructor % - % obj = cv.GeneralizedHoughBallard() + % obj = cv.GeneralizedHoughBallard() % % See also: cv.GeneralizedHoughBallard.detect % @@ -54,7 +55,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.GeneralizedHoughBallard % @@ -68,7 +69,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.GeneralizedHoughBallard.empty, % cv.GeneralizedHoughBallard.load @@ -79,21 +80,21 @@ function clear(this) function load(this, filename, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -107,7 +108,7 @@ function load(this, filename, varargin) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -123,11 +124,11 @@ function save(this, filename) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). 
% % See also: cv.GeneralizedHoughBallard.clear, % cv.GeneralizedHoughBallard.load @@ -138,11 +139,11 @@ function save(this, filename) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.GeneralizedHoughBallard.save, % cv.GeneralizedHoughBallard.load @@ -156,21 +157,21 @@ function save(this, filename) function [positions,votes] = detect(this, varargin) %DETECT Find template on image % - % [positions,votes] = hough.detect(image) - % [positions,votes] = hough.detect(edges, dx, dy) + % [positions,votes] = hough.detect(image) + % [positions,votes] = hough.detect(edges, dx, dy) % % ## Input % * __image__ input image, 8-bit 1-channel image % * __edges__ image edges % * __dx__ image x-derivative of the same size as `edges` and - % single-precision floating type. + % single-precision floating type. % * __dy__ image y-derivate of the same size and type as `dx`. % % ## Output % * __positions__ Cell array of 4-element vectors, each of the - % form: `[posx, posy, scale, angle]` + % form: `[posx, posy, scale, angle]` % * __votes__ Cell array of 3-element vectors, of the same length - % as `positions`. + % as `positions`. % [positions,votes] = GeneralizedHoughBallard_(this.id, 'detect', varargin{:}); end @@ -178,20 +179,20 @@ function save(this, filename) function setTemplate(this, varargin) %SETTEMPLATE Set template to search % - % hough.setTemplate(templ) - % hough.setTemplate(edges, dx, dy) - % hough.setTemplate(..., 'OptionName', optionValue, ...) + % hough.setTemplate(templ) + % hough.setTemplate(edges, dx, dy) + % hough.setTemplate(..., 'OptionName', optionValue, ...) % % ## Input % * __templ__ template, 8-bit 1-channel image % * __edges__ template edges % * __dx__ template x-derivative of the same size as `edges` and - % single-precision floating type. + % single-precision floating type. % * __dy__ template y-derivate of the same size and type as `dx`. % % ## Options % * __Center__ Template center `[x,y]`. The default `[-1,-1]` - % will use `[size(templ,2) size(templ,1)]./2` as center. + % will use `[size(templ,2) size(templ,1)]./2` as center. % % In the first variant of the function (with the `templ` input), % the `edges` are internally calculated using the cv.Canny filter, diff --git a/+cv/GeneralizedHoughGuil.m b/+cv/GeneralizedHoughGuil.m index 3014e0b96..cdb7f4869 100644 --- a/+cv/GeneralizedHoughGuil.m +++ b/+cv/GeneralizedHoughGuil.m @@ -1,5 +1,5 @@ classdef GeneralizedHoughGuil < handle - %GeneralizedHoughGuil Generalized Hough transform + %GENERALIZEDHOUGHGUIL Generalized Hough transform % % Finds arbitrary template in the grayscale image using % Generalized Hough Transform. 
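Since cv.GeneralizedHoughBallard above and cv.GeneralizedHoughGuil below expose the same setTemplate/detect pair, one usage sketch covers both. The image file names are placeholders:

```matlab
% Locate a grayscale template in a scene image (file names hypothetical).
templ = rgb2gray(imread('template.png'));   % 8-bit 1-channel template
img   = rgb2gray(imread('scene.png'));      % 8-bit 1-channel search image

hough = cv.GeneralizedHoughBallard();
hough.setTemplate(templ);                   % edges computed via cv.Canny
[positions, votes] = hough.detect(img);     % {[posx posy scale angle], ...}
```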
@@ -15,7 +15,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -64,7 +65,7 @@ function this = GeneralizedHoughGuil() %GENERALIZEDHOUGHGUIL Constructor % - % obj = cv.GeneralizedHoughGuil() + % obj = cv.GeneralizedHoughGuil() % % See also: cv.GeneralizedHoughGuil.detect % @@ -74,7 +75,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.GeneralizedHoughGuil % @@ -88,7 +89,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.GeneralizedHoughGuil.empty, % cv.GeneralizedHoughGuil.load @@ -99,21 +100,21 @@ function clear(this) function load(this, filename, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -127,7 +128,7 @@ function load(this, filename, varargin) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -143,11 +144,11 @@ function save(this, filename) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.GeneralizedHoughGuil.clear, % cv.GeneralizedHoughGuil.load @@ -158,11 +159,11 @@ function save(this, filename) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.GeneralizedHoughGuil.save, % cv.GeneralizedHoughGuil.load @@ -176,21 +177,21 @@ function save(this, filename) function [positions,votes] = detect(this, varargin) %DETECT Find template on image % - % [positions,votes] = hough.detect(image) - % [positions,votes] = hough.detect(edges, dx, dy) + % [positions,votes] = hough.detect(image) + % [positions,votes] = hough.detect(edges, dx, dy) % % ## Input % * __image__ input image, 8-bit 1-channel image % * __edges__ image edges % * __dx__ image x-derivative of the same size as `edges` and - % single-precision floating type. + % single-precision floating type. % * __dy__ image y-derivate of the same size and type as `dx`. 
 %
 % ## Output
 % * __positions__ Cell array of 4-element vectors, each of the
- % form: `[posx, posy, scale, angle]`
+ % form: `[posx, posy, scale, angle]`
 % * __votes__ Cell array of 3-element vectors, of the same length
- % as `positions`.
+ % as `positions`.
 %
 [positions,votes] = GeneralizedHoughGuil_(this.id, 'detect', varargin{:});
 end
@@ -198,20 +199,20 @@ function save(this, filename)
 function setTemplate(this, varargin)
 %SETTEMPLATE Set template to search
 %
- % hough.setTemplate(templ)
- % hough.setTemplate(edges, dx, dy)
- % hough.setTemplate(..., 'OptionName', optionValue, ...)
+ % hough.setTemplate(templ)
+ % hough.setTemplate(edges, dx, dy)
+ % hough.setTemplate(..., 'OptionName', optionValue, ...)
 %
 % ## Input
 % * __templ__ template, 8-bit 1-channel image
 % * __edges__ template edges
 % * __dx__ template x-derivative of the same size as `edges` and
- % single-precision floating type.
+ % single-precision floating type.
 % * __dy__ template y-derivate of the same size and type as `dx`.
 %
 % ## Options
 % * __Center__ Template center `[x,y]`. The default `[-1,-1]`
- % will use `[size(templ,2) size(templ,1)]./2` as center.
+ % will use `[size(templ,2) size(templ,1)]./2` as center.
 %
 % In the first variant of the function (with the `templ` input),
 % the `edges` are internally calculated using the cv.Canny filter,
diff --git a/+cv/HOGDescriptor.m b/+cv/HOGDescriptor.m
index e98e4e9ba..78a95014d 100644
--- a/+cv/HOGDescriptor.m
+++ b/+cv/HOGDescriptor.m
@@ -1,14 +1,21 @@
 classdef HOGDescriptor < handle
- %HOGDESCRIPTOR Histogram of Oriented Gaussian (HOG) descriptor and detector
+ %HOGDESCRIPTOR Histogram of Oriented Gradients (HOG) descriptor and object detector
 %
- % The class implements Histogram of Oriented Gradients object detector
- % [Dalal2005].
+ % The HOG descriptor algorithm introduced by [Dalal2005].
+ %
+ % Useful links:
+ %
+ % - [PDF](https://hal.inria.fr/inria-00548512/document/)
+ % - [Wikipedia](https://en.wikipedia.org/wiki/Histogram_of_oriented_gradients)
+ % - [IPP](https://software.intel.com/en-us/ipp-dev-reference-histogram-of-oriented-gradients-hog-descriptor)
+ % - [LearnOpenCV](http://www.learnopencv.com/histogram-of-oriented-gradients)
+ % - [LearnOpenCV](http://www.learnopencv.com/handwritten-digits-classification-an-opencv-c-python-tutorial)
 %
 % ## Example
 % The basic usage is the following for computing HOG descriptors:
 %
- % hog = cv.HOGDescriptor();
- % descriptors = hog.compute(im);
+ % hog = cv.HOGDescriptor();
+ % descriptors = hog.compute(im);
 %
 % This computes descriptors in a "dense" setting; Each row is a feature
 % vector computed from a window of size `WinSize` slided across the input
@@ -23,8 +30,8 @@
 % keypoints (for example SIFT or SURF keypoints), use the `Locations`
 % option of the compute method:
 %
- % keypoints = cv.FAST(im);
- % descriptors = hog.compute(im, 'Locations', {keypoints.pt});
+ % keypoints = cv.FAST(im);
+ % descriptors = hog.compute(im, 'Locations', {keypoints.pt});
 %
 % The next step in object recognition using HOG descriptors is to feed the
 % descriptors computed from positive and negative images into a linear SVM
@@ -36,9 +43,9 @@
 % Alternatively, you can use the default built-in people detector which is
 % accessible by name as:
 %
- % % detect and localize upright people in images
- % hog.SVMDetector = 'DefaultPeopleDetector';
- % boxes = hog.detectMultiScale(im);
+ % % detect and localize upright people in images
+ % hog.SVMDetector = 'DefaultPeopleDetector';
+ % boxes = hog.detectMultiScale(im);
 %
 % In this case, there is no need to train an SVM model.
 %
@@ -71,18 +78,30 @@
 end
 properties (Dependent)
- WinSize % Window size
- BlockSize % Block size
- BlockStride % Block stride of a grid
- CellSize % Cell size of a grid
- NBins % Number of bins
- DerivAperture % Derivative of aperture
- WinSigma % Window sigma
- HistogramNormType % Histogram normalization method
- L2HysThreshold % L2 Hysterisis threshold
- GammaCorrection % Gamma correction
- NLevels % Number of levels
- SignedGradient % Signed gradient
+ % Window size
+ WinSize
+ % Block size
+ BlockSize
+ % Block stride of a grid
+ BlockStride
+ % Cell size of a grid
+ CellSize
+ % Number of bins
+ NBins
+ % Derivative of aperture
+ DerivAperture
+ % Window sigma
+ WinSigma
+ % Histogram normalization method
+ HistogramNormType
+ % L2 Hysteresis threshold
+ L2HysThreshold
+ % Gamma correction
+ GammaCorrection
+ % Number of levels
+ NLevels
+ % Signed gradient
+ SignedGradient
 % Coefficients for the linear SVM classifier.
 %
 % You can either specify a pretrained classier for people detection
 % other uses. The avaiable pretrained classifiers are:
 %
 % * __DefaultPeopleDetector__ coefficients of the classifier trained
- % for people detection (for default window size).
+ % for people detection (for default window size 64x128).
 % * __DaimlerPeopleDetector__ 1981 SVM coeffs obtained from daimler's
- % base. To use these coeffs the detection window size should be
- % [48,96].
+ % base. To use these coeffs the detection window size should be
+ % [48,96].
 %
 % See also: cv.HOGDescriptor.checkDetectorSize,
 % cv.HOGDescriptor.load, cv.HOGDescriptor.readALTModel
@@ -105,42 +124,42 @@
 function this = HOGDescriptor(varargin)
 %HOGDESCRIPTOR Create a new or load an existing HOG descriptor and detector
 %
- % hog = cv.HOGDescriptor()
- % hog = cv.HOGDescriptor('PropertyName', propertyValue, ...)
- % hog = cv.HOGDescriptor(filename)
+ % hog = cv.HOGDescriptor()
+ % hog = cv.HOGDescriptor('PropertyName', propertyValue, ...)
+ % hog = cv.HOGDescriptor(filename)
 %
 % ## Input
 % * __filename__ Filename of existing HOG descriptor config to
- % load from (XML or YAML). This is handled by the load
- % method.
+ % load from (XML or YAML). This is handled by the load method.
 %
 % ## Output
- % * __hog_ HOG descriptor object.
+ % * __hog__ HOG descriptor object.
 %
 % ## Options
 % * __WinSize__ Detection window size. Align to block size and
- % block stride. default [64,128]
+ % block stride. default [64,128]
 % * __BlockSize__ Block size in pixels. Align to cell size.
- % default [16,16]
+ % default [16,16]
 % * __BlockStride__ Block stride. It must be a multiple of cell
- % size. default [8,8]
+ % size. default [8,8]
 % * __CellSize__ Cell size. default [8,8]
- % * __NBins__ Number of bins. default 9
+ % * __NBins__ Number of bins in the calculation of histogram of
+ % gradients. default 9
 % * __DerivAperture__ Derivative of aperture. default 1
 % * __WinSigma__ Gaussian smoothing window parameter. default -1
- % (corresponds to `sum(BlockSize)/8`)
+ % (corresponds to `sum(BlockSize)/8`)
 % * __HistogramNormType__ Histogram normalization method.
- % default 'L2Hys'
+ % default 'L2Hys'
 % * __L2HysThreshold__ L2-Hys normalization method shrinkage.
- % default 0.2
+ % default 0.2
 % * __GammaCorrection__ Flag to specify whether the gamma
- % correction preprocessing is required or not. default true
+ % correction preprocessing is required or not. default true
 % * __NLevels__ Maximum number of detection window increases.
- % default 64
+ % default 64
 % * __SignedGradient__ Flag to specify whether orientations range
- % in 0-180 (false) or 0-360 (true) degrees. 0-180 mean that
- % positive/negative directions count as the same histogram
- % bin. default false
+ % in 0-180 (false) or 0-360 (true) degrees. 0-180 means that
+ % positive/negative directions count as the same histogram bin.
+ % default false
 %
 % See also: cv.HOGDescriptor.load
 %
@@ -155,7 +174,7 @@
 function delete(this)
 %DELETE Destructor
 %
- % hog.delete()
+ % hog.delete()
 %
 % See also: cv.HOGDescriptor
 %
@@ -166,24 +185,28 @@ function delete(this)
 function status = load(this, fname_or_str, varargin)
 %LOAD Loads a HOG descriptor config from a file or a string
 %
- % status = hog.load(filename)
- % status = hog.load(str, 'FromString',true)
- % status = hog.load(..., 'OptionName',optionValue, ...)
+ % status = hog.load(filename)
+ % status = hog.load(str, 'FromString',true)
+ % status = hog.load(..., 'OptionName',optionValue, ...)
 %
 % ## Input
- % * __filename__ HOG descriptor config filename (XML or YAML).
+ % * __filename__ HOG descriptor config filename (XML or YAML),
+ % containing properties and coefficients of the trained
+ % classifier.
 % * __str__ String containing the serialized HOG descriptor you
- % want to load.
+ % want to load.
 %
 % ## Output
 % * __status__ a logical value indicating success of load.
 %
 % ## Options
 % * __ObjName__ The optional name of the node to read (if empty,
- % the first top-level node will be used). default empty
- % * __FromString__ Logical flag to indicate whether the input is
- % a filename or a string containing the serialized model.
- % default false
+ % the first top-level node will be used). default empty
+ % * __FromString__ Logical flag to indicate whether the input is a
+ % filename or a string containing the serialized model.
+ % default false
+ %
+ % Loads coefficients for the linear SVM classifier from a file.
 %
 % See also: cv.HOGDescriptor.save, cv.HOGDescriptor.readALTModel
 %
@@ -193,20 +216,22 @@ function delete(this)
 function varargout = save(this, filename, varargin)
 %SAVE Saves a HOG descriptor config to a file
 %
- % hog.save(filename)
- % hog.save(filename, 'OptionName',optionValue, ...)
- % str = hog.save(...)
+ % hog.save(filename)
+ % hog.save(filename, 'OptionName',optionValue, ...)
+ % str = hog.save(...)
 %
 % ## Input
 % * __filename__ HOG descriptor config filename (XML or YAML).
 %
 % ## Output
 % * __str__ optional output. If requested, the HOG descriptor is
- % persisted to a string in memory instead of writing to disk.
+ % persisted to a string in memory instead of writing to disk.
 %
 % ## Options
 % * __ObjName__ The optional name of the node to write (if empty,
- % a default name value will be used). default empty
+ % a default name value will be used). default empty
+ %
+ % Saves coefficients for the linear SVM classifier to a file.
 %
 % See also: cv.HOGDescriptor.load
 %
@@ -216,7 +241,7 @@ function delete(this)
 function readALTModel(this, modelfile)
 %READALTMODEL Read model from SVMlight format
 %
- % hog.readALTModel(modelfile)
+ % hog.readALTModel(modelfile)
 %
 % ## Input
 % * __modelfile__ name of trained model file in SVMlight format.
@@ -231,17 +256,17 @@ function readALTModel(this, modelfile)
 function sz = getDescriptorSize(this)
 %GETDESCRIPTORSIZE Returns the number of coefficients required for the classification
 %
- % sz = hog.getDescriptorSize()
+ % sz = hog.getDescriptorSize()
 %
 % ## Output
 % * __sz__ a numeric value, descriptor size.
 %
 % The desriptor size is computed in the following way:
 %
- % cells_per_block = hog.BlockSize ./ hog.CellSize
- % histsize_per_block = prod(cells_per_block) * hog.NBins
- % blocks_per_window = (hog.WinSize - hog.BlockSize) ./ hog.BlockStride + 1
- % descriptor_size = prod(blocks_per_window) * histsize_per_block
+ % cells_per_block = hog.BlockSize ./ hog.CellSize
+ % histsize_per_block = prod(cells_per_block) * hog.NBins
+ % blocks_per_window = (hog.WinSize - hog.BlockSize) ./ hog.BlockStride + 1
+ % descriptor_size = prod(blocks_per_window) * histsize_per_block
 %
 % See also: cv.HOGDescriptor.compute
 %
@@ -251,11 +276,13 @@ function readALTModel(this, modelfile)
 function tf = checkDetectorSize(this)
 %CHECKDETECTORSIZE Checks the size of the detector is valid
 %
- % tf = hog.checkDetectorSize()
+ % tf = hog.checkDetectorSize()
 %
 % ## Output
 % * __tf__ a logical value, indicates validity of detector size.
 %
+ % Checks if the detector size equals the descriptor size.
+ %
 % The detector is considered valid if the coefficients vector is
 % either empty or has length matching `hog.getDescriptorSize()` or
 % `hog.getDescriptorSize()+1`.
@@ -268,7 +295,7 @@ function readALTModel(this, modelfile)
 function ws = getWinSigma(this)
 %GETWINSIGMA Get window sigma
 %
- % ws = hog.getWinSigma()
+ % ws = hog.getWinSigma()
 %
 % ## Output
 % * __ws__ a numeric value, window sigma.
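The getDescriptorSize formula quoted above is easy to verify numerically. With the default parameters (`WinSize=[64,128]`, `BlockSize=[16,16]`, `BlockStride=[8,8]`, `CellSize=[8,8]`, `NBins=9`) it gives `prod([7,15]) * 36 = 3780`:

```matlab
% Reproduce the descriptor-size computation with the default parameters.
hog = cv.HOGDescriptor();
cells_per_block    = hog.BlockSize ./ hog.CellSize;                         % [2,2]
histsize_per_block = prod(cells_per_block) * hog.NBins;                     % 36
blocks_per_window  = (hog.WinSize - hog.BlockSize) ./ hog.BlockStride + 1;  % [7,15]
descriptor_size    = prod(blocks_per_window) * histsize_per_block;          % 3780
assert(isequal(descriptor_size, hog.getDescriptorSize()))
```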
@@ -284,36 +311,40 @@ function readALTModel(this, modelfile) function descs = compute(this, im, varargin) %COMPUTE Returns HOG block descriptors computed for the whole image % - % descs = hog.compute(im) - % descs = hog.compute(im, 'OptionName', optionValue, ...) + % descs = hog.compute(im) + % descs = hog.compute(im, 'OptionName', optionValue, ...) % % ## Input - % * __im__ 8-bit 1- or 3-channel source image. + % * __im__ 8-bit 1- or 3-channel source image where HOG features + % will be calculated. % % ## Output % * __descs__ Row vectors of HOG descriptors, with the number of - % columns equal to `hog.getDescriptorSize()`. + % columns equal to `hog.getDescriptorSize()`. Matrix of type + % `single`. % % ## Options % * __WinStride__ Window stride `[w,h]`. It must be a multiple of - % block stride. Not set by default in which case it uses - % `CellSize`. + % block stride. Not set by default in which case it uses + % `CellSize`. % * __Padding__ Optional padding `[w,h]`. default [0,0] % * __Locations__ cell array of 2D points `{[x,y],...}` at which - % descriptors are computed. Not set by default (in which - % case descriptors are computed for the whole image with a - % sliding window). + % descriptors are computed. Not set by default (in which case + % descriptors are computed for the whole image with a sliding + % window). + % + % Computes HOG descriptors of given image. % % In case of "dense" descriptors (i.e `Locations` is not set), the % number of rows is equal to the number of sliding windows over % the image. Assuming zero padding, this is computed in the % following way: % - % [h,w,~] = size(im); - % % numel(hog.WinSize(1):hog.CellSize(1):w) - % % numel(hog.WinSize(2):hog.CellSize(2):h) - % windows_per_img = ([w,h] - hog.WinSize) ./ WinStride + 1 - % num_windows = prod(windows_per_img) + % [h,w,~] = size(im); + % windows_per_img = ([w,h] - hog.WinSize) ./ WinStride + 1 + % num_windows = prod(windows_per_img) + % % num_windows = numel(hog.WinSize(1):WinStride(1):w) * ... + % % numel(hog.WinSize(2):WinStride(2):h) % % The windows cover the image in a top-to-bottom left-to-right % order. @@ -333,24 +364,24 @@ function readALTModel(this, modelfile) end function [grad, angleOfs] = computeGradient(this, im, varargin) - %COMPUTEGRADIENT Computes gradient + %COMPUTEGRADIENT Computes gradients and quantized gradient orientations % - % [grad, angleOfs] = hog.computeGradient(im) - % [...] = hog.computeGradient(im, 'OptionName', optionValue, ...) + % [grad, angleOfs] = hog.computeGradient(im) + % [...] = hog.computeGradient(im, 'OptionName', optionValue, ...) % % ## Input - % * __im__ 8-bit 1- or 3-channel source image. + % * __im__ 8-bit 1- or 3-channel source image to be computed. % % ## Output % * __grad__ gradient magnitudes (2-channel float matrix of same - % size as the image plus padding). + % size as the image plus padding). % * __angleOfs__ quantized gradient orientation with integers in - % the range `[0,NBins-1]` (2-channel 8-bit matrix of same - % size as the image plus the padding). + % the range `[0,NBins-1]` (2-channel 8-bit matrix of same size + % as the image plus the padding). % % ## Options - % * __paddingTL__ Optional padding. default [0,0] - % * __paddingBR__ Optional padding. default [0,0] + % * __paddingTL__ Optional padding from top-left. default [0,0] + % * __paddingBR__ Optional padding from bottom-right. 
default [0,0] % % See also: cv.HOGDescriptor.compute % @@ -360,33 +391,34 @@ function readALTModel(this, modelfile) function [pts, weights] = detect(this, im, varargin) %DETECT Performs object detection without a multi-scale window % - % [pts, weights] = hog.detect(im) - % [...] = hog.detect(im, 'OptionName', optionValue, ...) + % [pts, weights] = hog.detect(im) + % [...] = hog.detect(im, 'OptionName', optionValue, ...) % % ## Input % * __im__ 8-bit 1- or 3-channel image where objects are detected. % % ## Output - % * __pts__ Left-top corner points of detected objects boundaries. - % A cell array of points where objects are found of the form - % `{[x,y], ...}`. The width and height of boundaries are - % specified by the `WinSize` parameter. - % * __weights__ Ouput vector of associated weights. + % * __pts__ Top-left corner points of detected objects boundaries. + % A cell array of points where objects are found of the form + % `{[x,y], ...}`. The width and height of boundaries are + % specified by the `WinSize` parameter. + % * __weights__ Ouput vector of associated weights. Contains + % confidence values for each detected object. % % ## Options % * __HitThreshold__ Threshold for the distance between features - % and SVM classifying plane. Usually it is 0 and should be - % specfied in the detector coefficients (as the last free - % coefficient). But if the free coefficient is omitted - % (which is allowed), you can specify it manually here. - % default 0 + % and SVM classifying plane. Usually it is 0 and should be + % specfied in the detector coefficients (as the last free + % coefficient). But if the free coefficient is omitted (which is + % allowed), you can specify it manually here. default 0 % * __WinStride__ Window stride `[w,h]`. It must be a multiple of - % block stride. Not set by default in which case it uses - % `CellSize`. + % block stride. Not set by default in which case it uses + % `CellSize`. % * __Padding__ Padding `[w,h]`. default [0,0] - % * __Locations__ cell array of 2-element points `{[x,y],...}` at - % which detector is executed. Not set by default, in which - % case the whole image is searched with a sliding window. + % * __Locations__ cell array of 2-element points `{[x,y],...}` of + % requrested locations at which detector is evaluated. Not set + % by default, in which case the whole image is searched with a + % sliding window. % % `SvmDetector` should be set before calling this method. % @@ -398,41 +430,44 @@ function readALTModel(this, modelfile) function [rcts, weights] = detectMultiScale(this, im, varargin) %DETECTMULTISCALE Performs object detection with a multi-scale window % - % [rcts, weights] = hog.detectMultiScale(im) - % [...] = hog.detectMultiScale(im, 'OptionName', optionValue, ...) + % [rcts, weights] = hog.detectMultiScale(im) + % [...] = hog.detectMultiScale(im, 'OptionName', optionValue, ...) % % ## Input % * __im__ 8-bit 1- or 3-channel image where objects are detected. % % ## Output % * __rcts__ Detected objects boundaries. Cell array of rectangles - % where objects are found, of the form `{[x,y,w,h], ...}`. - % * __weights__ Vector of associated weights. + % where objects are found, of the form `{[x,y,w,h], ...}`. + % * __weights__ Vector of associated weights. Contain confidence + % values for each detected object. % % ## Options % * __HitThreshold__ Threshold for the distance between features - % and SVM classifying plane. Usually it is 0 and should be - % specfied in the detector coefficients (as the last free - % coefficient). 
But if the free coefficient is omitted - % (which is allowed), you can specify it manually here. - % default 0 + % and SVM classifying plane. Usually it is 0 and should be + % specfied in the detector coefficients (as the last free + % coefficient). But if the free coefficient is omitted (which is + % allowed), you can specify it manually here. default 0 % * __WinStride__ Window stride `[w,h]`. It must be a multiple of - % block stride. Not set by default in which case it uses - % `BlockStride`. + % block stride. Not set by default in which case it uses + % `BlockStride`. % * __Padding__ Padding `[w,h]`. default [0,0] % * __Scale__ Coefficient of the detection window increase. - % default 1.05 + % default 1.05 % * __FinalThreshold__ Coefficient to regulate the similarity - % threshold. When detected, some objects can be covered by - % many rectangles. 0 means not to perform grouping. See - % cv.groupRectangles. default 2.0 + % threshold. When detected, some objects can be covered by many + % rectangles. 0 means not to perform grouping. See + % cv.groupRectangles. default 2.0 % * __UseMeanshiftGrouping__ Flag to use meanshift grouping or the - % default grouping based on merging overlapped rectangles. - % When false, cv.HOGDescriptor.groupRectangles is performed - % and the value of `FinalThreshold` is used for - % `GroupThreshold`. When true, cv.groupRectangles_meanshift - % is performed instead, and `FinalThreshold` is used for the - % `DetectThreshold` option. default false + % default grouping based on merging overlapped rectangles. When + % false, cv.HOGDescriptor.groupRectangles is performed and the + % value of `FinalThreshold` is used for `GroupThreshold`. When + % true, cv.groupRectangles_meanshift is performed instead, and + % `FinalThreshold` is used for the `DetectThreshold` option. + % default false + % + % Detects objects of different sizes in the input image. The + % detected objects are returned as a list of rectangles. % % `SvmDetector` should be set before calling this method. % @@ -452,35 +487,33 @@ function readALTModel(this, modelfile) function [pts, confidences] = detectROI(this, im, locations, varargin) %DETECTROI Evaluate specified ROI and return confidence value for each location % - % [pts, confidences] = hog.detectROI(im, locations) - % [...] = hog.detectROI(..., 'OptionName',optionValue, ...) + % [pts, confidences] = hog.detectROI(im, locations) + % [...] = hog.detectROI(..., 'OptionName',optionValue, ...) % % ## Input % * __im__ 8-bit 1- or 3-channel image where objects are detected. % * __locations__ cell array of 2-element points `{[x,y],...}` at - % which detector is executed. These are top-left corner - % of candidate points at which to detect objects. + % which detector is executed. These are top-left corner of + % candidate points at which to detect objects. % % ## Output - % * __pts__ Left-top corner points of detected objects boundaries. - % A cell array of points where objects are found of the form - % `{[x,y], ...}`. The width and height of boundaries are - % specified by the `WinSize` parameter. These are the - % filtered `locations` at which objects where actually - % detected. + % * __pts__ Top-left corner points of detected objects boundaries. + % A cell array of points where objects are found of the form + % `{[x,y], ...}`. The width and height of boundaries are + % specified by the `WinSize` parameter. These are the filtered + % `locations` at which objects where actually detected. 
% * __confidences__ vector of confidences for each of the - % candidate locations (prediction of the SVM classifier). + % candidate locations (prediction of the SVM classifier). % % ## Options % * __HitThreshold__ Threshold for the distance between features - % and SVM classifying plane. Usually it is 0 and should be - % specfied in the detector coefficients (as the last free - % coefficient). But if the free coefficient is omitted - % (which is allowed), you can specify it manually here. - % default 0 + % and SVM classifying plane. Usually it is 0 and should be + % specfied in the detector coefficients (as the last free + % coefficient). But if the free coefficient is omitted (which is + % allowed), you can specify it manually here. default 0 % * __WinStride__ Window stride `[w,h]`. It must be a multiple of - % block stride. Not set by default in which case it uses - % `CellSize`. + % block stride. Not set by default in which case it uses + % `CellSize`. % * __Padding__ Padding `[w,h]`. default [0,0] % % See also: cv.HOGDescriptor.detect @@ -491,40 +524,38 @@ function readALTModel(this, modelfile) function [rcts, locations] = detectMultiScaleROI(this, im, locations, varargin) %DETECTMULTISCALEROI Evaluate specified ROI and return confidence value for each location in multiple scales % - % [rcts, locations] = hog.detectMultiScaleROI(im, locations) - % [...] = hog.detectMultiScaleROI(..., 'OptionName',optionValue, ...) + % [rcts, locations] = hog.detectMultiScaleROI(im, locations) + % [...] = hog.detectMultiScaleROI(..., 'OptionName',optionValue, ...) % % ## Input % * __im__ 8-bit 1- or 3-channel image where objects are detected. % * __locations__ input detection region of interest. It specifies - % candidate locations to search for object detections at - % different scales. An struct array with the following - % fields: - % * __scale__ scale (size) of the bounding box, scalar. - % * __locations__ set of requrested locations to be - % evaluated, cell array of points `{[x,y], ...}`. - % * __confidences__ vector that will contain confidence - % values for each location. Not required on input, - % this will be filled/updated on output. + % candidate locations to search for object detections at + % different scales. An struct array with the following fields: + % * __scale__ scale (size) of the bounding box, scalar. + % * __locations__ set of requrested locations to be evaluated, + % cell array of points `{[x,y], ...}`. + % * __confidences__ vector that will contain confidence values + % for each location. Not required on input, this will be + % filled/updated on output. % % ## Output % * __rcts__ Detected objects boundaries. Cell array of rectangles - % where objects are found, of the form `{[x,y,w,h], ...}`. + % where objects are found, of the form `{[x,y,w,h], ...}`. % * __locations__ output updated `locations` struct array. All - % points are retained, but their confidences are updated. + % points are retained, but their confidences are updated. % % ## Options % * __HitThreshold__ Threshold for the distance between features - % and SVM classifying plane. Usually it is 0 and should be - % specfied in the detector coefficients (as the last free - % coefficient). But if the free coefficient is omitted - % (which is allowed), you can specify it manually here. - % default 0 + % and SVM classifying plane. Usually it is 0 and should be + % specfied in the detector coefficients (as the last free + % coefficient). 
But if the free coefficient is omitted (which is + % allowed), you can specify it manually here. default 0 % * __GroupThreshold__ Minimum possible number of rectangles in a - % group minus 1. The threshold is used on a group of - % rectangles to decide whether to retain it or not. If less - % than or equal to zero, no grouping is performed. See - % cv.groupRectangles. default 0 + % group minus 1. The threshold is used on a group of rectangles + % to decide whether to retain it or not. If less than or equal + % to zero, no grouping is performed. See cv.groupRectangles. + % default 0 % % See also: cv.HOGDescriptor.detectMultiScale % @@ -534,30 +565,31 @@ function readALTModel(this, modelfile) function [rects, weights] = groupRectangles(this, rects, weights, varargin) %GROUPRECTANGLES Groups the object candidate rectangles % - % [rects, weights] = hog.groupRectangles(rects, weights) - % [...] = hog.groupRectangles(..., 'OptionName',optionValue, ...) + % [rects, weights] = hog.groupRectangles(rects, weights) + % [...] = hog.groupRectangles(..., 'OptionName',optionValue, ...) % % ## Input % * __rects__ Input cell array of rectangles, where each rectangle - % is represented as a 4-element vector `{[x,y,w,h], ...}`, - % or a numeric Nx4/Nx1x4/1xNx4 array. + % is represented as a 4-element vector `{[x,y,w,h], ...}`, or a + % numeric Nx4/Nx1x4/1xNx4 array. % * __weights__ Input vector of associated weights. % % ## Output - % * __rects__ outupt updated rectangles. Grouped rectangles are - % the average of all rectangles in that cluster. - % * __weights__ output updated weights. Corresponding grouped - % weights are the maximum weights of all rectangles in that - % cluster. + % * __rects__ output updated rectangles. Includes retained and + % grouped rectangles. Grouped rectangles are the average of all + % rectangles in that cluster. + % * __weights__ output updated weights. Includes weights of + % retained and grouped rectangles. Corresponding grouped weights + % are the maximum weights of all rectangles in that cluster. % % ## Options % * __EPS__ Relative difference between sides of the rectangles to - % merge them into a group. default 0.2 + % merge them into a group. default 0.2 % * __GroupThreshold__ Minimum possible number of rectangles in a - % group minus 1. The threshold is used on a group of - % rectangles to decide whether to retain it or not. If less - % than or equal to zero, no grouping is performed. default 1 - % (i.e only groups with two or more rectangles are kept). + % group minus 1. The threshold is used on a group of rectangles + % to decide whether to retain it or not. If less than or equal + % to zero, no grouping is performed. default 1 (i.e only groups + % with two or more rectangles are kept). % % See also: cv.groupRectangles, cv.SimilarRects % diff --git a/+cv/HausdorffDistanceExtractor.m b/+cv/HausdorffDistanceExtractor.m index f94e3f4ce..c3ae048a3 100644 --- a/+cv/HausdorffDistanceExtractor.m +++ b/+cv/HausdorffDistanceExtractor.m @@ -13,7 +13,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -34,14 +35,14 @@ function this = HausdorffDistanceExtractor(varargin) %HAUSDORFFDISTANCEEXTRACTOR Constructor % - % obj = cv.HausdorffDistanceExtractor() - % obj = cv.HausdorffDistanceExtractor('OptionName',optionValue, ...) + % obj = cv.HausdorffDistanceExtractor() + % obj = cv.HausdorffDistanceExtractor('OptionName',optionValue, ...)
% % ## Options % * __DistanceFlag__ see - % cv.HausdorffDistanceExtractor.DistanceFlag, default 'L2' + % cv.HausdorffDistanceExtractor.DistanceFlag, default 'L2' % * __RankProportion__ see - % cv.HausdorffDistanceExtractor.RankProportion, default 0.6 + % cv.HausdorffDistanceExtractor.RankProportion, default 0.6 % % See also: cv.HausdorffDistanceExtractor.computeDistance % @@ -51,7 +52,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.HausdorffDistanceExtractor % @@ -65,14 +66,14 @@ function delete(this) function dist = computeDistance(this, contour1, contour2) %COMPUTEDISTANCE Compute the shape distance between two shapes defined by its contours % - % dist = obj.computeDistance(contour1, contour2) + % dist = obj.computeDistance(contour1, contour2) % % ## Input % * __contour1__ Contour defining first shape. A numeric - % Nx2/Nx1x2/1xNx2 array or a cell-array of 2D points - % `{[x,y], ...}` + % Nx2/Nx1x2/1xNx2 array or a cell-array of 2D points + % `{[x,y], ...}` % * __contour2__ Contour defining second shape. Same format as - % `contours1`. + % `contour1`. % % ## Output % * __dist__ output distance. @@ -88,7 +89,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.HausdorffDistanceExtractor.empty, % cv.HausdorffDistanceExtractor.load @@ -99,11 +100,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.HausdorffDistanceExtractor.clear, % cv.HausdorffDistanceExtractor.load @@ -114,7 +115,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -130,21 +131,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -158,11 +159,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string.
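For orientation, a minimal usage sketch of the extractor documented in this hunk (a sketch, not part of the changeset: it assumes `bw1` and `bw2` are binary masks each containing one shape, and the cv.findContours options are illustrative):

    % assumed inputs: bw1/bw2 are binary masks of two shapes
    c1 = cv.findContours(bw1, 'Mode','External', 'Method','None');
    c2 = cv.findContours(bw2, 'Mode','External', 'Method','None');
    hd = cv.HausdorffDistanceExtractor('DistanceFlag','L2', 'RankProportion',0.6);
    d = hd.computeDistance(c1{1}, c2{1});  % scalar shape distance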
% % See also: cv.HausdorffDistanceExtractor.save, % cv.HausdorffDistanceExtractor.load diff --git a/+cv/HoughCircles.m b/+cv/HoughCircles.m index 64b7ea027..5e32c2241 100644 --- a/+cv/HoughCircles.m +++ b/+cv/HoughCircles.m @@ -1,56 +1,54 @@ %HOUGHCIRCLES Finds circles in a grayscale image using the Hough transform % -% circles = cv.HoughCircles(image) -% circles = cv.HoughCircles(..., 'OptionName', optionValue, ...) +% circles = cv.HoughCircles(image) +% circles = cv.HoughCircles(..., 'OptionName', optionValue, ...) % % ## Input % * __image__ 8-bit, single-channel, grayscale input image. % % ## Output % * __circles__ Output vector of found circles. A cell-array of 3-element -% floating-point vectors `{[x, y, radius], ...}`. +% floating-point vectors `{[x, y, radius], ...}`. % % ## Options % * __Method__ Detection method. Currently, the only implemented method is -% 'Gradient' (default). One of: -% * __Standard__ classical or standard Hough transform. Every line is -% represented by two floating-point numbers `(rho,theta)`, where -% `rho` is a distance between `(0,0)` point and the line, and -% `theta` is the angle between x-axis and the normal to the line. -% Thus, the matrix must be (the created sequence will be) of -% `single` type with 2-channels. -% * __Probabilistic__ probabilistic Hough transform (more efficient in -% case if the picture contains a few long linear segments). It -% returns line segments rather than the whole line. Each segment -% is represented by starting and ending points, and the matrix -% must be (the created sequence will be) of the `int32` type with -% 4-channels. -% * __MultiScale__ multi-scale variant of the classical Hough transform. -% The lines are encoded the same way as 'Standard'. -% * __Gradient__ basically 21HT, described in [Yuen90]. +% 'Gradient' (default). One of: +% * __Standard__ classical or standard Hough transform. Every line is +% represented by two floating-point numbers `(rho,theta)`, where `rho` is +% a distance between `(0,0)` point and the line, and `theta` is the angle +% between x-axis and the normal to the line. Thus, the matrix must be (the +% created sequence will be) of `single` type with 2-channels. +% * __Probabilistic__ probabilistic Hough transform (more efficient when the +% picture contains a few long linear segments). It returns line +% segments rather than the whole line. Each segment is represented by +% starting and ending points, and the matrix must be (the created sequence +% will be) of the `int32` type with 4-channels. +% * __MultiScale__ multi-scale variant of the classical Hough transform. The +% lines are encoded the same way as 'Standard'. +% * __Gradient__ basically 21HT, described in [Yuen90]. % * __DP__ Inverse ratio of the accumulator resolution to the image resolution. -% For example, if `DP=1`, the accumulator has the same resolution as -% the input image. If `DP=2`, the accumulator has half as big width -% and height. default 1. +% For example, if `DP=1`, the accumulator has the same resolution as the +% input image. If `DP=2`, the accumulator has half the width and height. +% default 1. % * __MinDist__ Minimum distance between the centers of the detected circles. -% If the parameter is too small, multiple neighbor circles may be -% falsely detected in addition to a true one.
If it is too large, some circles may +% be missed. default `size(image,1)/8`. % * __Param1__ First method-specific parameter. In case of 'Gradient', it is -% the higher threshold of the two passed to the cv.Canny edge detector -% (the lower one is twice smaller). default 100. +% the higher threshold of the two passed to the cv.Canny edge detector +% (the lower one is half as large). default 100. % * __Param2__ Second method-specific parameter. In case of 'Gradient', it is -% the accumulator threshold for the circle centers at the detection -% stage. The smaller it is, the more false circles may be detected. -% Circles, corresponding to the larger accumulator values, will be -% returned first. default 100. +% the accumulator threshold for the circle centers at the detection stage. +% The smaller it is, the more false circles may be detected. Circles +% corresponding to the larger accumulator values will be returned first. +% default 100. % * __MinRadius__ Minimum circle radius. default 0. % * __MaxRadius__ Maximum circle radius. default 0. % % The function finds circles in a grayscale image using a modification of the % Hough transform. % -% ## Note +% ### Note % Usually the function detects the centers of circles well. However, it may % fail to find correct radii. You can assist the function by specifying the % radius range (`MinRadius` and `MaxRadius`) if you know it. Or, you may diff --git a/+cv/HoughLines.m b/+cv/HoughLines.m index 82dee9715..e680750ca 100644 --- a/+cv/HoughLines.m +++ b/+cv/HoughLines.m @@ -1,33 +1,33 @@ %HOUGHLINES Finds lines in a binary image using the standard Hough transform % -% lines = cv.HoughLines(image) -% lines = cv.HoughLines(..., 'OptionName', optionValue, ...) +% lines = cv.HoughLines(image) +% lines = cv.HoughLines(..., 'OptionName', optionValue, ...) % % ## Input % * __image__ 8-bit, single-channel binary source image. % % ## Output % * __lines__ Output vector of lines. A cell-array of 2-element vectors -% `{[rho,theta], ...}`. `rho` is the distance from the coordinate origin -% `(0,0)` (top-left corner of the image). `theta` is the line rotation -% angle in radians (0 ~ vertical line, pi/2 ~ horizontal line). +% `{[rho,theta], ...}`. `rho` is the distance from the coordinate origin +% `(0,0)` (top-left corner of the image). `theta` is the line rotation angle +% in radians (0 ~ vertical line, pi/2 ~ horizontal line). % % ## Options % * __Rho__ Distance resolution of the accumulator in pixels. default 1. % * __Theta__ Angle resolution of the accumulator in radians. default pi/180. % * __Threshold__ Accumulator threshold parameter. Only those lines are -% returned that get enough votes (`>Threshold`). default 80. +% returned that get enough votes (`>Threshold`). default 80. % * __SRN__ For the multi-scale Hough transform, it is a divisor for the -% distance resolution `Rho`. The coarse accumulator distance resolution -% is `Rho` and the accurate accumulator resolution is `Rho/SRN`. If both -% `SRN=0` and `STN=0`, the classical Hough transform is used. Otherwise, -% both these parameters should be positive. default 0. +% distance resolution `Rho`. The coarse accumulator distance resolution is +% `Rho` and the accurate accumulator resolution is `Rho/SRN`. If both +% `SRN=0` and `STN=0`, the classical Hough transform is used. Otherwise, +% both these parameters should be positive. default 0. % * __STN__ For the multi-scale Hough transform, it is a divisor for the -% distance resolution `Theta`. default 0. +% distance resolution `Theta`. default 0.
% * __MinTheta__ For standard and multi-scale Hough transform, minimum angle -% to check for lines. Must fall between 0 and `MaxTheta`. default 0 +% to check for lines. Must fall between 0 and `MaxTheta`. default 0 % * __MaxTheta__ For standard and multi-scale Hough transform, maximum angle -% to check for lines. Must fall between `MinTheta` and pi. default pi +% to check for lines. Must fall between `MinTheta` and pi. default pi % % The function implements the standard or standard multi-scale Hough transform % algorithm for line detection. See diff --git a/+cv/HoughLinesP.m b/+cv/HoughLinesP.m index 6e8e9ce4d..16d046936 100644 --- a/+cv/HoughLinesP.m +++ b/+cv/HoughLinesP.m @@ -1,25 +1,25 @@ %HOUGHLINESP Finds line segments in a binary image using the probabilistic Hough transform % -% lines = cv.HoughLinesP(image) -% lines = cv.HoughLinesP(..., 'OptionName', optionValue, ...) +% lines = cv.HoughLinesP(image) +% lines = cv.HoughLinesP(..., 'OptionName', optionValue, ...) % % ## Input % * __image__ 8-bit, single-channel binary source image. % % ## Output % * __lines__ Output vector of lines. A cell-array of 4-element vectors -% `{[x1,y1,x2,y2], ...}`, where `(x1,y1)` and `(x2,y2)` are the ending -% points of each detected line segment. +% `{[x1,y1,x2,y2], ...}`, where `(x1,y1)` and `(x2,y2)` are the ending +% points of each detected line segment. % % ## Options % * __Rho__ Distance resolution of the accumulator in pixels. default 1. % * __Theta__ Angle resolution of the accumulator in radians. default pi/180. % * __Threshold__ Accumulator threshold parameter. Only those lines are -% returned that get enough votes (`>Threshold`). default 80. +% returned that get enough votes (`>Threshold`). default 80. % * __MinLineLength__ Minimum line length. Line segments shorter than that are -% rejected. default 0. +% rejected. default 0. % * __MaxLineGap__ Maximum allowed gap between points on the same line to link -% them. default 0. +% them. default 0. % % The function implements the probabilistic Hough transform algorithm for line % detection, described in [Matas00]. diff --git a/+cv/HuMoments.m b/+cv/HuMoments.m index 99d0fe3c2..ad9975120 100644 --- a/+cv/HuMoments.m +++ b/+cv/HuMoments.m @@ -1,6 +1,6 @@ %HUMOMENTS Calculates seven Hu invariants % -% hu = cv.HuMoments(mo) +% hu = cv.HuMoments(mo) % % ## Input % * __mo__ Input moments computed with cv.moments @@ -9,18 +9,18 @@ % * __hu__ Output Hu invariants, a vector of length 7. 
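Before the formal definition that follows, a minimal usage sketch (a sketch only: it assumes `bw` is a binary silhouette image, and the option value is illustrative):

    % assumed input: bw is a binary (logical/uint8) silhouette image
    mo = cv.moments(bw, 'BinaryImage',true);
    hu = cv.HuMoments(mo);  % 7-element vector of Hu invariants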
% % The function calculates seven Hu invariants (introduced in [Hu62]; see also -% [Image moment](http://en.wikipedia.org/wiki/Image_moment)), defined as: -% -% hu[0] = eta_20 + eta_02 -% hu[1] = (eta_20 - eta_02)^2 + 4*eta_11^2 -% hu[2] = (eta_30 - 3*eta_12)^2 + (3*eta_21 - eta_03)^2 -% hu[3] = (eta_30 + eta_12)^2 + (eta_21 + eta_03)^2 -% hu[4] = (eta_30 - 3*eta_12)*(eta_30 + eta_12)*[(eta_30 + eta_12)^2 - 3*(eta_21 + eta_03)^2] + -% (3*eta_21 - eta_03)*(eta_21 + eta_03)*[3*(eta_30 + eta_12)^2-(eta_21 + eta_03)^2] -% hu[5] = (eta_20 - eta_02)*[(eta_30 + eta_12)^2 - (eta_21 + eta_03)^2] + -% 4*eta_11(eta_30 + eta_12)*(eta_21 + eta_03) -% hu[6] = (3*eta_21 - eta_03)*(eta_21 + eta_03)*[3*(eta_30 + eta_12)^2-(eta_21 + eta_03)^2] - -% (eta_30 - 3*eta_12)*(eta_21 + eta_03)*[3*(eta_30 + eta_12)^2-(eta_21 + eta_03)^2] +% [Image moment](https://en.wikipedia.org/wiki/Image_moment)), defined as: +% +% hu[0] = eta_20 + eta_02 +% hu[1] = (eta_20 - eta_02)^2 + 4*eta_11^2 +% hu[2] = (eta_30 - 3*eta_12)^2 + (3*eta_21 - eta_03)^2 +% hu[3] = (eta_30 + eta_12)^2 + (eta_21 + eta_03)^2 +% hu[4] = (eta_30 - 3*eta_12)*(eta_30 + eta_12)*[(eta_30 + eta_12)^2 - 3*(eta_21 + eta_03)^2] + +% (3*eta_21 - eta_03)*(eta_21 + eta_03)*[3*(eta_30 + eta_12)^2-(eta_21 + eta_03)^2] +% hu[5] = (eta_20 - eta_02)*[(eta_30 + eta_12)^2 - (eta_21 + eta_03)^2] + +% 4*eta_11*(eta_30 + eta_12)*(eta_21 + eta_03) +% hu[6] = (3*eta_21 - eta_03)*(eta_30 + eta_12)*[(eta_30 + eta_12)^2 - 3*(eta_21 + eta_03)^2] - +% (eta_30 - 3*eta_12)*(eta_21 + eta_03)*[3*(eta_30 + eta_12)^2-(eta_21 + eta_03)^2] % % where `eta_ji` stands for `mo.nu[ji]`. % diff --git a/+cv/KAZE.m b/+cv/KAZE.m index 16ad0d5e0..066498427 100644 --- a/+cv/KAZE.m +++ b/+cv/KAZE.m @@ -16,7 +16,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -55,8 +56,8 @@ function this = KAZE(varargin) %KAZE The KAZE constructor % - % obj = cv.KAZE() - % obj = cv.KAZE(..., 'OptionName',optionValue, ...) + % obj = cv.KAZE() + % obj = cv.KAZE(..., 'OptionName',optionValue, ...) % % ## Options % * __Extended__ See cv.KAZE.Extended, default false @@ -74,7 +75,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.KAZE % @@ -85,7 +86,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -99,7 +100,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.KAZE.empty, cv.KAZE.load % @@ -109,11 +110,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.KAZE.clear, cv.KAZE.load % @@ -123,7 +124,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -139,21 +140,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...)
+ % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -167,11 +168,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.KAZE.save, cv.KAZE.load % @@ -184,16 +185,16 @@ function load(this, fname_or_str, varargin) function ntype = defaultNorm(this) %DEFAULTNORM Returns the default norm type % - % ntype = obj.defaultNorm() + % ntype = obj.defaultNorm() % % ## Output % * __ntype__ Norm type. One of `cv::NormTypes`: - % * __Inf__ - % * __L1__ - % * __L2__ - % * __L2Sqr__ - % * __Hamming__ - % * __Hamming2__ + % * __Inf__ + % * __L1__ + % * __L2__ + % * __L2Sqr__ + % * __Hamming__ + % * __Hamming2__ % % Always `L2` for KAZE. % @@ -205,11 +206,11 @@ function load(this, fname_or_str, varargin) function sz = descriptorSize(this) %DESCRIPTORSIZE Returns the descriptor size in bytes % - % sz = obj.descriptorSize() + % sz = obj.descriptorSize() % % ## Output % * __sz__ Descriptor size, either 64 or 128 (see the - % cv.KAZE.Extended property). + % cv.KAZE.Extended property). % % See also: cv.KAZE.descriptorType, cv.KAZE.compute % @@ -219,7 +220,7 @@ function load(this, fname_or_str, varargin) function dtype = descriptorType(this) %DESCRIPTORTYPE Returns the descriptor type % - % dtype = obj.descriptorType() + % dtype = obj.descriptorType() % % ## Output % * __dtype__ Descriptor type, one of numeric MATLAB class names. @@ -234,27 +235,25 @@ function load(this, fname_or_str, varargin) function keypoints = detect(this, img, varargin) %DETECT Detects keypoints in an image or image set % - % keypoints = obj.detect(img) - % keypoints = obj.detect(imgs) - % [...] = obj.detect(..., 'OptionName',optionValue, ...) + % keypoints = obj.detect(img) + % keypoints = obj.detect(imgs) + % [...] = obj.detect(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Image (first variant), grayscale image. % * __imgs__ Image set (second variant), cell array of images. % % ## Output - % * __keypoints__ The detected keypoints. In the first variant, - % a 1-by-N structure array. In the second variant of the - % method, `keypoints{i}` is a set of keypoints detected in - % `imgs{i}`. + % * __keypoints__ The detected keypoints. In the first variant, a + % 1-by-N structure array. In the second variant of the method, + % `keypoints{i}` is a set of keypoints detected in `imgs{i}`. % % ## Options % * __Mask__ A mask specifying where to look for keypoints - % (optional). 
It must be a logical or 8-bit integer matrix - % with non-zero values in the region of interest. In the - % second variant, it is a cell-array of masks for each input - % image, `masks{i}` is a mask for `imgs{i}`. - % Not set by default. + % (optional). It must be a logical or 8-bit integer matrix with + % non-zero values in the region of interest. In the second + % variant, it is a cell-array of masks for each input image, + % `masks{i}` is a mask for `imgs{i}`. Not set by default. % % See also: cv.KAZE.compute, cv.KAZE.detectAndCompute % @@ -264,26 +263,26 @@ function load(this, fname_or_str, varargin) function [descriptors, keypoints] = compute(this, img, keypoints) %COMPUTE Computes the descriptors for a set of keypoints detected in an image or image set % - % [descriptors, keypoints] = obj.compute(img, keypoints) - % [descriptors, keypoints] = obj.compute(imgs, keypoints) + % [descriptors, keypoints] = obj.compute(img, keypoints) + % [descriptors, keypoints] = obj.compute(imgs, keypoints) % % ## Input % * __img__ Image (first variant), grayscale image. % * __imgs__ Image set (second variant), cell array of images. % * __keypoints__ Input collection of keypoints. Keypoints for - % which a descriptor cannot be computed are removed. - % Sometimes new keypoints can be added, for example: cv.SIFT - % duplicates keypoint with several dominant orientations - % (for each orientation). In the first variant, this is a - % struct-array of detected keypoints. In the second variant, - % it is a cell-array, where `keypoints{i}` is a set of keypoints - % detected in `imgs{i}` (a struct-array like before). + % which a descriptor cannot be computed are removed. Sometimes + % new keypoints can be added, for example: cv.SIFT duplicates + % a keypoint with several dominant orientations (for each + % orientation). In the first variant, this is a struct-array of + % detected keypoints. In the second variant, it is a cell-array, + % where `keypoints{i}` is a set of keypoints detected in + % `imgs{i}` (a struct-array like before). % % ## Output % * __descriptors__ Computed descriptors. In the second variant of - % the method `descriptors{i}` are descriptors computed for a - % `keypoints{i}`. Row `j` in `descriptors` (or - % `descriptors{i}`) is the descriptor for `j`-th keypoint. + % the method `descriptors{i}` are descriptors computed for + % `keypoints{i}`. Row `j` in `descriptors` (or `descriptors{i}`) + % is the descriptor for `j`-th keypoint. % * __keypoints__ Optional output with possibly updated keypoints. % % See also: cv.KAZE.detect, cv.KAZE.detectAndCompute @@ -294,43 +293,43 @@ function load(this, fname_or_str, varargin) function [keypoints, descriptors] = detectAndCompute(this, img, varargin) %DETECTANDCOMPUTE Detects keypoints and computes their descriptors % - % [keypoints, descriptors] = obj.detectAndCompute(img) - % [...] = obj.detectAndCompute(..., 'OptionName',optionValue, ...) + % [keypoints, descriptors] = obj.detectAndCompute(img) + % [...] = obj.detectAndCompute(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Input `uint8`/`uint16`/`single` grayscale image. - % Internally image is converted to 32-bit floating-point in - % the [0,1] range. + % Internally image is converted to 32-bit floating-point in + % the [0,1] range. % % ## Output % * __keypoints__ The detected keypoints.
A 1-by-N structure array - % with the following fields: - % * __pt__ coordinates of the keypoint `[x,y]` - % * __size__ diameter of the meaningful keypoint neighborhood - % * __angle__ computed orientation of the keypoint (-1 if not - % applicable); it's in [0,360) degrees and measured - % relative to image coordinate system (y-axis is - % directed downward), i.e in clockwise. - % * __response__ the response by which the most strong - % keypoints have been selected. Can be used for further - % sorting or subsampling. - % * __octave__ octave (pyramid layer) from which the keypoint - % has been extracted. - % * **class_id** object class (if the keypoints need to be - % clustered by an object they belong to). + % with the following fields: + % * __pt__ coordinates of the keypoint `[x,y]` + % * __size__ diameter of the meaningful keypoint neighborhood + % * __angle__ computed orientation of the keypoint (-1 if not + % applicable); it's in [0,360) degrees and measured relative + % to image coordinate system (y-axis is directed downward), + % i.e in clockwise. + % * __response__ the response by which the most strong keypoints + % have been selected. Can be used for further sorting or + % subsampling. + % * __octave__ octave (pyramid layer) from which the keypoint + % has been extracted. + % * **class_id** object class (if the keypoints need to be + % clustered by an object they belong to). % * __descriptors__ Computed descriptors. Output concatenated - % vectors of descriptors. Each descriptor is a 64- or - % 128-element vector, as returned by cv.KAZE.descriptorSize, - % so the total size of descriptors will be - % `numel(keypoints) * obj.descriptorSize()`. A matrix of - % size N-by-(64/128) of class `single`, one row per keypoint. + % vectors of descriptors. Each descriptor is a 64 or 128 element + % vector, as returned by cv.KAZE.descriptorSize, so the total + % size of descriptors will be + % `numel(keypoints) * obj.descriptorSize()`. A matrix of size + % N-by-(64/128) of class `single`, one row per keypoint. % % ## Options % * __Mask__ optional mask specifying where to look for keypoints. - % Not set by default. + % Not set by default. % * __Keypoints__ If passed, then the method will use the provided - % vector of keypoints instead of detecting them, and the - % algorithm just computes their descriptors. + % vector of keypoints instead of detecting them, and the + % algorithm just computes their descriptors. % % See also: cv.KAZE.detect, cv.KAZE.compute % diff --git a/+cv/KNearest.m b/+cv/KNearest.m index acb9435c5..88366517a 100644 --- a/+cv/KNearest.m +++ b/+cv/KNearest.m @@ -12,11 +12,11 @@ % % ## Example % - % Xtrain = [randn(20,4)+1; randn(20,4)-1]; % training samples - % Ytrain = int32([ones(20,1); zeros(20,1)]); % training labels - % knn = cv.KNearest(Xtrain, Ytrain); - % Xtest = randn(50,4); % testing samples - % Ytest = knn.predict(Xtest); % predictions + % Xtrain = [randn(20,4)+1; randn(20,4)-1]; % training samples + % Ytrain = int32([ones(20,1); zeros(20,1)]); % training labels + % knn = cv.KNearest(Xtrain, Ytrain); + % Xtest = randn(50,4); % testing samples + % Ytest = knn.predict(Xtest); % predictions % % ## References % [BEIS97]: @@ -62,9 +62,9 @@ % % * __BruteForce__ brute-force search. % * __KDTree__ Fast Nearest Neighbor Search using KD-trees. This - % implements D. Lowe BBF (Best-Bin-First) algorithm for the last - % approximate (or accurate) nearest neighbor search in - % multi-dimensional spaces. See [BEIS97]. + % implements D. 
Lowe BBF (Best-Bin-First) algorithm for the fast + approximate (or accurate) nearest neighbor search in + multi-dimensional spaces. See [BEIS97]. AlgorithmType end @@ -73,8 +73,8 @@ function this = KNearest(varargin) %KNEAREST Creates/trains a K-Nearest Neighbors model % - % model = cv.KNearest() - % model = cv.KNearest(...) + % model = cv.KNearest() + % model = cv.KNearest(...) % % The first variant creates an empty model. Use cv.KNearest.train % to train the model. @@ -93,7 +93,7 @@ function delete(this) %DELETE Destructor % - % model.delete() + % model.delete() % % See also: cv.KNearest % @@ -107,7 +107,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % model.clear() + % model.clear() % % The method clear does the same job as the destructor: it % deallocates all the memory occupied by the class members. But @@ -123,11 +123,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = model.empty() + % b = model.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.KNearest.clear, cv.KNearest.load % @@ -137,17 +137,17 @@ function clear(this) function varargout = save(this, filename) %SAVE Saves the algorithm parameters to a file or a string % - % model.save(filename) - % str = model.save(filename) + % model.save(filename) + % str = model.save(filename) % % ## Input % * __filename__ Name of the file to save to. In case of string - % output, only the filename extension is used to determine - % the output format (XML or YAML). + % output, only the filename extension is used to determine the + % output format (XML or YAML). % % ## Output % * __str__ optional output. If requested, the model is persisted - % to a string in memory instead of writing to disk. + % to a string in memory instead of writing to disk. % % This method stores the complete model state to the specified % XML or YAML file (or to a string in memory, based on the number @@ -161,23 +161,22 @@ function clear(this) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % model.load(filename) - % model.load(str, 'FromString',true) - % model.load(..., 'OptionName',optionValue, ...) + % model.load(filename) + % model.load(str, 'FromString',true) + % model.load(..., 'OptionName',optionValue, ...) % % ## Input % * __filename__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model - % (switches between `Algorithm::load()` and - % `Algorithm::loadFromString()` C++ methods). - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model (switches + % between `Algorithm::load()` and + % `Algorithm::loadFromString()` C++ methods). default false % % This method loads the complete model state from the specified % XML or YAML file (either from disk or serialized string).
The @@ -191,11 +190,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = model.getDefaultName() + % name = model.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.KNearest.save, cv.KNearest.load % @@ -208,7 +207,7 @@ function load(this, fname_or_str, varargin) function count = getVarCount(this) %GETVARCOUNT Returns the number of variables in training samples % - % count = model.getVarCount() + % count = model.getVarCount() % % ## Output % * __count__ number of variables in training samples. @@ -221,7 +220,7 @@ function load(this, fname_or_str, varargin) function b = isTrained(this) %ISTRAINED Returns true if the model is trained % - % b = model.isTrained() + % b = model.isTrained() % % ## Output % * __b__ Returns true if the model is trained, false otherwise. @@ -234,11 +233,11 @@ function load(this, fname_or_str, varargin) function b = isClassifier(this) %ISCLASSIFIER Returns true if the model is a classifier % - % b = model.isClassifier() + % b = model.isClassifier() % % ## Output % * __b__ Returns true if the model is a classifier, false if the - % model is a regressor. + % model is a regressor. % % See also: cv.KNearest.isTrained, cv.KNearest.IsClassifier % @@ -248,127 +247,121 @@ function load(this, fname_or_str, varargin) function status = train(this, samples, responses, varargin) %TRAIN Trains the model % - % status = model.train(samples, responses) - % status = model.train(csvFilename, []) - % [...] = model.train(..., 'OptionName', optionValue, ...) + % status = model.train(samples, responses) + % status = model.train(csvFilename, []) + % [...] = model.train(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ matrix of training samples. It should have - % `single` type. By default, each row represents a sample - % (see the `Layout` option). + % `single` type. By default, each row represents a sample (see + % the `Layout` option). % * __responses__ matrix of associated responses, with training - % labels in case of classification and response values for - % regression. If the responses are scalar, they should be - % stored as a vector (as a single row or a single column - % matrix). The matrix should have type `single` or `int32` - % (in the former case the responses are considered as - % ordered (numerical) by default; in the latter case as - % categorical). You can override the defaults using the - % `VarType` option. + % labels in case of classification and response values for + % regression. If the responses are scalar, they should be stored + % as a vector (as a single row or a single column matrix). The + % matrix should have type `single` or `int32` (in the former + % case the responses are considered as ordered (numerical) by + % default; in the latter case as categorical). You can override + % the defaults using the `VarType` option. % * __csvFilename__ The input CSV file name from which to load - % dataset. In this variant, you should set the second - % argument to an empty array. + % dataset. In this variant, you should set the second argument + % to an empty array. % % ## Output % * __status__ Success flag. % % ## Options % * __Data__ Training data options, specified as a cell array of - % key/value pairs of the form `{'key',val, ...}`. See below. + % key/value pairs of the form `{'key',val, ...}`. 
See below. % * __Flags__ The optional training flags, model-dependent. For - % convenience, you can set the individual flag options - % below, instead of directly setting bits here. default 0 + % convenience, you can set the individual flag options below, + % instead of directly setting bits here. default 0 % * __UpdateModel__ Specifies whether the model should be trained - % from scratch (`UpdateModel=false`), or should be updated - % using the new training data (`UpdateModel=true`) instead - % of being completely overwritten. default false + % from scratch (`UpdateModel=false`), or should be updated + % using the new training data (`UpdateModel=true`) instead of + % being completely overwritten. default false % % ### Options for `Data` (first variant with samples and responses) % * __Layout__ Sample types. Default 'Row'. One of: - % * __Row__ each training sample is a row of samples. - % * __Col__ each training sample occupies a column of - % samples. + % * __Row__ each training sample is a row of samples. + % * __Col__ each training sample occupies a column of samples. % * __VarIdx__ vector specifying which variables to use for - % training. It can be an integer vector (`int32`) containing - % 0-based variable indices or logical vector (`uint8` or - % `logical`) containing a mask of active variables. Not set - % by default, which uses all variables in the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based variable indices or logical vector (`uint8` or + % `logical`) containing a mask of active variables. Not set by + % default, which uses all variables in the input data. % * __SampleIdx__ vector specifying which samples to use for - % training. It can be an integer vector (`int32`) containing - % 0-based sample indices or logical vector (`uint8` or - % `logical`) containing a mask of training samples of - % interest. Not set by default, which uses all samples in - % the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based sample indices or logical vector (`uint8` or + % `logical`) containing a mask of training samples of interest. + % Not set by default, which uses all samples in the input data. % * __SampleWeights__ optional floating-point vector with weights - % for each sample. Some samples may be more important than - % others for training. You may want to raise the weight of - % certain classes to find the right balance between hit-rate - % and false-alarm rate, and so on. Not set by default, which - % effectively assigns an equal weight of 1 for all samples. + % for each sample. Some samples may be more important than + % others for training. You may want to raise the weight of + % certain classes to find the right balance between hit-rate and + % false-alarm rate, and so on. Not set by default, which + % effectively assigns an equal weight of 1 for all samples. % * __VarType__ optional vector of type `uint8` and size - % `<num_of_vars_in_samples> + <num_of_vars_in_responses>`, - % containing types of each input and output variable. By - % default considers all variables as numerical (both input - % and output variables). In case there is only one output - % variable of integer type, it is considered categorical. - % You can also specify a cell-array of strings (or as one - % string of single characters, e.g 'NNNC'). Possible values: - % * __Numerical__, __N__ same as 'Ordered' - % * __Ordered__, __O__ ordered variables - % * __Categorical__, __C__ categorical variables + % `<num_of_vars_in_samples> + <num_of_vars_in_responses>`, + % containing types of each input and output variable.
By default + considers all variables as numerical (both input and output + variables). In case there is only one output variable of + integer type, it is considered categorical. You can also + specify a cell-array of strings (or as one string of single + characters, e.g 'NNNC'). Possible values: + * __Numerical__, __N__ same as 'Ordered' + * __Ordered__, __O__ ordered variables + * __Categorical__, __C__ categorical variables % * __MissingMask__ Indicator mask for missing observation (not - % currently implemented). Not set by default + % currently implemented). Not set by default % * __TrainTestSplitCount__ divides the dataset into train/test - % sets, by specifying number of samples to use for the test - % set. By default all samples are used for the training set. + % sets, by specifying number of samples to use for the test set. + % By default all samples are used for the training set. % * __TrainTestSplitRatio__ divides the dataset into train/test - % sets, by specifying ratio of samples to use for the test - % set. By default all samples are used for the training set. + % sets, by specifying ratio of samples to use for the test set. + % By default all samples are used for the training set. % * __TrainTestSplitShuffle__ when splitting dataset into - % train/test sets, specify whether to shuffle the samples. - % Otherwise samples are assigned sequentially (first train - % then test). default true + % train/test sets, specify whether to shuffle the samples. + % Otherwise samples are assigned sequentially (first train then + % test). default true % % ### Options for `Data` (second variant for loading CSV file) % * __HeaderLineCount__ The number of lines in the beginning to - % skip; besides the header, the function also skips empty - % lines and lines staring with '#'. default 1 + % skip; besides the header, the function also skips empty lines + % and lines starting with '#'. default 1 % * __ResponseStartIdx__ Index of the first output variable. If - % -1, the function considers the last variable as the - % response. If the dataset only contains input variables and - % no responses, use `ResponseStartIdx = -2` and - % `ResponseEndIdx = 0`, then the output variables vector - % will just contain zeros. default -1 + % -1, the function considers the last variable as the response. + % If the dataset only contains input variables and no responses, + % use `ResponseStartIdx = -2` and `ResponseEndIdx = 0`, then the + % output variables vector will just contain zeros. default -1 % * __ResponseEndIdx__ Index of the last output variable + 1. If - % -1, then there is single response variable at - % `ResponseStartIdx`. default -1 + % -1, then there is single response variable at + % `ResponseStartIdx`. default -1 % * __VarTypeSpec__ The optional text string that specifies the - % variables' types. It has the format - % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, - % variables from `n1` to `n2` (inclusive range), `n3`, `n4` - % to `n5` ... are considered ordered and `n6`, `n7` to - % `n8` ... are considered as categorical. The range - % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` - % should cover all the variables. If `VarTypeSpec` is not - % specified, then algorithm uses the following rules: - % * all input variables are considered ordered by default. - % If some column contains has non- numerical values, e.g. - % 'apple', 'pear', 'apple', 'apple', 'mango', the - % corresponding variable is considered categorical.
- % * if there are several output variables, they are all - % considered as ordered. Errors are reported when - % non-numerical values are used. - % * if there is a single output variable, then if its values - % are non-numerical or are all integers, then it's - % considered categorical. Otherwise, it's considered - % ordered. + % variables' types. It has the format + % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, variables + % from `n1` to `n2` (inclusive range), `n3`, `n4` to `n5` ... + % are considered ordered and `n6`, `n7` to `n8` ... are + % considered as categorical. The range + % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` should + % cover all the variables. If `VarTypeSpec` is not specified, + % then the algorithm uses the following rules: + % * all input variables are considered ordered by default. If + % some column contains non-numerical values, e.g. + % 'apple', 'pear', 'apple', 'apple', 'mango', the + % corresponding variable is considered categorical. + % * if there are several output variables, they are all + % considered as ordered. Errors are reported when + % non-numerical values are used. + % * if there is a single output variable, then if its values are + % non-numerical or are all integers, then it's considered + % categorical. Otherwise, it's considered ordered. % * __Delimiter__ The character used to separate values in each - % line. default ',' + % line. default ',' % * __Missing__ The character used to specify missing - % measurements. It should not be a digit. Although it's a - % non-numerical value, it surely does not affect the - % decision of whether the variable ordered or categorical. - % default '?' + % measurements. It should not be a digit. Although it's a + % non-numerical value, it surely does not affect the decision of + % whether the variable is ordered or categorical. default '?' % * __TrainTestSplitCount__ same as above. % * __TrainTestSplitRatio__ same as above. % * __TrainTestSplitShuffle__ same as above. @@ -391,10 +384,10 @@ function load(this, fname_or_str, varargin) function [err,resp] = calcError(this, samples, responses, varargin) %CALCERROR Computes error on the training or test dataset % - % err = model.calcError(samples, responses) - % err = model.calcError(csvFilename, []) - % [err,resp] = model.calcError(...) - % [...] = model.calcError(..., 'OptionName', optionValue, ...) + % err = model.calcError(samples, responses) + % err = model.calcError(csvFilename, []) + % [err,resp] = model.calcError(...) + % [...] = model.calcError(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ See the train method. @@ -408,14 +401,13 @@ function load(this, fname_or_str, varargin) % ## Options % * __Data__ See the train method. % * __TestError__ if true, the error is computed over the test - % subset of the data, otherwise it's computed over the - % training subset of the data.
Please note that if you loaded a + completely different dataset to evaluate an already trained + classifier, you will probably not want to set the test subset + at all with `TrainTestSplitRatio` and specify + `TestError=false`, so that the error is computed for the whole + new set. Yes, this sounds a bit confusing. default false % % The method uses the predict method to compute the error. For % regression models the error is computed as RMS, for classifiers @@ -429,8 +421,8 @@ function load(this, fname_or_str, varargin) function [results,f] = predict(this, samples, varargin) %PREDICT Predicts response(s) for the provided sample(s) % - % [results,f] = model.predict(samples) - % [...] = model.predict(..., 'OptionName', optionValue, ...) + % [results,f] = model.predict(samples) + % [...] = model.predict(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ The input samples, floating-point matrix. @@ -441,12 +433,13 @@ function load(this, fname_or_str, varargin) % % ## Options % * __Flags__ The optional predict flags, model-dependent. - % Not used. default 0 + % Not used. default 0 % % The method is an alias for cv.KNearest.findNearest, using % `DefaultK` as value for number of nearest neighbors. % - % See also: cv.KNearest.findNearest, cv.KNearest.train, cv.KNearest.calcError + % See also: cv.KNearest.findNearest, cv.KNearest.train, + % cv.KNearest.calcError % [results,f] = KNearest_(this.id, 'predict', samples, varargin{:}); end @@ -457,31 +450,31 @@ function load(this, fname_or_str, varargin) function [results,neiResp,dist,f] = findNearest(this, samples, K) %FINDNEAREST Finds the neighbors and predicts responses for input vectors % - % results = classifier.findNearest(samples, K) - % [results,neiResp,dists,f] = classifier.findNearest(samples, K) - % [...] = classifier.findNearest(..., 'OptionName', optionValue, ...) + % results = classifier.findNearest(samples, K) + % [results,neiResp,dists,f] = classifier.findNearest(samples, K) + % [...] = classifier.findNearest(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ Input samples stored by rows. It is a - % single-precision floating-point matrix of size - % `nsamples-by-nfeatures`. + % single-precision floating-point matrix of size + % `nsamples-by-nfeatures`. % * __K__ Number of used nearest neighbors. Should be greater than - % or equal to 1. + % or equal to 1. % % ## Output % * __results__ Vector with results of prediction (regression or - % classification) for each input sample. It is a - % single-precision floating-point vector with `nsamples` - % elements. + % classification) for each input sample. It is a + % single-precision floating-point vector with `nsamples` + % elements. % * __neiResp__ Optional output array for corresponding neighbor - % responses. It is a single-precision floating-point matrix - % of size `nsamples-by-K`. + % responses. It is a single-precision floating-point matrix of + % size `nsamples-by-K`. % * __dists__ Optional output distances from the input vectors to - % the corresponding neighbors. It is a single-precision - % floating-point matrix of size `nsamples-by-K`. + % the corresponding neighbors. It is a single-precision + % floating-point matrix of size `nsamples-by-K`. % * __f__ If only a single input vector is passed, the predicted - % value is returned by the method. Otherwise it returns the - % prediction of the first sample. + % value is returned by the method. Otherwise it returns the + % prediction of the first sample.
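Continuing the Xtrain/Ytrain/Xtest example from the class description, a short sketch of this method (variable names reuse that example):

    knn = cv.KNearest(Xtrain, Ytrain);                 % model from the class example
    [res, neiResp, dists] = knn.findNearest(Xtest, 3);
    % res(i)       : predicted label for Xtest(i,:)
    % neiResp(i,:) : labels of the 3 nearest training samples
    % dists(i,:)   : distances to those neighbors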
% % For each input vector (a row of the matrix samples), the method % finds the `K` nearest neighbors. In case of regression, the diff --git a/+cv/KalmanFilter.m b/+cv/KalmanFilter.m index 36c520525..25d1b72d6 100644 --- a/+cv/KalmanFilter.m +++ b/+cv/KalmanFilter.m @@ -2,25 +2,25 @@ %KALMANFILTER Kalman filter class % % The class implements a standard - % [Kalman filter](http://en.wikipedia.org/wiki/Kalman_filter), [Welch95]. + % [Kalman filter](https://en.wikipedia.org/wiki/Kalman_filter), [Welch95]. % However, you can modify `transitionMatrix`, `controlMatrix`, and % `measurementMatrix` to get an extended Kalman filter functionality. % % ## Example % - % % initialization - % kf = cv.KalmanFilter(4,2); - % kf.statePre = [10;20;0;0]; % initial state prediction - % kf.transitionMatrix = [1,0,1,0; 0,1,0,1; 0,0,1,0; 0,0,0,1]; - % kf.measurementMatrix([1,4]) = 1; - % kf.processNoiseCov = eye(4) * 1e-4; - % kf.measurementNoiseCov = eye(2) * 1e-1; - % kf.errorCovPost = eye(4) * 0.1; + % % initialization + % kf = cv.KalmanFilter(4,2); + % kf.statePre = [10;20;0;0]; % initial state prediction + % kf.transitionMatrix = [1,0,1,0; 0,1,0,1; 0,0,1,0; 0,0,0,1]; + % kf.measurementMatrix([1,4]) = 1; + % kf.processNoiseCov = eye(4) * 1e-4; + % kf.measurementNoiseCov = eye(2) * 1e-1; + % kf.errorCovPost = eye(4) * 0.1; % - % % dynamics - % p_pred = kf.predict(); % update internal state - % measure = [11;21]; % measurement - % p_est = kf.correct(measure); % correct + % % dynamics + % p_pred = kf.predict(); % update internal state + % measure = [11;21]; % measurement + % p_est = kf.correct(measure); % correct % % ## References % [Welch95]: @@ -32,7 +32,8 @@ % properties (SetAccess = private) - id % Object id + % Object ID + id end properties (Dependent) @@ -64,9 +65,9 @@ function this = KalmanFilter(varargin) %KALMANFILTER KalmanFilter constructor % - % kf = cv.KalmanFilter() - % kf = cv.KalmanFilter(dynamParams, measureParams) - % kf = cv.KalmanFilter(..., 'OptionName', optionValue, ...) + % kf = cv.KalmanFilter() + % kf = cv.KalmanFilter(dynamParams, measureParams) + % kf = cv.KalmanFilter(..., 'OptionName', optionValue, ...) % % ## Input % * __dynamParams__ Dimensionality of the state. @@ -74,9 +75,9 @@ % % ## Options % * __ControlParams__ Dimensionality of the control vector. - % default 0 + % default 0 % * __Type__ Type of the created matrices that should be `single` - % or `double`. default `single` + % or `double`. default `single` % % The constructor invokes the cv.KalmanFilter.init method to % initialize the object with the passed parameters. @@ -90,7 +91,7 @@ function delete(this) %DELETE Destructor % - % kf.delete() + % kf.delete() % % See also: cv.KalmanFilter % @@ -101,8 +102,8 @@ function delete(this) function init(this, dynamParams, measureParams, varargin) %INIT Re-initializes Kalman filter. The previous content is destroyed % - % kf.init(dynamParams, measureParams) - % kf.init(..., 'OptionName', optionValue, ...) + % kf.init(dynamParams, measureParams) + % kf.init(..., 'OptionName', optionValue, ...) % % ## Input % * __dynamParams__ Dimensionality of the state. @@ -110,9 +111,9 @@ function init(this, dynamParams, measureParams, varargin) % % ## Options % * __ControlParams__ Dimensionality of the control vector. - % default 0 + % default 0 % * __Type__ Type of the created matrices that should be `single` - % or `double` (default). + % or `double` (default). 
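A hedged sketch of the per-frame predict/correct loop these methods support (reusing `kf` from the class example above; `numFrames` and `getMeasurement` are hypothetical placeholders for the caller's data source):

    for t = 1:numFrames                % numFrames: hypothetical frame count
        p_pred = kf.predict();         % a priori state estimate
        z = getMeasurement(t);         % hypothetical 2x1 measurement source
        p_est = kf.correct(z);         % a posteriori estimate given z
    end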
% % See also: cv.KalmanFilter.KalmanFilter % @@ -122,7 +123,7 @@ function init(this, dynamParams, measureParams, varargin) function s = predict(this, varargin) %PREDICT Computes a predicted state % - % s = kf.predict('OptionName', optionValue, ...) + % s = kf.predict('OptionName', optionValue, ...) % % ## Output % * __s__ Output predicted state. @@ -138,7 +139,7 @@ function init(this, dynamParams, measureParams, varargin) function s = correct(this, measurement) %CORRECT Updates the predicted state from the measurement % - % s = kf.correct(measurement) + % s = kf.correct(measurement) % % ## Input % * __measurement__ The measured system parameters. diff --git a/+cv/KeyPointsFilter.m b/+cv/KeyPointsFilter.m index 55737481a..aa8f80ba0 100644 --- a/+cv/KeyPointsFilter.m +++ b/+cv/KeyPointsFilter.m @@ -19,14 +19,14 @@ % * __pt__ coordinates of the keypoint `[x,y]`. % * __size__ diameter of the meaningful keypoint neighborhood. % * __angle__ computed orientation of the keypoint (-1 if not applicable); - % it's in [0,360) degrees and measured relative to image coordinate - % system, ie in clockwise. + % it's in [0,360) degrees and measured relative to image coordinate + % system, ie in clockwise. % * __response__ the response by which the most strong keypoints have been - % selected. Can be used for the further sorting or subsampling. + % selected. Can be used for the further sorting or subsampling. % * __octave__ octave (pyramid layer) from which the keypoint has been - % extracted. + % extracted. % * **class_id** object class (if the keypoints need to be clustered by an - % object they belong to). + % object they belong to). % % See also: cv.FeatureDetector % @@ -36,7 +36,7 @@ function keypoints = runByImageBorder(keypoints, imgSize, borderSize) %RUNBYIMAGEBORDER Remove keypoints within borderPixels of an image edge % - % keypoints = cv.KeyPointsFilter.runByImageBorder(keypoints, imgSize, borderSize) + % keypoints = cv.KeyPointsFilter.runByImageBorder(keypoints, imgSize, borderSize) % % ## Input % * __keypoints__ input collection of keypoints. @@ -52,7 +52,7 @@ function keypoints = runByKeypointSize(keypoints, minSize, maxSize) %RUNBYKEYPOINTSIZE Remove keypoints of sizes out of range % - % keypoints = cv.KeyPointsFilter.runByKeypointSize(keypoints, minSize, maxSize) + % keypoints = cv.KeyPointsFilter.runByKeypointSize(keypoints, minSize, maxSize) % % ## Input % * __keypoints__ input collection of keypoints. @@ -68,7 +68,7 @@ function keypoints = runByPixelsMask(keypoints, mask) %RUNBYPIXELSMASK Remove keypoints from some image by mask for pixels of this image % - % keypoints = cv.KeyPointsFilter.runByPixelsMask(keypoints, mask) + % keypoints = cv.KeyPointsFilter.runByPixelsMask(keypoints, mask) % % ## Input % * __keypoints__ input collection of keypoints. @@ -83,7 +83,7 @@ function keypoints = removeDuplicated(keypoints) %REMOVEDUPLICATED Remove duplicated keypoints % - % keypoints = cv.KeyPointsFilter.removeDuplicated(keypoints) + % keypoints = cv.KeyPointsFilter.removeDuplicated(keypoints) % % ## Input % * __keypoints__ input collection of keypoints. @@ -94,10 +94,24 @@ keypoints = KeyPointsFilter_('removeDuplicated', keypoints); end + function keypoints = removeDuplicatedSorted(keypoints) + %REMOVEDUPLICATEDSORTED Remove duplicated keypoints and sort the remaining keypoints + % + % keypoints = cv.KeyPointsFilter.removeDuplicatedSorted(keypoints) + % + % ## Input + % * __keypoints__ input collection of keypoints. + % + % ## Output + % * __keypoints__ output filtered keypoints. 
+ % + keypoints = KeyPointsFilter_('removeDuplicatedSorted', keypoints); + end + function keypoints = retainBest(keypoints, npoints) %RETAINBEST Retain the specified number of the best keypoints (according to the response) % - % keypoints = cv.KeyPointsFilter.retainBest(keypoints, npoints) + % keypoints = cv.KeyPointsFilter.retainBest(keypoints, npoints) % % ## Input % * __keypoints__ input collection of keypoints. @@ -117,20 +131,20 @@ function points2f = convertToPoints(keypoints, varargin) %CONVERTTOPOINTS Convert vector of keypoints to vector of points % - % points2f = cv.KeyPointsFilter.convertToPoints(keypoints) - % points2f = cv.KeyPointsFilter.convertToPoints(keypoints, 'OptionName',optionValue, ...) + % points2f = cv.KeyPointsFilter.convertToPoints(keypoints) + % points2f = cv.KeyPointsFilter.convertToPoints(keypoints, 'OptionName',optionValue, ...) % % ## Input % * __keypoints__ Keypoints obtained from any feature detection - % algorithm like SIFT/SURF/ORB. + % algorithm like SIFT/SURF/ORB. % % ## Output % * __points2f__ Array of (x,y) coordinates of each keypoint. % % ## Options % * __Indices__ Optional array of indexes of keypoints to be - % converted to points. (Acts like a mask to convert only - % specified keypoints). Not set by default. + % converted to points. (Acts like a mask to convert only + % specified keypoints). Not set by default. % % See also: cv.KeyPointsFilter.convertFromPoints % @@ -140,22 +154,22 @@ function keypoints = convertFromPoints(points2f, varargin) %CONVERTFROMPOINTS Convert vector of points to vector of keypoints, where each keypoint is assigned the same size and the same orientation % - % keypoints = cv.KeyPointsFilter.convertFromPoints(points2f) - % keypoints = cv.KeyPointsFilter.convertFromPoints(points2f, 'OptionName',optionValue, ...) + % keypoints = cv.KeyPointsFilter.convertFromPoints(points2f) + % keypoints = cv.KeyPointsFilter.convertFromPoints(points2f, 'OptionName',optionValue, ...) % % ## Input % * __points2f__ Array of (x,y) coordinates of each keypoint. % % ## Output % * __keypoints__ Keypoints similar to those obtained from any - % feature detection algorithm like SIFT/SURF/ORB. + % feature detection algorithm like SIFT/SURF/ORB. % % ## Options % * __Size__ keypoint diameter. % * __Response__ keypoint detector response on the keypoint (that - % is, strength of the keypoint). + % is, strength of the keypoint). % * __Octave__ pyramid octave in which the keypoint has been - % detected. + % detected. % * __ClassId__ object id. % % See also: cv.KeyPointsFilter.convertToPoints @@ -166,7 +180,7 @@ function ovrl = overlap(kp1, kp2) %OVERLAP Compute overlap for pair of keypoints % - % ovrl = cv.KeyPointsFilter.overlap(kp1, kp2) + % ovrl = cv.KeyPointsFilter.overlap(kp1, kp2) % % ## Input % * __kp1__ First keypoint. @@ -174,10 +188,10 @@ % % ## Output % * __ovrl__ Overlap is the ratio between area of keypoint - % regions' intersection and area of keypoint regions' union - % (considering keypoint region as circle). If they don't - % overlap, we get zero. If they coincide at same location - % with same size, we get 1. + % regions' intersection and area of keypoint regions' union + % (considering keypoint region as circle). If they don't + % overlap, we get zero. If they coincide at same location with + % same size, we get 1. 
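+ %
+ % ## Example
+ % A quick sanity check with a made-up keypoint; two identical
+ % keypoints overlap completely:
+ %
+ %     kp = struct('pt',[10,10], 'size',20, 'angle',-1, ...
+ %         'response',0, 'octave',0, 'class_id',-1);
+ %     ovrl = cv.KeyPointsFilter.overlap(kp, kp)   % returns 1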
% ovrl = KeyPointsFilter_('overlap', kp1, kp2); end @@ -185,7 +199,7 @@ function val = hash(kp) %HASH Compute hash of a keypoint % - % val = cv.KeyPointsFilter.hash(kp) + % val = cv.KeyPointsFilter.hash(kp) % % ## Input % * __kp__ input keypoint. diff --git a/+cv/LDA.m b/+cv/LDA.m index 1fc7fec0f..2cde5782f 100644 --- a/+cv/LDA.m +++ b/+cv/LDA.m @@ -3,20 +3,21 @@ % % ## Example % - % Xtrain = randn(100,5); - % labels = randi([1 3], [100 1]); - % Xtest = randn(100,5); + % Xtrain = randn(100,5); + % labels = randi([1 3], [100 1]); + % Xtest = randn(100,5); % - % lda = cv.LDA('NumComponents',3); - % lda.compute(Xtrain, labels); - % Y = lda.project(Xtest); - % Xapprox = lda.reconstruct(Y); + % lda = cv.LDA('NumComponents',3); + % lda.compute(Xtrain, labels); + % Y = lda.project(Xtest); + % Xapprox = lda.reconstruct(Y); % % See also: cv.PCA, fitcdiscr % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent, SetAccess = private) @@ -30,13 +31,13 @@ function this = LDA(varargin) %LDA Constructor, initializes a LDA object % - % lda = cv.LDA() - % lda = cv.LDA('OptionName', optionValue, ...) + % lda = cv.LDA() + % lda = cv.LDA('OptionName', optionValue, ...) % % ## Options % * __NumComponents__ number of components (default 0). If 0 (or - % less) number of components are given, they are - % automatically determined for given data in computation. + % less) number of components are given, they are automatically + % determined for given data in computation. % % See also: cv.LDA.compute % @@ -46,7 +47,7 @@ function delete(this) %DELETE Destructor % - % lda.delete() + % lda.delete() % % See also: cv.LDA % @@ -57,17 +58,17 @@ function delete(this) function load(this, fname_or_str, varargin) %LOAD Deserializes this object from a given filename % - % lda.load(filename) - % lda.load(str, 'FromString',true) + % lda.load(filename) + % lda.load(str, 'FromString',true) % % ## Input % * __filename__ name of file to load % * __str__ String containing serialized object you want to load. % % ## Options - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized object. - % default false + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized object. + % default false % % See also: cv.LDA.save % @@ -77,15 +78,15 @@ function load(this, fname_or_str, varargin) function varargout = save(this, filename) %SAVE Serializes this object to a given filename % - % lda.save(filename) - % str = lda.save(filename) + % lda.save(filename) + % str = lda.save(filename) % % ## Input % * __filename__ name of file to save % % ## Output % * __str__ optional output. If requested, the object is persisted - % to a string in memory instead of writing to disk. + % to a string in memory instead of writing to disk. % % See also: cv.LDA.load % @@ -95,14 +96,14 @@ function load(this, fname_or_str, varargin) function compute(this, src, labels) %COMPUTE Compute the discriminants for data and labels % - % lda.compute(src, labels) + % lda.compute(src, labels) % % ## Input % * __src__ data samples (matrix of rows of size `N-by-d`, or a - % cell-array of `N` vectors each of length `d`). - % Floating-point type. + % cell-array of `N` vectors each of length `d`). Floating-point + % type. % * __labels__ corresponding labels (vector of length `N`). - % Integer type. + % Integer type. 
% % Performs a Discriminant Analysis with Fisher's Optimization % Criterion on given data in `src` and corresponding labels in @@ -116,7 +117,7 @@ function compute(this, src, labels) function m = project(this, src) %PROJECT Projects samples into the LDA subspace % - % m = lda.project(src) + % m = lda.project(src) % % ## Input % * __src__ data samples (matrix of size `N-by-d`) @@ -132,7 +133,7 @@ function compute(this, src, labels) function m = reconstruct(this, src) %RECONSTRUCT Reconstructs projections from the LDA subspace % - % m = lda.reconstruct(src) + % m = lda.reconstruct(src) % % ## Input % * __src__ projected data (matrix of size `N-by-ncomponents`) @@ -150,7 +151,7 @@ function compute(this, src, labels) function dst = subspaceProject(W, mn, src) %SUBSPACEPROJECT Projects samples % - % dst = cv.LDA.subspaceProject(W, mn, src) + % dst = cv.LDA.subspaceProject(W, mn, src) % % ## Input % * __W__ eigenvectors. @@ -170,7 +171,7 @@ function compute(this, src, labels) function dst = subspaceReconstruct(W, mn, src) %SUBSPACERECONSTRUCT Reconstructs projections % - % dst = cv.LDA.subspaceReconstruct(W, mn, src) + % dst = cv.LDA.subspaceReconstruct(W, mn, src) % % ## Input % * __W__ eigenvectors. diff --git a/+cv/LUT.m b/+cv/LUT.m index 92a9831b8..b3d7ca7cf 100644 --- a/+cv/LUT.m +++ b/+cv/LUT.m @@ -1,28 +1,28 @@ %LUT Performs a look-up table transform of an array % -% dst = cv.LUT(src, lut) +% dst = cv.LUT(src, lut) % % ## Input % * __src__ input array of 8-bit elements (`uint8` or `int8`). % * __lut__ look-up table of 256 elements; in case of multi-channel input -% array `src`, the table should either have a single channel (in this -% case the same table is used for all channels) or the same number of -% channels as in the input array. +% array `src`, the table should either have a single channel (in this case +% the same table is used for all channels) or the same number of channels as +% in the input array. % % ## Output % * __dst__ output array of the same size and number of channels as `src`, and -% the same depth as `lut`. +% the same depth as `lut`. % % The function cv.LUT fills the output array with values from the look-up % table. Indices of the entries are taken from the input array. That is, the % function processes each element of src as follows: % -% dst(I) = lut(src(I) + d) +% dst(I) = lut(src(I) + d) % % where: % -% d = { 0 if src has uint8 depth -% { 128 if src has int8 depth +% d = { 0 if src has uint8 depth +% { 128 if src has int8 depth % % See also: cv.convertScaleAbs, cv.convertTo, cv.applyColorMap, intlut % diff --git a/+cv/Laplacian.m b/+cv/Laplacian.m index 3faa8ccb7..c687ba308 100644 --- a/+cv/Laplacian.m +++ b/+cv/Laplacian.m @@ -1,38 +1,38 @@ %LAPLACIAN Calculates the Laplacian of an image % -% dst = cv.Laplacian(src) -% dst = cv.Laplacian(..., 'OptionName', optionValue, ...) +% dst = cv.Laplacian(src) +% dst = cv.Laplacian(..., 'OptionName', optionValue, ...) % % ## Input % * __src__ Source image. % % ## Output % * __dst__ Destination image of the same size and the same number of channels -% as `src`. +% as `src`. % % ## Options % * __DDepth__ Desired depth of the destination image. default -1, which uses -% the same type as the input `src`. +% the same type as the input `src`. % * __KSize__ Aperture size used to compute the second-derivative filters. See -% cv.getDerivKernels for details. The size must be positive and odd. -% default 1 +% cv.getDerivKernels for details. The size must be positive and odd.
+% default 1 % * __Scale__ Optional scale factor for the computed Laplacian values. By -% default, no scaling is applied. See cv.getDerivKernels for details. -% default 1 +% default, no scaling is applied. See cv.getDerivKernels for details. +% default 1 % * __Delta__ Optional delta value that is added to the results prior to -% storing them in `dst`. default 0 +% storing them in `dst`. default 0 % * __BorderType__ Pixel extrapolation method, see cv.copyMakeBorder. -% Default 'Default' +% Default 'Default' % % The function calculates the Laplacian of the source image by adding up the % second x and y derivatives calculated using the Sobel operator: % -% dst = \Delta src = d^2(src)/dx^2 + d^2(src)/dy^2 +% dst = \Delta src = d^2(src)/dx^2 + d^2(src)/dy^2 % % This is done when `KSize > 1`. When `KSize = 1`, the Laplacian is computed % by filtering the image with the following 3x3 aperture: % -% [0, 1, 0; 1,-4, 1; 0, 1, 0] +% [0, 1, 0; 1,-4, 1; 0, 1, 0] % % See also: cv.Sobel, cv.Scharr, imfilter, fspecial % diff --git a/+cv/LineIterator.m b/+cv/LineIterator.m index 02e25f770..de534ef25 100644 --- a/+cv/LineIterator.m +++ b/+cv/LineIterator.m @@ -1,7 +1,7 @@ %LINEITERATOR Raster line iterator % -% [pos, count] = cv.LineIterator(img, pt1, pt2) -% [...] = cv.LineIterator(..., 'OptionName',optionValue, ...) +% [pos, count] = cv.LineIterator(img, pt1, pt2) +% [...] = cv.LineIterator(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ input image @@ -36,17 +36,17 @@ % % ## Example % -% img = imread(fullfile(mexopencv.root(),'test','fruits.jpg')); -% pt1 = [10 15]; % 0-based -% pt2 = [200 100]; % 0-based -% -% % grabs pixels along the line (pt1, pt2) from 8-bit 3-channel image -% [xy,num] = cv.LineIterator(img, pt1, pt2, 'Connectivity',8); -% xy = xy + 1; % 1-based subscripts -% c = zeros(num, size(img,3), class(img)); -% for i=1:num -% c(i,:) = img(xy(i,2), xy(i,1), :); -% end +% img = imread(fullfile(mexopencv.root(),'test','fruits.jpg')); +% pt1 = [10 15]; % 0-based +% pt2 = [200 100]; % 0-based +% +% % grabs pixels along the line (pt1, pt2) from 8-bit 3-channel image +% [xy,num] = cv.LineIterator(img, pt1, pt2, 'Connectivity',8); +% xy = xy + 1; % 1-based subscripts +% c = zeros(num, size(img,3), class(img)); +% for i=1:num +% c(i,:) = img(xy(i,2), xy(i,1), :); +% end % % See also: improfile % diff --git a/+cv/LineSegmentDetector.m b/+cv/LineSegmentDetector.m index 28f57d4bd..d850a3c41 100644 --- a/+cv/LineSegmentDetector.m +++ b/+cv/LineSegmentDetector.m @@ -13,39 +13,39 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = LineSegmentDetector(varargin) %LINESEGMENTDETECTOR Creates a LineSegmentDetector object and initializes it % - % lsd = cv.LineSegmentDetector() - % lsd = cv.LineSegmentDetector('OptionName', optionValue, ...) + % lsd = cv.LineSegmentDetector() + % lsd = cv.LineSegmentDetector('OptionName', optionValue, ...) % % ## Options % * __Refine__ The way found lines will be refined, one of: - % * __None__ No refinement applied. - % * __Standard__ (default) Standard refinement is applied. - % E.g. breaking arches into smaller straighter line - % approximations. - % * __Advanced__ Advanced refinement. Number of false alarms - % is calculated, lines are refined through increase of - % precision, decrement in size, etc. + % * __None__ No refinement applied. + % * __Standard__ (default) Standard refinement is applied. E.g. + % breaking arches into smaller straighter line approximations. + % * __Advanced__ Advanced refinement. 
Number of false alarms is + calculated, lines are refined through increase of precision, + decrement in size, etc. % * __Scale__ The scale of the image that will be used to find the - % lines. In the range `[0,1)`. default 0.8 + % lines. In the range `[0,1)`. default 0.8 % * __SigmaScale__ Sigma for Gaussian filter. It is computed as - % `sigma = SigmaScale/Scale`. default 0.6 + % `sigma = SigmaScale/Scale`. default 0.6 % * __QuantError__ Bound to the quantization error on the gradient - % norm. default 2.0 + % norm. default 2.0 % * __AngleTol__ Gradient angle tolerance in degrees. default 22.5 % * __DetectionThreshold__ Detection threshold: - % `-log10(NFA) > DetectionThreshold`. Used only when - % advancent refinement is chosen. default 0 + % `-log10(NFA) > DetectionThreshold`. Used only when advanced + % refinement is chosen. default 0 % * __MinDensity__ Minimal density of aligned region points in the - % enclosing rectangle. default 0.7 + % enclosing rectangle. default 0.7 % * __NBins__ Number of bins in pseudo-ordering of gradient - % modulus. default 1024 + % modulus. default 1024 % % The cv.LineSegmentDetector algorithm is defined using the % standard values. Only advanced users may want to edit those, as % @@ -59,7 +59,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.LineSegmentDetector % @@ -72,7 +72,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.LineSegmentDetector.empty % @@ -82,11 +82,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if algorithm object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the algorithm object is empty - % (e.g. in the very beginning or after unsuccessful read). + % (e.g. in the very beginning or after unsuccessful read). % % See also: cv.LineSegmentDetector.clear % @@ -96,11 +96,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.LineSegmentDetector.save, cv.LineSegmentDetector.load % @@ -110,7 +110,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -125,21 +125,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from a file storage. % The previous model state is discarded. @@ -154,29 +154,29 @@ function load(this, fname_or_str, varargin) function [lines, width, prec, nfa] = detect(this, img) %DETECT Finds lines in the input image % - % lines = lsd.detect(img) - % [lines, width, prec, nfa] = lsd.detect(img) + % lines = lsd.detect(img) + % [lines, width, prec, nfa] = lsd.detect(img) % % ## Input % * __img__ A grayscale (`uint8`) input image. % % ## Output % * __lines__ A cell-array of 4-element vectors of the form - % `{[x1, y1, x2, y2], ..}` specifying the beginning and ending - % point of a line. Where point 1 `[x1,y1]` is the start, - % point 2 `[x2,y2] the end. Returned lines are strictly - % oriented depending on the gradient. + % `{[x1, y1, x2, y2], ..}` specifying the beginning and ending + % point of a line, where point 1 `[x1,y1]` is the start and + % point 2 `[x2,y2]` is the end. Returned lines are strictly + % oriented depending on the gradient. % * __width__ Vector of widths of the regions, where the lines are - % found. E.g. Width of line. + % found, i.e. the width of each line. % * __prec__ Vector of precisions with which the lines are found. % * __nfa__ Vector containing number of false alarms in the line - % region, with precision of 10%. The bigger the value, - % logarithmically better the detection. This vector will be - % calculated only when the object refine type is 'Advanced', - % empty otherwise. - % * -1 corresponds to 10 mean false alarms - % * 0 corresponds to 1 mean false alarm - % * 1 corresponds to 0.1 mean false alarms + % region, with precision of 10%. The bigger the value, the + % better the detection (the scale is logarithmic). This vector + % will be calculated only when the object refine type is + % 'Advanced', empty otherwise. + % * -1 corresponds to 10 mean false alarms + % * 0 corresponds to 1 mean false alarm + % * 1 corresponds to 0.1 mean false alarms % % See also: cv.LineSegmentDetector.drawSegments % @@ -186,11 +186,11 @@ function load(this, fname_or_str, varargin) function img = drawSegments(this, img, lines) %DRAWSEGMENTS Draws the line segments on a given image % - % img = lsd.drawSegments(img, lines) + % img = lsd.drawSegments(img, lines) % % ## Input % * __img__ The image, where the lines will be drawn. Should be - % bigger than or equal to the image where the lines were found. + % bigger than or equal to the image where the lines were found. % * __lines__ A vector of the lines that need to be drawn. % % ## Output @@ -204,16 +204,16 @@ function load(this, fname_or_str, varargin) function [img, count] = compareSegments(this, sz, lines1, lines2, varargin) %COMPARESEGMENTS Draws two groups of lines in blue and red, counting the non overlapping (mismatching) pixels % - % [img, count] = lsd.compareSegments(sz, lines1, lines2) - % [...] = lsd.compareSegments(..., 'OptionName', optionValue, ...) + % [img, count] = lsd.compareSegments(sz, lines1, lines2) + % [...] = lsd.compareSegments(..., 'OptionName', optionValue, ...) % % ## Input - % * __sz__ The size of the image, where `lines1` and `lines2` - % were found `[w,h]`. - % * __lines1__ The first group of lines that needs to be drawn. - % It is visualized in blue color. + % * __sz__ The size of the image, where `lines1` and `lines2` were + % found `[w,h]`. + % * __lines1__ The first group of lines that needs to be drawn. It + % is visualized in blue color. % * __lines2__ The second group of lines. They are visualized in red - % color. + % color. % % ## Output % * __img__ color image with the two groups of lines drawn. @@ -221,8 +221,8 @@ function load(this, fname_or_str, varargin) % % ## Options % * __Image__ Optional image, where the lines will be drawn. The - % image should be color (3-channel) in order for `lines1` - % and `lines2` to be drawn in the above mentioned colors. + % image should be color (3-channel) in order for `lines1` and + % `lines2` to be drawn in the above mentioned colors. % % See also: cv.LineSegmentDetector.drawSegments % diff --git a/+cv/LogisticRegression.m b/+cv/LogisticRegression.m index 45f332f8f..7fd14abc2 100644 --- a/+cv/LogisticRegression.m +++ b/+cv/LogisticRegression.m @@ -56,19 +56,19 @@ % A sample set of training parameters for the Logistic Regression % classifier can be initialized as follows: % - % lr = cv.LogisticRegression(); - % lr.LearningRate = 0.05; - % lr.Iterations = 1000; - % lr.Regularization = 'L2'; - % lr.TrainMethod = 'MiniBatch'; - % lr.MiniBatchSize = 10; + % lr = cv.LogisticRegression(); + % lr.LearningRate = 0.05; + % lr.Iterations = 1000; + % lr.Regularization = 'L2'; + % lr.TrainMethod = 'MiniBatch'; + % lr.MiniBatchSize = 10; % % ## References % [LogRegWiki]: - % > [Logistic regression](http://en.wikipedia.org/wiki/Logistic_regression) + % > [Logistic regression](https://en.wikipedia.org/wiki/Logistic_regression) % % [BatchDesWiki]: - % > [Gradient descent optimization](http://en.wikipedia.org/wiki/Gradient_descent_optimization) + % > [Gradient descent optimization](https://en.wikipedia.org/wiki/Gradient_descent_optimization) % % [LogRegTomMitch]: % > "Generative and Discriminative Classifiers: Naive Bayes and Logistic @@ -120,7 +120,7 @@ % % * __Batch__ batch gradient descent. % * __MiniBatch__ Mini-Batch Gradient Descent. Set `MiniBatchSize` to - % a positive integer when using this method. + % a positive integer when using this method. TrainMethod % Number of training samples taken in each step of Mini-Batch Gradient % Descent. @@ -144,8 +144,8 @@ function this = LogisticRegression(varargin) %LOGISTICREGRESSION Creates/trains a logistic regression model % - % model = cv.LogisticRegression() - % model = cv.LogisticRegression(...) + % model = cv.LogisticRegression() + % model = cv.LogisticRegression(...) % % The first variant creates a Logistic Regression model with default % parameters. @@ -164,7 +164,7 @@ function delete(this) %DELETE Destructor % - % model.delete() + % model.delete() % % See also: cv.LogisticRegression % @@ -178,7 +178,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % model.clear() + % model.clear() % % The method clear does the same job as the destructor: it % deallocates all the memory occupied by the class members. But @@ -194,11 +194,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = model.empty() + % b = model.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read).
% % See also: cv.LogisticRegression.clear, cv.LogisticRegression.load % @@ -208,17 +208,17 @@ function clear(this) function varargout = save(this, filename) %SAVE Saves the algorithm parameters to a file or a string % - % model.save(filename) - % str = model.save(filename) + % model.save(filename) + % str = model.save(filename) % % ## Input % * __filename__ Name of the file to save to. In case of string - % output, only the filename extension is used to determine - % the output format (XML or YAML). + % output, only the filename extension is used to determine the + % output format (XML or YAML). % % ## Output % * __str__ optional output. If requested, the model is persisted - % to a string in memory instead of writing to disk. + % to a string in memory instead of writing to disk. % % This method stores the complete model state to the specified % XML or YAML file (or to a string in memory, based on the number @@ -232,23 +232,22 @@ function clear(this) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % model.load(filename) - % model.load(str, 'FromString',true) - % model.load(..., 'OptionName',optionValue, ...) + % model.load(filename) + % model.load(str, 'FromString',true) + % model.load(..., 'OptionName',optionValue, ...) % % ## Input % * __filename__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model - % (switches between `Algorithm::load()` and - % `Algorithm::loadFromString()` C++ methods). - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model (switches + % between `Algorithm::load()` and + % `Algorithm::loadFromString()` C++ methods). default false % % This method loads the complete model state from the specified % XML or YAML file (either from disk or serialized string). The @@ -262,11 +261,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = model.getDefaultName() + % name = model.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.LogisticRegression.save, cv.LogisticRegression.load % @@ -279,11 +278,11 @@ function load(this, fname_or_str, varargin) function count = getVarCount(this) %GETVARCOUNT Returns the number of variables in training samples % - % count = model.getVarCount() + % count = model.getVarCount() % % ## Output % * __count__ number of variables in training samples (plus one to - % account for the implicitly prepended bias/intercept term). + % account for the implicitly prepended bias/intercept term). % % See also: cv.LogisticRegression.train % @@ -293,7 +292,7 @@ function load(this, fname_or_str, varargin) function b = isTrained(this) %ISTRAINED Returns true if the model is trained % - % b = model.isTrained() + % b = model.isTrained() % % ## Output % * __b__ Returns true if the model is trained, false otherwise. 
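+ %
+ % ## Example
+ % A minimal train/predict sketch (random data, for illustration
+ % only) showing where isTrained fits in the workflow:
+ %
+ %     X = single(randn(100,2));                % feature matrix
+ %     Y = single([zeros(50,1); ones(50,1)]);   % binary labels
+ %     model = cv.LogisticRegression();
+ %     model.train(X, Y);
+ %     assert(model.isTrained());
+ %     labels = model.predict(X);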
@@ -306,11 +305,11 @@ function load(this, fname_or_str, varargin) function b = isClassifier(this) %ISCLASSIFIER Returns true if the model is a classifier % - % b = model.isClassifier() + % b = model.isClassifier() % % ## Output % * __b__ Returns true if the model is a classifier, false if the - % model is a regressor. + % model is a regressor. % % Always true for logistic regression. % @@ -322,115 +321,110 @@ function load(this, fname_or_str, varargin) function status = train(this, samples, responses, varargin) %TRAIN Trains the statistical model % - % status = model.train(samples, responses) - % status = model.train(csvFilename, []) - % [...] = model.train(..., 'OptionName', optionValue, ...) + % status = model.train(samples, responses) + % status = model.train(csvFilename, []) + % [...] = model.train(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ matrix of training samples. It should have - % `single` type. By default, each row represents a sample - % (see the `Layout` option). + % `single` type. By default, each row represents a sample (see + % the `Layout` option). % * __responses__ matrix of associated responses. A vector of - % categorical labels, stored in an array of type `single`. + % categorical labels, stored in an array of type `single`. % * __csvFilename__ The input CSV file name from which to load - % dataset. In this variant, you should set the second - % argument to an empty array. + % the dataset. In this variant, you should set the second argument + % to an empty array. % % ## Output % * __status__ Success flag. % % ## Options % * __Data__ Training data options, specified as a cell array of - % key/value pairs of the form `{'key',val, ...}`. See below. + % key/value pairs of the form `{'key',val, ...}`. See below. % * __Flags__ The optional training flags, model-dependent. - % Not used. default 0 + % Not used. default 0 % % ### Options for `Data` (first variant with samples and responses) % * __Layout__ Sample types. Default 'Row'. One of: - % * __Row__ each training sample is a row of samples. - % * __Col__ each training sample occupies a column of - % samples. + % * __Row__ each training sample is a row of samples. + % * __Col__ each training sample occupies a column of samples. % * __VarIdx__ vector specifying which variables to use for - % training. It can be an integer vector (`int32`) containing - % 0-based variable indices or logical vector (`uint8` or - % `logical`) containing a mask of active variables. Not set - % by default, which uses all variables in the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based variable indices or logical vector (`uint8` or + % `logical`) containing a mask of active variables. Not set by + % default, which uses all variables in the input data. % * __SampleIdx__ vector specifying which samples to use for - % training. It can be an integer vector (`int32`) containing - % 0-based sample indices or logical vector (`uint8` or - % `logical`) containing a mask of training samples of - % interest. Not set by default, which uses all samples in - % the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based sample indices or logical vector (`uint8` or + % `logical`) containing a mask of training samples of interest. + % Not set by default, which uses all samples in the input data. % * __SampleWeights__ optional floating-point vector with weights - % for each sample. Some samples may be more important than - % others for training. You may want to raise the weight of - % certain classes to find the right balance between hit-rate - % and false-alarm rate, and so on. Not set by default, which - % effectively assigns an equal weight of 1 for all samples. + % for each sample. Some samples may be more important than + % others for training. You may want to raise the weight of + % certain classes to find the right balance between hit-rate and + % false-alarm rate, and so on. Not set by default, which + % effectively assigns an equal weight of 1 for all samples. % * __VarType__ optional vector of type `uint8` and size - % `<number_of_variables_in_samples> + <number_of_variables_in_responses>`, - % containing types of each input and output variable. By - % default considers all variables as numerical (both input - % and output variables). In case there is only one output - % variable of integer type, it is considered categorical. - % You can also specify a cell-array of strings (or as one - % string of single characters, e.g 'NNNC'). Possible values: - % * __Numerical__, __N__ same as 'Ordered' - % * __Ordered__, __O__ ordered variables - % * __Categorical__, __C__ categorical variables + % `<number_of_variables_in_samples> + <number_of_variables_in_responses>`, + % containing types of each input and output variable. By default + % considers all variables as numerical (both input and output + % variables). In case there is only one output variable of + % integer type, it is considered categorical. You can also + % specify a cell-array of strings (or as one string of single + % characters, e.g. 'NNNC'). Possible values: + % * __Numerical__, __N__ same as 'Ordered' + % * __Ordered__, __O__ ordered variables + % * __Categorical__, __C__ categorical variables % * __MissingMask__ Indicator mask for missing observation (not - % currently implemented). Not set by default + % currently implemented). Not set by default % * __TrainTestSplitCount__ divides the dataset into train/test - % sets, by specifying number of samples to use for the test - % set. By default all samples are used for the training set. + % sets, by specifying the number of samples to use for the test set. + % By default all samples are used for the training set. % * __TrainTestSplitRatio__ divides the dataset into train/test - % sets, by specifying ratio of samples to use for the test - % set. By default all samples are used for the training set. + % sets, by specifying the ratio of samples to use for the test set. + % By default all samples are used for the training set. % * __TrainTestSplitShuffle__ when splitting dataset into - % train/test sets, specify whether to shuffle the samples. - % Otherwise samples are assigned sequentially (first train - % then test). default true + % train/test sets, specify whether to shuffle the samples. + % Otherwise samples are assigned sequentially (first train then + % test). default true % % ### Options for `Data` (second variant for loading CSV file) % * __HeaderLineCount__ The number of lines in the beginning to - % skip; besides the header, the function also skips empty - % lines and lines staring with '#'. default 1 + % skip; besides the header, the function also skips empty lines + % and lines starting with '#'. default 1 % * __ResponseStartIdx__ Index of the first output variable. If - % -1, the function considers the last variable as the - % response. If the dataset only contains input variables and - % no responses, use `ResponseStartIdx = -2` and - % `ResponseEndIdx = 0`, then the output variables vector - % will just contain zeros. default -1 + % -1, the function considers the last variable as the response. + % If the dataset only contains input variables and no responses, + % use `ResponseStartIdx = -2` and `ResponseEndIdx = 0`, then the + % output variables vector will just contain zeros. default -1 % * __ResponseEndIdx__ Index of the last output variable + 1. If - % -1, then there is single response variable at - % `ResponseStartIdx`. default -1 + % -1, then there is a single response variable at + % `ResponseStartIdx`. default -1 % * __VarTypeSpec__ The optional text string that specifies the - % variables' types. It has the format - % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, - % variables from `n1` to `n2` (inclusive range), `n3`, `n4` - % to `n5` ... are considered ordered and `n6`, `n7` to - % `n8` ... are considered as categorical. The range - % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` - % should cover all the variables. If `VarTypeSpec` is not - % specified, then algorithm uses the following rules: - % * all input variables are considered ordered by default. - % If some column contains has non- numerical values, e.g. - % 'apple', 'pear', 'apple', 'apple', 'mango', the - % corresponding variable is considered categorical. - % * if there are several output variables, they are all - % considered as ordered. Errors are reported when - % non-numerical values are used. - % * if there is a single output variable, then if its values - % are non-numerical or are all integers, then it's - % considered categorical. Otherwise, it's considered - % ordered. + % variables' types. It has the format + % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, variables + % from `n1` to `n2` (inclusive range), `n3`, `n4` to `n5` ... + % are considered ordered and `n6`, `n7` to `n8` ... are + % considered as categorical. The range + % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` should + % cover all the variables. If `VarTypeSpec` is not specified, + % then the algorithm uses the following rules: + % * all input variables are considered ordered by default. If + % some column contains non-numerical values, e.g. + % 'apple', 'pear', 'apple', 'apple', 'mango', the + % corresponding variable is considered categorical. + % * if there are several output variables, they are all + % considered as ordered. Errors are reported when + % non-numerical values are used. + % * if there is a single output variable, then if its values are + % non-numerical or are all integers, then it's considered + % categorical. Otherwise, it's considered ordered. % * __Delimiter__ The character used to separate values in each - % line. default ',' + % line. default ',' % * __Missing__ The character used to specify missing - % measurements. It should not be a digit. Although it's a - % non-numerical value, it surely does not affect the - % decision of whether the variable ordered or categorical. - % default '?' + % measurements. It should not be a digit. Although it's a + % non-numerical value, it surely does not affect the decision of + % whether the variable is ordered or categorical. default '?' % * __TrainTestSplitCount__ same as above. % * __TrainTestSplitRatio__ same as above. % * __TrainTestSplitShuffle__ same as above. @@ -446,10 +440,10 @@ function load(this, fname_or_str, varargin) function [err,resp] = calcError(this, samples, responses, varargin) %CALCERROR Computes error on the training or test dataset % - % err = model.calcError(samples, responses) - % err = model.calcError(csvFilename, []) - % [err,resp] = model.calcError(...) - % [...] = model.calcError(..., 'OptionName', optionValue, ...)
+ % err = model.calcError(samples, responses) + % err = model.calcError(csvFilename, []) + % [err,resp] = model.calcError(...) + % [...] = model.calcError(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ See the train method. @@ -463,14 +457,13 @@ function load(this, fname_or_str, varargin) % ## Options % * __Data__ See the train method. % * __TestError__ if true, the error is computed over the test - % subset of the data, otherwise it's computed over the - % training subset of the data. Please note that if you - % loaded a completely different dataset to evaluate an - % already trained classifier, you will probably want not to - % set the test subset at all with `TrainTestSplitRatio` and - % specify `TestError=false`, so that the error is computed - % for the whole new set. Yes, this sounds a bit confusing. - % default false + % subset of the data, otherwise it's computed over the training + % subset of the data. Please note that if you loaded a + % completely different dataset to evaluate an already trained + % classifier, you will probably want not to set the test subset + % at all with `TrainTestSplitRatio` and specify + % `TestError=false`, so that the error is computed for the whole + % new set. Yes, this sounds a bit confusing. default false % % The method uses the predict method to compute the error. For % regression models the error is computed as RMS, for classifiers @@ -484,30 +477,29 @@ function load(this, fname_or_str, varargin) function [results,f] = predict(this, samples, varargin) %PREDICT Predicts responses for input samples % - % [results,f] = model.predict(samples) - % [...] = model.predict(..., 'OptionName', optionValue, ...) + % [results,f] = model.predict(samples) + % [...] = model.predict(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ The input data for the prediction algorithm. `MxN` - % matrix, where each row contains variables (features) of - % one object being classified. Should have `single` data - % type. + % matrix, where each row contains variables (features) of one + % object being classified. Should have `single` data type. % % ## Output % * __results__ Predicted labels as a column vector of `int32` - % type. In case `RawOutput` was set, it returns a `single` - % matrix of size `size(samples,1)-by-size(thetas,1)` which - % contains the raw output of the sigmoid function(s). + % type. In case `RawOutput` was set, it returns a `single` + % matrix of size `size(samples,1)-by-size(thetas,1)` which + % contains the raw output of the sigmoid function(s). % * __f__ The same as the response of the first sample, i.e - % `results(1)`. + % `results(1)`. % % ## Options % * __Flags__ The optional predict flags, model-dependent. For - % convenience, you can set the individual flag options - % below, instead of directly setting bits here. default 0 + % convenience, you can set the individual flag options below, + % instead of directly setting bits here. default 0 % * __RawOutput__ makes the method return the raw results (the - % value of the sigmoid function), not the class label. - % default false + % value of the sigmoid function), not the class label. 
+ % default false % % See also: cv.LogisticRegression.train, cv.LogisticRegression.calcError % @@ -520,13 +512,13 @@ function load(this, fname_or_str, varargin) function thetas = getLearntThetas(this) %GETLEARNTTHETAS Returns the trained parameters % - % thetas = model.getLearntThetas() + % thetas = model.getLearntThetas() % % ## Output % * __thetas__ It returns learnt parameters of the Logistic - % Regression as a matrix of type `single` arranged across - % rows. For a two-class classifcation problem, it returns a - % single row matrix. + % Regression as a matrix of type `single` arranged across rows. + % For a two-class classification problem, it returns a single row + % matrix. % % `thetas` is a matrix of size `nclasses-by-model.getVarCount()` % if `nclasses>2`, otherwise `1-by-model.getVarCount()` if diff --git a/+cv/MSER.m b/+cv/MSER.m index 0c8a71ca4..4c534561a 100644 --- a/+cv/MSER.m +++ b/+cv/MSER.m @@ -13,7 +13,7 @@ % % ## References % [WikiArticle]: - % > [MSER](http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions) + % > [MSER](https://en.wikipedia.org/wiki/Maximally_stable_extremal_regions) % % [Nister08]: % > David Nister and Henrik Stewenius. "Linear Time Maximally Stable @@ -30,7 +30,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -55,28 +56,28 @@ function this = MSER(varargin) %MSER Full constructor for MSER detector % - % obj = cv.MSER() - % obj = cv.MSER(..., 'OptionName',optionValue, ...) + % obj = cv.MSER() + % obj = cv.MSER(..., 'OptionName',optionValue, ...) % % ## Options % * __Delta__ delta, in the code, it compares - % `(size_{i}-size_{i-delta})/size_{i-delta}`. default 5. + % `(size_{i}-size_{i-delta})/size_{i-delta}`. default 5. % * __MinArea__ prune the area which is smaller than minArea. - % default 60. + % default 60. % * __MaxArea__ prune the area which is bigger than maxArea. - % default 14400. + % default 14400. % * __MaxVariation__ prune the area having similar size to its - % children. default 0.25 + % children. default 0.25 % * __MinDiversity__ for color image, trace back to cut off mser - % with diversity less than `MinDiversity`. default 0.2. + % with diversity less than `MinDiversity`. default 0.2. % * __MaxEvolution__ for color image, the evolution steps. - % default 200. + % default 200. % * __AreaThreshold__ for color image, the area threshold to cause - % re-initialize. default 1.01. + % re-initialize. default 1.01. % * __MinMargin__ for color image, ignore too small margin. - % default 0.003. + % default 0.003. % * __EdgeBlurSize__ for color image, the aperture size for edge - % blur. default 5. + % blur. default 5. % % See also: cv.MSER.detectRegions, cv.MSER.detect % @@ -86,7 +87,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.MSER % @@ -97,7 +98,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -111,7 +112,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.MSER.empty, cv.MSER.load % @@ -121,11 +122,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read).
% % See also: cv.MSER.clear, cv.MSER.load % @@ -135,7 +136,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -151,21 +152,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -179,11 +180,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.MSER.save, cv.MSER.load % @@ -196,27 +197,25 @@ function load(this, fname_or_str, varargin) function keypoints = detect(this, img, varargin) %DETECT Detects keypoints in an image or image set % - % keypoints = obj.detect(img) - % keypoints = obj.detect(imgs) - % [...] = obj.detect(..., 'OptionName',optionValue, ...) + % keypoints = obj.detect(img) + % keypoints = obj.detect(imgs) + % [...] = obj.detect(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Image (first variant), 8-bit grayscale/color image. % * __imgs__ Image set (second variant), cell array of images. % % ## Output - % * __keypoints__ The detected keypoints. In the first variant, - % a 1-by-N structure array. In the second variant of the - % method, `keypoints{i}` is a set of keypoints detected in - % `imgs{i}`. + % * __keypoints__ The detected keypoints. In the first variant, a + % 1-by-N structure array. In the second variant of the method, + % `keypoints{i}` is a set of keypoints detected in `imgs{i}`. % % ## Options % * __Mask__ A mask specifying where to look for keypoints - % (optional). It must be a logical or 8-bit integer matrix - % with non-zero values in the region of interest. In the - % second variant, it is a cell-array of masks for each input - % image, `masks{i}` is a mask for `imgs{i}`. - % Not set by default. + % (optional). It must be a logical or 8-bit integer matrix with + % non-zero values in the region of interest. In the second + % variant, it is a cell-array of masks for each input image, + % `masks{i}` is a mask for `imgs{i}`. Not set by default. 
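+ %
+ % ## Example
+ % A short sketch (image path borrowed from the cv.LineIterator
+ % example elsewhere in this toolbox; any 8-bit image works):
+ %
+ %     img = imread(fullfile(mexopencv.root(),'test','fruits.jpg'));
+ %     obj = cv.MSER('MinArea',80);
+ %     kpts = obj.detect(img);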
% % See also: cv.MSER.detectRegions % @@ -229,18 +228,18 @@ function load(this, fname_or_str, varargin) function [msers, bboxes] = detectRegions(this, img) %DETECTREGIONS Maximally stable extremal region extractor % - % [msers, bboxes] = obj.detectRegions(img) + % [msers, bboxes] = obj.detectRegions(img) % % ## Input % * __img__ Input 8-bit grayscale or color image (supports - % 1/3/4-channels). Must be greater or equal than 3x3. + % 1/3/4-channels). Must be at least 3x3 in size. % % ## Output - % * __msers__ The output vector of connected points (list of - % point sets). Cell-array of 2D points matrices - % `{[x,y; x,y; ..], [x,y; x,y; ..], ..}`. + % * __msers__ The output vector of connected points (list of point + % sets). Cell-array of 2D points matrices + % `{[x,y; x,y; ..], [x,y; x,y; ..], ..}`. % * __bboxes__ Output matrix of rectangles (bounding boxes). An - % N-by-4 matrix `[x,y,width,height; ...]`. + % N-by-4 matrix `[x,y,width,height; ...]`. % % Runs the extractor on the specified image; returns the MSER % regions, each encoded as a contour (see cv.findContours). diff --git a/+cv/Mahalanobis.m b/+cv/Mahalanobis.m index bef84ca81..d2cef458c 100644 --- a/+cv/Mahalanobis.m +++ b/+cv/Mahalanobis.m @@ -1,6 +1,6 @@ %MAHALANOBIS Calculates the Mahalanobis distance between two vectors % -% d = cv.Mahalanobis(v1, v2, icovar) +% d = cv.Mahalanobis(v1, v2, icovar) % % ## Input % * __v1__ first 1D input vector. @@ -13,7 +13,7 @@ % The function cv.Mahalanobis calculates and returns the weighted distance % between two vectors: % -% d(v1,v2) = sqrt( sum_{i,j} [icovar(i,j) * (v1(I) - v2(I)) * (v1(j) - v2(j))] ) +% d(v1,v2) = sqrt( sum_{i,j} [icovar(i,j) * (v1(i) - v2(i)) * (v1(j) - v2(j))] ) % % The covariance matrix may be calculated using the cv.calcCovarMatrix % function and then inverted using the cv.invert function (preferably using diff --git a/+cv/MergeDebevec.m b/+cv/MergeDebevec.m index 3d1f4212f..c9b5f62a1 100644 --- a/+cv/MergeDebevec.m +++ b/+cv/MergeDebevec.m @@ -16,7 +16,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end %% MergeDebevec @@ -24,7 +25,7 @@ function this = MergeDebevec() %MERGEDEBEVEC Creates MergeDebevec object % - % obj = cv.MergeDebevec() + % obj = cv.MergeDebevec() % % See also: cv.MergeDebevec.process % @@ -34,7 +35,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.MergeDebevec % @@ -48,16 +49,16 @@ function delete(this) function dst = process(this, src, etimes, varargin) %PROCESS Merges images % - % dst = obj.process(src, etimes) - % dst = obj.process(src, etimes, response) + % dst = obj.process(src, etimes) + % dst = obj.process(src, etimes, response) % % ## Input % * __src__ vector of input images, all of the same size and - % `uint8` type. + % `uint8` type. % * __etimes__ vector of exposure time values for each image. % * __response__ 256x1xCN `single` matrix with inverse camera - % response function (CRF) for each pixel value, it should - % have the same number of channels as images `src{i}`. + % response function (CRF) for each pixel value, it should have + % the same number of channels as images `src{i}`. % % ## Output % * __dst__ result image, same size as `src{i}` and `single` type.
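+ %
+ % ## Example
+ % A hedged sketch of merging a bracketed exposure stack into an HDR
+ % image (hypothetical file names and exposure times):
+ %
+ %     srcs = {imread('exp1.jpg'), imread('exp2.jpg'), imread('exp3.jpg')};
+ %     etimes = [1/30, 1/8, 1/2];        % exposure times in seconds
+ %     obj = cv.MergeDebevec();
+ %     hdr = obj.process(srcs, etimes);  % single-precision HDR result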
@@ -76,7 +77,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.MergeDebevec.empty, cv.MergeDebevec.load % @@ -86,11 +87,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.MergeDebevec.clear, cv.MergeDebevec.load % @@ -100,11 +101,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.MergeDebevec.save, cv.MergeDebevec.load % @@ -114,7 +115,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -130,21 +131,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous diff --git a/+cv/MergeMertens.m b/+cv/MergeMertens.m index d0c6bf8e5..b190fc73b 100644 --- a/+cv/MergeMertens.m +++ b/+cv/MergeMertens.m @@ -23,7 +23,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -40,8 +41,8 @@ function this = MergeMertens(varargin) %MERGEMERTENS Creates MergeMertens object % - % obj = cv.MergeMertens() - % obj = cv.MergeMertens('OptionName',optionValue, ...) + % obj = cv.MergeMertens() + % obj = cv.MergeMertens('OptionName',optionValue, ...) % % ## Options % * __ContrastWeight__ default 1.0 @@ -56,7 +57,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.MergeMertens % @@ -70,11 +71,11 @@ function delete(this) function dst = process(this, src) %PROCESS Merges images % - % dst = obj.process(src) + % dst = obj.process(src) % % ## Input % * __src__ vector of input images (1- or 3-channels), all of the - % same size and `uint8` type. + % same size and `uint8` type. % % ## Output % * __dst__ result image, same size as `src{i}` and `single` type. 
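+ %
+ % ## Example
+ % Exposure fusion needs no exposure times; a sketch using the same
+ % hypothetical stack as the cv.MergeDebevec example:
+ %
+ %     srcs = {imread('exp1.jpg'), imread('exp2.jpg'), imread('exp3.jpg')};
+ %     obj = cv.MergeMertens();
+ %     fused = obj.process(srcs);   % values roughly in the [0,1] range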
@@ -90,7 +91,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.MergeMertens.empty, cv.MergeMertens.load % @@ -100,11 +101,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.MergeMertens.clear, cv.MergeMertens.load % @@ -114,11 +115,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.MergeMertens.save, cv.MergeMertens.load % @@ -128,7 +129,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -144,21 +145,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous diff --git a/+cv/MergeRobertson.m b/+cv/MergeRobertson.m index 591f591de..36c7fb8cd 100644 --- a/+cv/MergeRobertson.m +++ b/+cv/MergeRobertson.m @@ -17,7 +17,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end %% MergeRobertson @@ -25,7 +26,7 @@ function this = MergeRobertson() %MERGEROBERTSON Creates MergeRobertson object % - % obj = cv.MergeRobertson() + % obj = cv.MergeRobertson() % % See also: cv.MergeRobertson.process % @@ -35,7 +36,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.MergeRobertson % @@ -49,16 +50,16 @@ function delete(this) function dst = process(this, src, etimes, varargin) %PROCESS Merges images % - % dst = obj.process(src, etimes) - % dst = obj.process(src, etimes, response) + % dst = obj.process(src, etimes) + % dst = obj.process(src, etimes, response) % % ## Input % * __src__ vector of input images, all of the same size and - % `uint8` type. + % `uint8` type. % * __etimes__ vector of exposure time values for each image. 
% * __response__ 256x1xCN `single` matrix with inverse camera - % response function (CRF) for each pixel value, it should - % have the same number of channels as images `src{i}`. + % response function (CRF) for each pixel value, it should have + % the same number of channels as images `src{i}`. % % ## Output % * __dst__ result image, same size as `src{i}` and `single` type. @@ -77,7 +78,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.MergeRobertson.empty, cv.MergeRobertson.load % @@ -87,11 +88,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.MergeRobertson.clear, cv.MergeRobertson.load % @@ -101,11 +102,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.MergeRobertson.save, cv.MergeRobertson.load % @@ -115,7 +116,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -131,21 +132,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous diff --git a/+cv/Net.m b/+cv/Net.m index 4ffdb10f2..d2204c37c 100644 --- a/+cv/Net.m +++ b/+cv/Net.m @@ -14,6 +14,8 @@ % computations (i. e. network testing). A network training is in principle % not supported. % + % https://github.com/opencv/opencv/wiki/Deep-Learning-in-OpenCV + % % ## Net class % Neural network is presented as directed acyclic graph (DAG), where % vertices are Layer instances, and edges specify relationships between @@ -22,86 +24,75 @@ % Each network layer has unique integer id and unique string name inside % its network. LayerId can store either layer name or layer id. 
% - % See also: cv.Net.Net, cv.Net.import, nnet.cnn.layer.Layer, trainNetwork, + % See also: cv.Net.Net, nnet.cnn.layer.Layer, trainNetwork, % SeriesNetwork, importCaffeNetwork, importCaffeLayers, alexnet, vgg16, % vgg19 % properties (SetAccess = private) - id % Object ID + % Object ID + id end %% Constructor/destructor methods function this = Net(varargin) - %NET Default constructor - % - % net = cv.Net() - % net = cv.Net(...) - % - % The first variant creates an empty network. + %NET Constructor and importer of trained serialized models from different dnn-frameworks % - % The second variant accepts the same parameters as the import - % method, in which case it forwards the call after construction. + % net = cv.Net() % - % See also: cv.Net.import, cv.Net.forward + % net = cv.Net('Caffe', prototxt) + % net = cv.Net('Caffe', prototxt, caffeModel) % - this.id = Net_(0, 'new'); - if nargin > 0 - this.import(varargin{:}); - end - end - - function delete(this) - %DELETE Destructor + % net = cv.Net('Tensorflow', model) + % net = cv.Net('Tensorflow', model, config) % - % net.delete() + % net = cv.Net('Torch', filename) + % net = cv.Net('Torch', filename, isBinary) % - % See also: cv.Net - % - if isempty(this.id), return; end - Net_(this.id, 'delete'); - end - end - - %% Importer - methods - function import(this, ntype, varargin) - %IMPORT Load trained serialized models of different dnn-frameworks - % - % net.import('Caffe', prototxt) - % net.import('Caffe', prototxt, caffeModel) - % - % net.import('Tensorflow', model) - % - % net.import('Torch', filename) - % net.import('Torch', filename, isBinary) + % net = cv.Net('Darknet', cfgFile) + % net = cv.Net('Darknet', cfgFile, darknetModel) % % ## Input - % * __prototxt__ path to the .prototxt file with text description - % of the network architecture. - % * __caffeModel__ (optional) path to the .caffemodel file with - % learned network. Empty by default. + % * __prototxt__ path to the `.prototxt` file with text + % description of the network architecture. + % * __caffeModel__ (optional) path to the `.caffemodel` file with + % learned network. Empty by default. + % * __model__ path to the `.pb` file with binary protobuf + % description of the network architecture. Binary serialized + % TensorFlow graph includes weights. + % * __config__ Optional path to the `.pbtxt` file with text + % definition of TensorFlow graph. More flexible than binary + % format and may be used to build the network using binary + % format only as a weights storage. This approach is similar to + % Caffe's `.prototxt` and `.caffemodel`. % * __filename__ path to the file, dumped from Torch by using - % `torch.save()` function. + % `torch.save()` function. % * __isBinary__ specifies whether the network was serialized in - % ascii mode or binary. default true. - % * __model__ path to the .pb file with binary protobuf - % description of the network architecture. + % ascii mode or binary. default true. + % * __cfgFile__ path to the `.cfg` file with text description of + % the network architecture. + % * __darknetModel__ (optional) path to the `.weights` file with + % learned network. % - % Creates importer and adds loaded layers into the net and sets - % connections between them. + % The first variant creates an empty network. % - % The first variant reads a network model stored in + % The second variant reads a network model stored in % [Caffe](http://caffe.berkeleyvision.org) model files.
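+ % For instance, loading a Caffe model could look as follows (a
+ % sketch; the file names are hypothetical placeholders):
+ %
+ %     net = cv.Net('Caffe', 'deploy.prototxt', 'weights.caffemodel');
+ %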
% - % The second variant is an importer of - % [TensorFlow](http://www.tensorflow.org) framework network. + % The third variant is an importer of + % [TensorFlow](https://www.tensorflow.org) framework network. % - % The third variant is an importer of [Torch7](http://torch.ch) + % The fourth variant is an importer of [Torch7](http://torch.ch) % framework network. % - % ## Notes for Torch + % The fifth variant reads a network model stored in + % [Darknet](https://pjreddie.com/darknet/) model files. + % + % The importers first create a net, add loaded layers into it, and + % set connections between them. + % + % ### Notes for Torch % % Warning: Torch7 importer is experimental now, you need % explicitly set CMake flag to compile it. @@ -130,9 +121,20 @@ function import(this, ntype, varargin) % Also some equivalents of these classes from cunn, cudnn, and % fbcunn may be successfully imported. % + % See also: cv.Net, cv.Net.forward + % + this.id = Net_(0, 'new', varargin{:}); + end + + function delete(this) + %DELETE Destructor + % + % net.delete() + % % See also: cv.Net % - Net_(this.id, 'import', ntype, varargin{:}); + if isempty(this.id), return; end + Net_(this.id, 'delete'); end end @@ -141,14 +143,14 @@ function import(this, ntype, varargin) function setInput(this, blob, name) %SETINPUT Sets the new value for the layer output blob % - % net.setInput(blob) - % net.setInput(blob, name) + % net.setInput(blob) + % net.setInput(blob, name) % % ## Input % * __blob__ new blob, constructed from an image or an array of - % images. - % * __name__ descriptor of the updating layer output blob. - % See cv.Net.connect to know format of the descriptor. + % images. + % * __name__ descriptor of the updating layer output blob. See + % cv.Net.connect to know format of the descriptor. % % NOTE: If updating blob is not empty then `blob` must have the % same shape, because network reshaping is not implemented yet. @@ -170,7 +172,7 @@ function setInput(this, blob, name) function setParam(this, layerId, numParam, blob) %SETPARAM Sets the new value for the learned param of the layer % - % net.setParam(layerId, numParam, blob) + % net.setParam(layerId, numParam, blob) % % ## Input % * __layerId__ name or id of the layer. @@ -188,13 +190,13 @@ function setParam(this, layerId, numParam, blob) function blob = getParam(this, layerId, numParam) %GETPARAM Returns parameter blob of the layer % - % blob = net.getParam(layerId) - % blob = net.getParam(layerId, numParam) + % blob = net.getParam(layerId) + % blob = net.getParam(layerId, numParam) % % ## Input % * __layerId__ name or id of the layer. % * __numParam__ index of the layer parameter in the blobs array. - % default 0. + % default 0. % % ## Output % * __blob__ returned parameter blob. @@ -213,20 +215,20 @@ function setParam(this, layerId, numParam, blob) function blob = forward(this, varargin) %FORWARD Runs forward pass % - % blob = net.forward() - % blob = net.forward(outputName) + % blob = net.forward() + % blob = net.forward(outputName) % - % blobs = net.forward(outBlobNames) + % blobs = net.forward(outBlobNames) % % ## Input % * __outputName__ name for layer which output is needed to get. % * __outBlobNames__ names for layers which outputs are needed to - % get. + % get. % % ## Output % * __blob__ blob for first output of specified layer. % * __blobs__ blobs for first outputs of specified layers - % (cell array). + % (cell array). % % The first form runs forward pass to compute output of layer % with name `outputName`. 
By default (`outputName` not specified) @@ -238,7 +240,7 @@ function setParam(this, layerId, numParam, blob) % listed in `outBlobNames`. It returns blobs for first outputs of % specified layers. % - % See also: cv.Net.forwardAll, cv.Net.Net, cv.Net.import + % See also: cv.Net.forwardAll, cv.Net.Net % blob = Net_(this.id, 'forward', varargin{:}); end @@ -246,21 +248,21 @@ function setParam(this, layerId, numParam, blob) function blobs = forwardAll(this, varargin) %FORWARDALL Runs forward pass % - % blobs = net.forwardAll() - % blobs = net.forwardAll(outputName) + % blobs = net.forwardAll() + % blobs = net.forwardAll(outputName) % - % blobsArr = net.forwardAll(outBlobNames) + % blobsArr = net.forwardAll(outBlobNames) % % ## Input % * __outputName__ name for layer which output is needed to get. % * __outBlobNames__ names for layers which outputs are needed to - % get. + % get. % % ## Output % * __blobs__ contains all output blobs for specified layer - % (cell array) + % (cell array) % * __blobsArr__ contains all output blobs for each layer - % specified in `outBlobNames` (cell array of cell arrays). + % specified in `outBlobNames` (cell array of cell arrays). % % The first form runs forward pass to compute output of layer % with name `outputName`. By default (`outputName` not specified) @@ -272,7 +274,7 @@ function setParam(this, layerId, numParam, blob) % listed in `outBlobNames`. It returns all output blobs for each % layer specified in `outBlobNames`. % - % See also: cv.Net.forward, cv.Net.Net, cv.Net.import + % See also: cv.Net.forward, cv.Net.Net % blobs = Net_(this.id, 'forwardAll', varargin{:}); end @@ -280,7 +282,7 @@ function setParam(this, layerId, numParam, blob) function forwardOpt(this, toLayerId) %FORWARDOPT Optimized forward % - % net.forwardOpt(toLayerId) + % net.forwardOpt(toLayerId) % % ## Input % * __toLayerId__ layer name or layer id (one or several). @@ -294,6 +296,24 @@ function forwardOpt(this, toLayerId) % Net_(this.id, 'forwardOpt', toLayerId); end + + function [timings, total] = getPerfProfile(this) + %GETPERFPROFILE Returns overall time for inference and timings (in ticks) for layers + % + % [timings, total] = net.getPerfProfile() + % + % ## Output + % * __timings__ vector for tick timings for all layers. + % * __total__ overall ticks for model inference. + % + % Indexes in the returned vector correspond to layer ids. Some + % layers can be fused with others; in that case a zero tick count + % is returned for the skipped layers. + % + % See also: cv.Net.forward, cv.TickMeter + % + [timings, total] = Net_(this.id, 'getPerfProfile'); + end end %% Net (network architecture) @@ -301,12 +321,12 @@ function forwardOpt(this, toLayerId) function b = empty(this) %EMPTY Returns true if there are no layers in the network. % - % b = net.empty() + % b = net.empty() % % ## Output % * __b__ Boolean. % - % See also: cv.Net.import + % See also: cv.Net.Net % b = Net_(this.id, 'empty'); end @@ -314,26 +334,26 @@ function forwardOpt(this, toLayerId) function id = addLayer(this, name, layerType, params) %ADDLAYER Adds new layer to the net % - % id = net.addLayer(name, layerType, params) + % id = net.addLayer(name, layerType, params) % % ## Input % * __name__ unique name of the adding layer. % * __layerType__ typename of the adding layer (type must be - % registered). + % registered). % * __params__ parameters which will be used to initialize the - % creating layer.
Scalar structure with the following fields: - % * __dict__ name-value dictionary as struct, values are - % scalar values (or arrays) of one of the following - % type: double, integer, or string. - % * __blobs__ List of learned parameters stored as blobs. - % * __name__ Name of the layer instance (optional, can be - % used internal purposes). - % * __type__ Type name which was used for creating layer by - % layer factory (optional). + % creating layer. Scalar structure with the following fields: + % * __dict__ name-value dictionary as struct, values are scalar + % values (or arrays) of one of the following types: double, + % integer, or string. + % * __blobs__ List of learned parameters stored as blobs. + % * __name__ Name of the layer instance (optional, can be used + % for internal purposes). + % * __type__ Type name which was used for creating layer by + % layer factory (optional). % % ## Output % * __id__ unique identifier of created layer, or -1 if a failure - % will happen. + % will happen. % % A LayerParams provides all data needed to initialize layer. It % includes dictionary with scalar params (`params.dict` struct), @@ -341,9 +361,8 @@ function forwardOpt(this, toLayerId) % `params.name` and `params.type` of layer instance. % % Built-in layers listed below partially reproduce functionality - % of corresponding Caffe and Torch7 layers. - % In partuclar, the following layers and Caffe importer were - % tested to reproduce + % of corresponding Caffe and Torch7 layers. In particular, the + % following layers and Caffe importer were tested to reproduce % [Caffe](http://caffe.berkeleyvision.org/tutorial/layers.html) % functionality: % - Convolution @@ -360,33 +379,35 @@ function forwardOpt(this, toLayerId) % See also: cv.Net.addLayerToPrev, cv.Net.deleteLayer, cv.Net.connect % id = Net_(this.id, 'addLayer', name, layerType, params); + id = int32(id); end function id = addLayerToPrev(this, name, layerType, params) %ADDLAYERTOPREV Adds new layer and connects its first input to the first output of previously added layer % - % id = net.addLayerToPrev(name, layerType, params) + % id = net.addLayerToPrev(name, layerType, params) % % ## Input % * __name__ unique name of the adding layer. % * __layerType__ typename of the adding layer (type must be - % registered). + % registered). % * __params__ parameters which will be used to initialize the - % creating layer. + % creating layer. % % ## Output % * __id__ unique identifier of created layer, or -1 if a failure - % will happen. + % will happen. % % See also: cv.Net.addLayer, cv.Net.deleteLayer, cv.Net.connect % id = Net_(this.id, 'addLayerToPrev', name, layerType, params); + id = int32(id); end function id = getLayerId(this, name) %GETLAYERID Converts string name of the layer to the integer identifier % - % id = net.getLayerId(name) + % id = net.getLayerId(name) % % ## Input % * __name__ string name of the layer. @@ -397,12 +418,13 @@ function forwardOpt(this, toLayerId) % See also: cv.Net.getLayer, cv.Net.getLayerNames % id = Net_(this.id, 'getLayerId', name); + id = int32(id); end function names = getLayerNames(this) %GETLAYERNAMES Get layer names % - % names = net.getLayerNames() + % names = net.getLayerNames() % % ## Output % * __names__ names of layers. @@ -415,20 +437,22 @@ function forwardOpt(this, toLayerId) function layer = getLayer(this, layerId) %GETLAYER Returns layer with specified id or name which the network use % - % layer = net.getLayer(layerId) + % layer = net.getLayer(layerId) % % ## Input % * __layerId__ layer name or layer id.
% % ## Output % * __layer__ returned layer. Scalar structure with the following - % fields: - % * __blobs__ List of stored learned parameters as returned - % by cv.Net.getParam. - % * __name__ name of the layer instance, can be used for - % logging or other internal purposes. - % * __type__ Type name which was used for creating layer by - % layer factory. + % fields: + % * __blobs__ List of stored learned parameters as returned by + % cv.Net.getParam. + % * __name__ name of the layer instance, can be used for logging + % or other internal purposes. + % * __type__ Type name which was used for creating layer by + % layer factory. + % * __preferableTarget__ preferred target for layer forwarding + % (see cv.Net.setPreferableTarget). % % Layers are the building blocks of networks. % @@ -440,7 +464,7 @@ function forwardOpt(this, toLayerId) function layers = getLayerInputs(this, layerId) %GETLAYERINPUTS Returns input layers of specific layer % - % layers = net.getLayerInputs(layerId) + % layers = net.getLayerInputs(layerId) % % ## Input % * __layerId__ layer name or layer id. @@ -456,7 +480,7 @@ function forwardOpt(this, toLayerId) function deleteLayer(this, layerId) %DELETELAYER Delete layer for the network % - % net.deleteLayer(layerId) + % net.deleteLayer(layerId) % % ## Input % * __layerId__ layer name or layer id. @@ -471,8 +495,8 @@ function deleteLayer(this, layerId) function connect(this, varargin) %CONNECT Connects output of the first layer to input of the second layer % - % net.connect(outPin, inpPin) - % net.connect(outLayerId, outNum, inpLayerId, inpNum) + % net.connect(outPin, inpPin) + % net.connect(outLayerId, outNum, inpLayerId, inpNum) % % ## Input % * __outPin__ descriptor of the first layer output. See below. @@ -501,7 +525,7 @@ function connect(this, varargin) function setInputsNames(this, inputBlobNames) %SETINPUTSNAMES Sets outputs names of the network input pseudo layer % - % net.setInputsNames(inputBlobNames) + % net.setInputsNames(inputBlobNames) % % ## Input % * __inputBlobNames__ blob names. @@ -521,7 +545,7 @@ function setInputsNames(this, inputBlobNames) function indices = getUnconnectedOutLayers(this) %GETUNCONNECTEDOUTLAYERS Returns indexes of layers with unconnected outputs % - % indices = net.getUnconnectedOutLayers() + % indices = net.getUnconnectedOutLayers() % % ## Output % * __indices__ vector of indices. @@ -534,7 +558,7 @@ function setInputsNames(this, inputBlobNames) function layersTypes = getLayerTypes(this) %GETLAYERTYPES Returns list of types for layer used in model % - % layersTypes = net.getLayerTypes() + % layersTypes = net.getLayerTypes() % % ## Output % * __layersTypes__ layer types. @@ -547,7 +571,7 @@ function setInputsNames(this, inputBlobNames) function count = getLayersCount(this, layerType) %GETLAYERSCOUNT Returns count of layers of specified type % - % count = net.getLayersCount(layerType) + % count = net.getLayersCount(layerType) % % ## Input % * __layerType__ type. @@ -563,11 +587,11 @@ function setInputsNames(this, inputBlobNames) function enableFusion(this, fusion) %ENABLEFUSION Enables or disables layer fusion in the network % - % net.enableFusion(fusion) + % net.enableFusion(fusion) % % ## Input % * __fusion__ true to enable the fusion, false to disable. The - % fusion is enabled by default. + % fusion is enabled by default. 
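+ % ## Example
+ % A sketch of how disabling fusion pairs with cv.Net.getPerfProfile
+ % (which reports zero ticks for fused layers); it assumes `net`
+ % already holds a loaded model whose input was set with
+ % cv.Net.setInput:
+ %
+ %     net.enableFusion(false);                  % report every layer separately
+ %     net.forward();
+ %     [timings, total] = net.getPerfProfile();  % per-layer tick counts
+ %     bar(double(timings));                     % rough visual profile
+ %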
% % See also: cv.Net.connect % @@ -577,18 +601,18 @@ function enableFusion(this, fusion) function setHalideScheduler(this, scheduler) %SETHALIDESCHEDULER Compile Halide layers % - % net.setHalideScheduler(scheduler) + % net.setHalideScheduler(scheduler) % % ## Input % * __scheduler__ scheduler Path to YAML file with scheduling - % directives. + % directives. % % Schedule layers that support Halide backend. Then compile them % for specific target. For layers that not represented in % scheduling file or if no manual scheduling used at all, % automatic scheduling will be applied. % - % See also: cv.setPreferableBackend + % See also: cv.Net.setPreferableBackend % Net_(this.id, 'setHalideScheduler', scheduler); end @@ -596,14 +620,14 @@ function setHalideScheduler(this, scheduler) function setPreferableBackend(this, backend) %SETPREFERABLEBACKEND Ask network to use specific computation backend where it supported % - % net.setPreferableBackend(backend) + % net.setPreferableBackend(backend) % % ## Input % * __backend__ computation backend supported by layers, one of: - % * __Default__ - % * __Halide__ + % * __Default__ + % * __Halide__ % - % See also: cv.setPreferableTarget, cv.Net.setHalideScheduler + % See also: cv.Net.setPreferableTarget, cv.Net.setHalideScheduler % Net_(this.id, 'setPreferableBackend', backend); end @@ -611,14 +635,14 @@ function setPreferableBackend(this, backend) function setPreferableTarget(this, target) %SETPREFERABLETARGET Ask network to make computations on specific target device % - % net.setPreferableTarget(target) + % net.setPreferableTarget(target) % % ## Input % * __target__ target device for computations, one of: - % * __CPU__ - % * __OpenCL__ + % * __CPU__ + % * __OpenCL__ % - % See also: cv.setPreferableBackend + % See also: cv.Net.setPreferableBackend % Net_(this.id, 'setPreferableTarget', target); end @@ -629,8 +653,8 @@ function setPreferableTarget(this, target) function blob = readTorchBlob(filename, varargin) %READTORCHBLOB Loads blob which was serialized as torch.Tensor object of Torch7 framework % - % blob = cv.Net.readTorchBlob(filename) - % blob = cv.Net.readTorchBlob(filename, 'OptionName',optionValue, ...) + % blob = cv.Net.readTorchBlob(filename) + % blob = cv.Net.readTorchBlob(filename, 'OptionName',optionValue, ...) % % ## Input % * __filename__ path to the blob file. @@ -640,9 +664,9 @@ function setPreferableTarget(this, target) % % ## Options % * __IsBinary__ specifies whether blob file was serialized in - % ascii mode or binary. default true. + % ascii mode or binary. default true. % - % This function has the same limitations as cv.Net.import with + % This function has the same limitations as cv.Net.Net with % regards to the Torch importer. % % See also: cv.Net.setInput, cv.Net.blobFromImages @@ -653,9 +677,9 @@ function setPreferableTarget(this, target) function blob = blobFromImages(img, varargin) %BLOBFROMIMAGES Creates 4-dimensional blob from image or series of images % - % blob = cv.Net.blobFromImages(img) - % blob = cv.Net.blobFromImages(imgs) - % blob = cv.Net.blobFromImages(..., 'OptionName',optionValue, ...) + % blob = cv.Net.blobFromImages(img) + % blob = cv.Net.blobFromImages(imgs) + % blob = cv.Net.blobFromImages(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ input image (with 1- or 3-channels). @@ -666,22 +690,28 @@ function setPreferableTarget(this, target) % % ## Options % * __Size__ spatial size for output image `[w,h]`. 
default [0,0] - (in which case input image size is used) + % (in which case input image size is used) % * __Mean__ scalar with mean values which are subtracted from - % channels. Values are intended to be in - % (mean-R, mean-G, mean-B) order if image has BGR ordering - % and `SwapRB` is true. default [0,0,0] + % channels. Values are intended to be in + % (mean-R, mean-G, mean-B) order if image has BGR ordering and + % `SwapRB` is true. default [0,0,0] % * __ScaleFactor__ multiplier for images values. default 1.0 % * __SwapRB__ flag which indicates that swap first and last - % channels in 3-channel image is necessary. default true + % channels in 3-channel image is necessary. For instance, Caffe + % models are usually trained on BGR images, while TensorFlow + % models expect RGB images as input. default true + % * __Crop__ flag which indicates whether image will be cropped + % after resize or not. default true % % Creates blob and optionally resizes and crops the images from % center, subtracts mean values, scales values, and swaps blue and % red channels. % - % Input image is resized so one side after resize is equal to - % corresponing dimension in `Size` and another one is equal or - % larger. Then, crop from the center is performed. + % If `Crop` is true, input image is resized so one side after + % resize is equal to corresponding dimension in `Size` and another + % one is equal or larger. Then, crop from the center is performed. + % If `Crop` is false, a direct resize without cropping (and + % without preserving the aspect ratio) is performed. % % A blob is a 4-dimensional matrix (so-called batch) with the % following shape: `[num, cn, rows, cols]`. @@ -690,5 +720,25 @@ function setPreferableTarget(this, target) % blob = Net_(0, 'blobFromImages', img, varargin{:}); end + + function shrinkCaffeModel(src, dst) + %SHRINKCAFFEMODEL Convert all weights of Caffe network to half precision floating point + % + % cv.Net.shrinkCaffeModel(src, dst) + % + % ## Input + % * __src__ Path to the origin model from the Caffe framework, + % containing single precision floating point weights (usually + % with a `.caffemodel` extension). + % * __dst__ Path to destination model with updated weights. + % + % Note: The shrunk model has no origin `float32` weights, so it + % can't be used in the origin Caffe framework anymore. However the + % structure of data is taken from NVidia's Caffe fork + % (https://github.com/NVIDIA/caffe), so the resulting model may be + % used there. + % + Net_(0, 'shrinkCaffeModel', src, dst); + end end end diff --git a/+cv/NormalBayesClassifier.m b/+cv/NormalBayesClassifier.m index 15fd52186..13a60a448 100644 --- a/+cv/NormalBayesClassifier.m +++ b/+cv/NormalBayesClassifier.m @@ -30,8 +30,8 @@ function this = NormalBayesClassifier(varargin) %NORMALBAYESCLASSIFIER Creates/trains a new Bayes classifier model % - % model = cv.NormalBayesClassifier() - % model = cv.NormalBayesClassifier(...) + % model = cv.NormalBayesClassifier() + % model = cv.NormalBayesClassifier(...) % % The first variant creates an empty model. Use % cv.NormalBayesClassifier.train to train the model after @@ -51,7 +51,7 @@ function delete(this) %DELETE Destructor % - % model.delete() + % model.delete() % % See also: cv.NormalBayesClassifier % @@ -65,7 +65,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % model.clear() + % model.clear() % % The method clear does the same job as the destructor: it % deallocates all the memory occupied by the class members.
But @@ -81,11 +81,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = model.empty() + % b = model.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.NormalBayesClassifier.clear, cv.NormalBayesClassifier.load % @@ -95,17 +95,17 @@ function clear(this) function varargout = save(this, filename) %SAVE Saves the algorithm parameters to a file or a string % - % model.save(filename) - % str = model.save(filename) + % model.save(filename) + % str = model.save(filename) % % ## Input % * __filename__ Name of the file to save to. In case of string - % output, only the filename extension is used to determine - % the output format (XML or YAML). + % output, only the filename extension is used to determine the + % output format (XML or YAML). % % ## Output % * __str__ optional output. If requested, the model is persisted - % to a string in memory instead of writing to disk. + % to a string in memory instead of writing to disk. % % This method stores the complete model state to the specified % XML or YAML file (or to a string in memory, based on the number @@ -119,23 +119,22 @@ function clear(this) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % model.load(filename) - % model.load(str, 'FromString',true) - % model.load(..., 'OptionName',optionValue, ...) + % model.load(filename) + % model.load(str, 'FromString',true) + % model.load(..., 'OptionName',optionValue, ...) % % ## Input % * __filename__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model - % (switches between `Algorithm::load()` and - % `Algorithm::loadFromString()` C++ methods). - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model (switches + % between `Algorithm::load()` and + % `Algorithm::loadFromString()` C++ methods). default false % % This method loads the complete model state from the specified % XML or YAML file (either from disk or serialized string). The @@ -149,11 +148,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = model.getDefaultName() + % name = model.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.NormalBayesClassifier.save, cv.NormalBayesClassifier.load % @@ -166,7 +165,7 @@ function load(this, fname_or_str, varargin) function count = getVarCount(this) %GETVARCOUNT Returns the number of variables in training samples % - % count = model.getVarCount() + % count = model.getVarCount() % % ## Output % * __count__ number of variables in training samples. 
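The save/load pair documented above can also round-trip a model entirely in memory; a small sketch (assuming `model` is an already trained cv.NormalBayesClassifier):

    str = model.save('model.xml');         % extension picks the format (XML here)
    model2 = cv.NormalBayesClassifier();
    model2.load(str, 'FromString',true);   % restore without touching disk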
@@ -179,7 +178,7 @@ function load(this, fname_or_str, varargin) function b = isTrained(this) %ISTRAINED Returns true if the model is trained % - % b = model.isTrained() + % b = model.isTrained() % % ## Output % * __b__ Returns true if the model is trained, false otherwise. @@ -192,11 +191,11 @@ function load(this, fname_or_str, varargin) function b = isClassifier(this) %ISCLASSIFIER Returns true if the model is a classifier % - % b = model.isClassifier() + % b = model.isClassifier() % % ## Output % * __b__ Returns true if the model is a classifier, false if the - % model is a regressor. + % model is a regressor. % % Always true for bayes classifier. % @@ -208,120 +207,115 @@ function load(this, fname_or_str, varargin) function status = train(this, samples, responses, varargin) %TRAIN Trains the statistical model % - % status = model.train(samples, responses) - % status = model.train(csvFilename, []) - % [...] = model.train(..., 'OptionName', optionValue, ...) + % status = model.train(samples, responses) + % status = model.train(csvFilename, []) + % [...] = model.train(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ matrix of training samples. It should have - % `single` type. By default, each row represents a sample - % (see the `Layout` option). + % `single` type. By default, each row represents a sample (see + % the `Layout` option). % * __responses__ matrix of associated responses. A vector of - % categorical labels. + % categorical labels. % * __csvFilename__ The input CSV file name from which to load - % dataset. In this variant, you should set the second - % argument to an empty array. + % dataset. In this variant, you should set the second argument + % to an empty array. % % ## Output % * __status__ Success flag. % % ## Options % * __Data__ Training data options, specified as a cell array of - % key/value pairs of the form `{'key',val, ...}`. See below. + % key/value pairs of the form `{'key',val, ...}`. See below. % * __Flags__ The optional training flags, model-dependent. For - % convenience, you can set the individual flag options - % below, instead of directly setting bits here. default 0 + % convenience, you can set the individual flag options below, + % instead of directly setting bits here. default 0 % * __UpdateModel__ Specifies whether the model should be trained - % from scratch (`UpdateModel=false`), or should be updated - % using the new training data (`UpdateModel=true`) instead - % of being completely overwritten. default false + % from scratch (`UpdateModel=false`), or should be updated using + % the new training data (`UpdateModel=true`) instead of being + % completely overwritten. default false % % ### Options for `Data` (first variant with samples and reponses) % * __Layout__ Sample types. Default 'Row'. One of: - % * __Row__ each training sample is a row of samples. - % * __Col__ each training sample occupies a column of - % samples. + % * __Row__ each training sample is a row of samples. + % * __Col__ each training sample occupies a column of samples. % * __VarIdx__ vector specifying which variables to use for - % training. It can be an integer vector (`int32`) containing - % 0-based variable indices or logical vector (`uint8` or - % `logical`) containing a mask of active variables. Not set - % by default, which uses all variables in the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based variable indices or logical vector (`uint8` or + % `logical`) containing a mask of active variables. 
Not set by + % default, which uses all variables in the input data. % * __SampleIdx__ vector specifying which samples to use for - % training. It can be an integer vector (`int32`) containing - % 0-based sample indices or logical vector (`uint8` or - % `logical`) containing a mask of training samples of - % interest. Not set by default, which uses all samples in - % the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based sample indices or logical vector (`uint8` or + % `logical`) containing a mask of training samples of interest. + % Not set by default, which uses all samples in the input data. % * __SampleWeights__ optional floating-point vector with weights - % for each sample. Some samples may be more important than - % others for training. You may want to raise the weight of - % certain classes to find the right balance between hit-rate - % and false-alarm rate, and so on. Not set by default, which - % effectively assigns an equal weight of 1 for all samples. + % for each sample. Some samples may be more important than + % others for training. You may want to raise the weight of + % certain classes to find the right balance between hit-rate and + % false-alarm rate, and so on. Not set by default, which + % effectively assigns an equal weight of 1 for all samples. % * __VarType__ optional vector of type `uint8` and size - % `<n_input_variables> + <n_output_variables>`, - % containing types of each input and output variable. By - % default considers all variables as numerical (both input - % and output variables). In case there is only one output - % variable of integer type, it is considered categorical. - % You can also specify a cell-array of strings (or as one - % string of single characters, e.g 'NNNC'). Possible values: - % * __Numerical__, __N__ same as 'Ordered' - % * __Ordered__, __O__ ordered variables - % * __Categorical__, __C__ categorical variables + % `<n_input_variables> + <n_output_variables>`, + % containing types of each input and output variable. By default + % considers all variables as numerical (both input and output + % variables). In case there is only one output variable of + % integer type, it is considered categorical. You can also + % specify a cell-array of strings (or as one string of single + % characters, e.g 'NNNC'). Possible values: + % * __Numerical__, __N__ same as 'Ordered' + % * __Ordered__, __O__ ordered variables + % * __Categorical__, __C__ categorical variables % * __MissingMask__ Indicator mask for missing observation (not - % currently implemented). Not set by default + % currently implemented). Not set by default % * __TrainTestSplitCount__ divides the dataset into train/test - % sets, by specifying number of samples to use for the test - % set. By default all samples are used for the training set. + % sets, by specifying number of samples to use for the test set. + % By default all samples are used for the training set. % * __TrainTestSplitRatio__ divides the dataset into train/test - % sets, by specifying ratio of samples to use for the test - % set. By default all samples are used for the training set. + % sets, by specifying ratio of samples to use for the test set. + % By default all samples are used for the training set. % * __TrainTestSplitShuffle__ when splitting dataset into - % train/test sets, specify whether to shuffle the samples. - % Otherwise samples are assigned sequentially (first train - % then test). + % train/test sets, specify whether to shuffle the samples. + % Otherwise samples are assigned sequentially (first train then + % test).
default true % % ### Options for `Data` (second variant for loading CSV file) % * __HeaderLineCount__ The number of lines in the beginning to - % skip; besides the header, the function also skips empty - % lines and lines staring with '#'. default 1 + % skip; besides the header, the function also skips empty lines + % and lines starting with '#'. default 1 % * __ResponseStartIdx__ Index of the first output variable. If - % -1, the function considers the last variable as the - % response. If the dataset only contains input variables and - % no responses, use `ResponseStartIdx = -2` and - % `ResponseEndIdx = 0`, then the output variables vector - % will just contain zeros. default -1 + % -1, the function considers the last variable as the response. + % If the dataset only contains input variables and no responses, + % use `ResponseStartIdx = -2` and `ResponseEndIdx = 0`, then the + % output variables vector will just contain zeros. default -1 % * __ResponseEndIdx__ Index of the last output variable + 1. If - % -1, then there is single response variable at - % `ResponseStartIdx`. default -1 + % -1, then there is a single response variable at + % `ResponseStartIdx`. default -1 % * __VarTypeSpec__ The optional text string that specifies the - % variables' types. It has the format - % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, - % variables from `n1` to `n2` (inclusive range), `n3`, `n4` - % to `n5` ... are considered ordered and `n6`, `n7` to - % `n8` ... are considered as categorical. The range - % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` - % should cover all the variables. If `VarTypeSpec` is not - % specified, then algorithm uses the following rules: - % * all input variables are considered ordered by default. - % If some column contains has non- numerical values, e.g. - % 'apple', 'pear', 'apple', 'apple', 'mango', the - % corresponding variable is considered categorical. - % * if there are several output variables, they are all - % considered as ordered. Errors are reported when - % non-numerical values are used. - % * if there is a single output variable, then if its values - % are non-numerical or are all integers, then it's - % considered categorical. Otherwise, it's considered - % ordered. + % variables' types. It has the format + % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, variables + % from `n1` to `n2` (inclusive range), `n3`, `n4` to `n5` ... + % are considered ordered and `n6`, `n7` to `n8` ... are + % considered as categorical. The range + % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` should + % cover all the variables. If `VarTypeSpec` is not specified, + % then the algorithm uses the following rules: + % * all input variables are considered ordered by default. If + % some column contains non-numerical values, e.g. + % 'apple', 'pear', 'apple', 'apple', 'mango', the + % corresponding variable is considered categorical. + % * if there are several output variables, they are all + % considered as ordered. Errors are reported when + % non-numerical values are used. + % * if there is a single output variable, then if its values are + % non-numerical or are all integers, then it's considered + % categorical. Otherwise, it's considered ordered. % * __Delimiter__ The character used to separate values in each - % line. default ',' + % line. default ',' % * __Missing__ The character used to specify missing - % measurements. It should not be a digit.
Although it's a - % non-numerical value, it surely does not affect the - % decision of whether the variable ordered or categorical. - % default '?' + % measurements. It should not be a digit. Although it's a + % non-numerical value, it surely does not affect the decision of + % whether the variable ordered or categorical. default '?' % * __TrainTestSplitCount__ same as above. % * __TrainTestSplitRatio__ same as above. % * __TrainTestSplitShuffle__ same as above. @@ -354,10 +348,10 @@ function load(this, fname_or_str, varargin) function [err,resp] = calcError(this, samples, responses, varargin) %CALCERROR Computes error on the training or test dataset % - % err = model.calcError(samples, responses) - % err = model.calcError(csvFilename, []) - % [err,resp] = model.calcError(...) - % [...] = model.calcError(..., 'OptionName', optionValue, ...) + % err = model.calcError(samples, responses) + % err = model.calcError(csvFilename, []) + % [err,resp] = model.calcError(...) + % [...] = model.calcError(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ See the train method. @@ -371,14 +365,13 @@ function load(this, fname_or_str, varargin) % ## Options % * __Data__ See the train method. % * __TestError__ if true, the error is computed over the test - % subset of the data, otherwise it's computed over the - % training subset of the data. Please note that if you - % loaded a completely different dataset to evaluate an - % already trained classifier, you will probably want not to - % set the test subset at all with `TrainTestSplitRatio` and - % specify `TestError=false`, so that the error is computed - % for the whole new set. Yes, this sounds a bit confusing. - % default false + % subset of the data, otherwise it's computed over the training + % subset of the data. Please note that if you loaded a + % completely different dataset to evaluate an already trained + % classifier, you will probably want not to set the test subset + % at all with `TrainTestSplitRatio` and specify + % `TestError=false`, so that the error is computed for the whole + % new set. Yes, this sounds a bit confusing. default false % % The method uses the predict method to compute the error. For % regression models the error is computed as RMS, for classifiers @@ -392,12 +385,12 @@ function load(this, fname_or_str, varargin) function [results,f] = predict(this, samples, varargin) %PREDICT Predicts responses for input samples % - % [results,f] = model.predict(samples) - % [...] = model.predict(..., 'OptionName', optionValue, ...) + % [results,f] = model.predict(samples) + % [...] = model.predict(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ The input samples, floating-point matrix. One or - % more vectors stored as rows of the matrix. + % more vectors stored as rows of the matrix. % % ## Output % * __results__ The predicted class for each sample. @@ -405,10 +398,10 @@ function load(this, fname_or_str, varargin) % % ## Options % * __Flags__ The optional predict flags, model-dependent. For - % convenience, you can set the individual flag options - % below, instead of directly setting bits here. default 0 + % convenience, you can set the individual flag options below, + % instead of directly setting bits here. default 0 % * __RawOutput__ makes the method return the raw results (class - % index without mapping to class labels). default false + % index without mapping to class labels). default false % % The method is an alias for cv.NormalBayesClassifier.predictProb, % without returning the probabilities. 
@@ -424,28 +417,28 @@ function load(this, fname_or_str, varargin) function [outputs,outputProbs,f] = predictProb(this, inputs, varargin) %PREDICTPROB Predicts the response for sample(s) % - % outputs = model.predictProb(inputs) - % [outputs,outputProbs,f] = model.predictProb(inputs) - % [...] = model.predictProb(inputs, 'OptionName',optionValue, ...) + % outputs = model.predictProb(inputs) + % [outputs,outputProbs,f] = model.predictProb(inputs) + % [...] = model.predictProb(inputs, 'OptionName',optionValue, ...) % % ## Input % * __inputs__ The input samples, floating-point matrix. One or - % more vectors stored as rows of the matrix. + % more vectors stored as rows of the matrix. % % ## Output % * __outputs__ The predicted class for each sample. % * __outputProbs__ Matrix that contains the output probabilities - % corresponding to each element of result. (A matrix of size - % `nsamples-by-nclasses`). The probabilities are not - % normalized to the 0..1 range. + % corresponding to each element of result. (A matrix of size + % `nsamples-by-nclasses`). The probabilities are not normalized + % to the 0..1 range. % * __f__ unused and returns 0. % % ## Options % * __Flags__ The optional predict flags, model-dependent. For - % convenience, you can set the individual flag options - % below, instead of directly setting bits here. default 0 + % convenience, you can set the individual flag options below, + % instead of directly setting bits here. default 0 % * __RawOutput__ makes the method return the raw results (class - % index without mapping to class labels). default false + % index without mapping to class labels). default false % % The method estimates the most probable classes for input % vectors. Input vectors (one or more) are stored as rows of the diff --git a/+cv/ORB.m b/+cv/ORB.m index e5fb01220..86d479733 100644 --- a/+cv/ORB.m +++ b/+cv/ORB.m @@ -18,7 +18,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -66,13 +67,12 @@ WTA_K % Algorithm used to rank features. % - % * __Harris__ means that Harris algorithm is used to - % rank features (the score is written to `keypoint.response` and - % is used to retain best `NFeatures` features). - % This is the default + % * __Harris__ means that Harris algorithm is used to rank features + % (the score is written to `keypoint.response` and is used to retain + % best `NFeatures` features). This is the default % * __FAST__ is alternative value of the parameter that produces - % slightly less stable keypoints, but it is a little faster to - % compute. + % slightly less stable keypoints, but it is a little faster to + % compute. ScoreType % Size of the patch used by the oriented BRIEF descriptor. % @@ -90,8 +90,8 @@ function this = ORB(varargin) %ORB The ORB constructor % - % obj = cv.ORB() - % obj = cv.ORB(..., 'OptionName',optionValue, ...) + % obj = cv.ORB() + % obj = cv.ORB(..., 'OptionName',optionValue, ...) 
% % ## Options % * __MaxFeatures__ See cv.ORB.MaxFeatures, default 500 @@ -112,7 +112,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.ORB % @@ -123,7 +123,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -137,7 +137,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.ORB.empty, cv.ORB.load % @@ -147,11 +147,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.ORB.clear, cv.ORB.load % @@ -161,7 +161,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -177,21 +177,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -205,11 +205,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.ORB.save, cv.ORB.load % @@ -222,16 +222,16 @@ function load(this, fname_or_str, varargin) function ntype = defaultNorm(this) %DEFAULTNORM Returns the default norm type % - % ntype = obj.defaultNorm() + % ntype = obj.defaultNorm() % % ## Output % * __ntype__ Norm type. One of `cv::NormTypes`: - % * __Inf__ - % * __L1__ - % * __L2__ - % * __L2Sqr__ - % * __Hamming__ - % * __Hamming2__ + % * __Inf__ + % * __L1__ + % * __L2__ + % * __L2Sqr__ + % * __Hamming__ + % * __Hamming2__ % % Always `Hamming` for ORB. % @@ -243,7 +243,7 @@ function load(this, fname_or_str, varargin) function sz = descriptorSize(this) %DESCRIPTORSIZE Returns the descriptor size in bytes % - % sz = obj.descriptorSize() + % sz = obj.descriptorSize() % % ## Output % * __sz__ Descriptor size. 
@@ -258,7 +258,7 @@ function load(this, fname_or_str, varargin) function dtype = descriptorType(this) %DESCRIPTORTYPE Returns the descriptor type % - % dtype = obj.descriptorType() + % dtype = obj.descriptorType() % % ## Output % * __dtype__ Descriptor type, one of numeric MATLAB class names. @@ -273,27 +273,25 @@ function load(this, fname_or_str, varargin) function keypoints = detect(this, img, varargin) %DETECT Detects keypoints in an image or image set % - % keypoints = obj.detect(img) - % keypoints = obj.detect(imgs) - % [...] = obj.detect(..., 'OptionName',optionValue, ...) + % keypoints = obj.detect(img) + % keypoints = obj.detect(imgs) + % [...] = obj.detect(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Image (first variant), 8-bit grayscale image. % * __imgs__ Image set (second variant), cell array of images. % % ## Output - % * __keypoints__ The detected keypoints. In the first variant, - % a 1-by-N structure array. In the second variant of the - % method, `keypoints{i}` is a set of keypoints detected in - % `imgs{i}`. + % * __keypoints__ The detected keypoints. In the first variant, a + % 1-by-N structure array. In the second variant of the method, + % `keypoints{i}` is a set of keypoints detected in `imgs{i}`. % % ## Options % * __Mask__ A mask specifying where to look for keypoints - % (optional). It must be a logical or 8-bit integer matrix - % with non-zero values in the region of interest. In the - % second variant, it is a cell-array of masks for each input - % image, `masks{i}` is a mask for `imgs{i}`. - % Not set by default. + % (optional). It must be a logical or 8-bit integer matrix with + % non-zero values in the region of interest. In the second + % variant, it is a cell-array of masks for each input image, + % `masks{i}` is a mask for `imgs{i}`. Not set by default. % % See also: cv.ORB.compute, cv.ORB.detectAndCompute % @@ -303,26 +301,26 @@ function load(this, fname_or_str, varargin) function [descriptors, keypoints] = compute(this, img, keypoints) %COMPUTE Computes the descriptors for a set of keypoints detected in an image or image set % - % [descriptors, keypoints] = obj.compute(img, keypoints) - % [descriptors, keypoints] = obj.compute(imgs, keypoints) + % [descriptors, keypoints] = obj.compute(img, keypoints) + % [descriptors, keypoints] = obj.compute(imgs, keypoints) % % ## Input % * __img__ Image (first variant), 8-bit grayscale image. % * __imgs__ Image set (second variant), cell array of images. % * __keypoints__ Input collection of keypoints. Keypoints for - % which a descriptor cannot be computed are removed. - % Sometimes new keypoints can be added, for example: cv.SIFT - % duplicates keypoint with several dominant orientations - % (for each orientation). In the first variant, this is a - % struct-array of detected keypoints. In the second variant, - % it is a cell-array, where `keypoints{i}` is a set of keypoints - % detected in `imgs{i}` (a struct-array like before). + % which a descriptor cannot be computed are removed. Sometimes + % new keypoints can be added, for example: cv.SIFT duplicates + % keypoint with several dominant orientations (for each + % orientation). In the first variant, this is a struct-array of + % detected keypoints. In the second variant, it is a cell-array, + % where `keypoints{i}` is a set of keypoints detected in + % `imgs{i}` (a struct-array like before). % % ## Output % * __descriptors__ Computed descriptors. 
In the second variant of - the method `descriptors{i}` are descriptors computed for a - `keypoints{i}`. Row `j` in `descriptors` (or - `descriptors{i}`) is the descriptor for `j`-th keypoint. + % the method `descriptors{i}` are descriptors computed for a + % `keypoints{i}`. Row `j` in `descriptors` (or `descriptors{i}`) + % is the descriptor for `j`-th keypoint. % * __keypoints__ Optional output with possibly updated keypoints. % % See also: cv.ORB.detect, cv.ORB.detectAndCompute % @@ -333,41 +331,41 @@ function load(this, fname_or_str, varargin) function [keypoints, descriptors] = detectAndCompute(this, img, varargin) %DETECTANDCOMPUTE Finds keypoints in an image and computes their descriptors % - % [keypoints, descriptors] = obj.detectAndCompute(img) - % [...] = obj.detectAndCompute(..., 'OptionName',optionValue, ...) + % [keypoints, descriptors] = obj.detectAndCompute(img) + % [...] = obj.detectAndCompute(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Image, input 8-bit grayscale image. % % ## Output % * __keypoints__ The detected keypoints. A 1-by-N structure array - % with the following fields: - % * __pt__ coordinates of the keypoint `[x,y]` - % * __size__ diameter of the meaningful keypoint neighborhood - % * __angle__ computed orientation of the keypoint (-1 if not - % applicable); it's in [0,360) degrees and measured - % relative to image coordinate system (y-axis is - % directed downward), i.e in clockwise. - % * __response__ the response by which the most strong - % keypoints have been selected. Can be used for further - % sorting or subsampling. - % * __octave__ octave (pyramid layer) from which the keypoint - % has been extracted. - % * **class_id** object class (if the keypoints need to be - % clustered by an object they belong to). + % with the following fields: + % * __pt__ coordinates of the keypoint `[x,y]` + % * __size__ diameter of the meaningful keypoint neighborhood + % * __angle__ computed orientation of the keypoint (-1 if not + % applicable); it's in [0,360) degrees and measured relative + % to image coordinate system (y-axis is directed downward), + % i.e. clockwise. + % * __response__ the response by which the most strong keypoints + % have been selected. Can be used for further sorting or + % subsampling. + % * __octave__ octave (pyramid layer) from which the keypoint + % has been extracted. + % * **class_id** object class (if the keypoints need to be + % clustered by an object they belong to). % * __descriptors__ Computed descriptors. Output concatenated - % vectors of descriptors. Each descriptor is a 32-element - % vector, as returned by cv.ORB.descriptorSize, so the total - % size of descriptors will be - % `numel(keypoints) * obj.descriptorSize()`, i.e a matrix - % of size `N-by-32` of class `uint8`, one row per keypoint. + % vectors of descriptors. Each descriptor is a 32-element + % vector, as returned by cv.ORB.descriptorSize, so the total + % size of descriptors will be + % `numel(keypoints) * obj.descriptorSize()`, i.e. a matrix of + % size `N-by-32` of class `uint8`, one row per keypoint. % % ## Options % * __Mask__ optional operation mask specifying where to look for - % keypoints. Not set by default + % keypoints. Not set by default % * __Keypoints__ If passed, then the method will use the provided - % vector of keypoints instead of detecting them, and the - % algorithm just computes their descriptors. + % vector of keypoints instead of detecting them, and the + % algorithm just computes their descriptors.
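+ % ## Example
+ % A minimal sketch (the image file name is a hypothetical
+ % placeholder; any 8-bit grayscale image works):
+ %
+ %     img = cv.imread('scene.png', 'Grayscale',true);
+ %     obj = cv.ORB();
+ %     [kpts, descs] = obj.detectAndCompute(img);
+ %     out = cv.drawKeypoints(img, kpts);
+ %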
% % See also: cv.ORB.detect, cv.ORB.compute % diff --git a/+cv/OnePassStabilizer.m b/+cv/OnePassStabilizer.m index 96a963163..94f675963 100644 --- a/+cv/OnePassStabilizer.m +++ b/+cv/OnePassStabilizer.m @@ -28,7 +28,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -46,7 +47,7 @@ function this = OnePassStabilizer() %ONEPASSSTABILIZER Constructor % - % obj = cv.OnePassStabilizer() + % obj = cv.OnePassStabilizer() % % See also: cv.OnePassStabilizer.nextFrame % @@ -56,7 +57,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.OnePassStabilizer % @@ -70,13 +71,13 @@ function delete(this) function frame = nextFrame(this, varargin) %NEXTFRAME Process next frame from input and return output result % - % frame = obj.nexFrame() - % frame = obj.nexFrame('OptionName',optionValue, ...) + % frame = obj.nextFrame() + % frame = obj.nextFrame('OptionName',optionValue, ...) % % ## Options % * __FlipChannels__ in case the output is a color image, flips the - % color order from OpenCV's BGR/BGRA to MATLAB's RGB/RGBA - % order. default false + % color order from OpenCV's BGR/BGRA to MATLAB's RGB/RGBA order. + % default false % % ## Output % * __frame__ Output result @@ -89,7 +90,7 @@ function delete(this) function reset(this) %RESET Reset the frame source % - % obj.reset() + % obj.reset() % % See also: cv.OnePassStabilizer.nextFrame % @@ -102,15 +103,15 @@ function reset(this) function setLog(this, logType) %SETLOG Set logger class for the video stabilizer % - % stab.setLog(logType) + % stab.setLog(logType) % % ## Input % * __logType__ Logging type. One of: - % * __NullLog__ no logging. - % * __LogToStdout__ (default) log messages to standard - % output. Note that standard output is not displayed - % in MATLAB, you should use `LogToMATLAB` instead. - % * __LogToMATLAB__ log messages to MATLAB command window. + % * __NullLog__ no logging. + % * __LogToStdout__ (default) log messages to standard output. + % Note that standard output is not displayed in MATLAB, you + % should use `LogToMATLAB` instead. + % * __LogToMATLAB__ log messages to MATLAB command window. % % The class uses `LogToStdout` by default. % @@ -121,7 +122,7 @@ function setLog(this, logType) function value = getLog(this) %GETLOG Get the current logger class % - % value = stab.getLog() + % value = stab.getLog() % % ## Output % * __value__ output scalar struct @@ -134,20 +135,20 @@ function setLog(this, logType) function setFrameSource(this, frameSourceType, varargin) %SETFRAMESOURCE Set input frame source for the video stabilizer % - % stab.setInput(frameSourceType, ...) + % stab.setFrameSource(frameSourceType, ...) % - % stab.setFrameSource('NullFrameSource') - % stab.setFrameSource('VideoFileSource', filename) - % stab.setFrameSource('VideoFileSource', filename, 'OptionName',optionValue, ...) + % stab.setFrameSource('NullFrameSource') + % stab.setFrameSource('VideoFileSource', filename) + % stab.setFrameSource('VideoFileSource', filename, 'OptionName',optionValue, ...) % % ## Input % * __frameSourceType__ Input frames source type. One of: - % * __NullFrameSource__ - % * __VideoFileSource__ wrapper around cv.VideoCapture with - % a video file or image sequence as source. + % * __NullFrameSource__ + % * __VideoFileSource__ wrapper around cv.VideoCapture with a + % video file or image sequence as source. % * __filename__ name of the opened video file (eg. `video.avi`) - % or image sequence (eg.
`img_%02d.jpg`, which will read - % samples like `img_00.jpg`, `img_01.jpg`, `img_02.jpg`, ...) + % or image sequence (eg. `img_%02d.jpg`, which will read samples + % like `img_00.jpg`, `img_01.jpg`, `img_02.jpg`, ...) % % ## Options % * __VolatileFrame__ default false @@ -161,7 +162,7 @@ function setFrameSource(this, frameSourceType, varargin) function value = getFrameSource(this) %GETFRAMESOURCE Get the current input frame source % - % value = stab.getFrameSource() + % value = stab.getFrameSource() % % ## Output % * __value__ output scalar struct @@ -174,128 +175,127 @@ function setFrameSource(this, frameSourceType, varargin) function setMotionEstimator(this, motionEstType, varargin) %SETMOTIONESTIMATOR Set the motion estimating algorithm for the video stabilizer % - % stab.setMotionEstimator(motionEstType, ...) + % stab.setMotionEstimator(motionEstType, ...) % - % stab.setMotionEstimator('KeypointBasedMotionEstimator', {estType, ...}, 'OptionName',optionValue, ...) - % stab.setMotionEstimator('FromFileMotionReader', path, 'OptionName',optionValue, ...) - % stab.setMotionEstimator('ToFileMotionWriter', path, {motionEstType, ...}, 'OptionName',optionValue, ...) + % stab.setMotionEstimator('KeypointBasedMotionEstimator', {estType, ...}, 'OptionName',optionValue, ...) + % stab.setMotionEstimator('FromFileMotionReader', path, 'OptionName',optionValue, ...) + % stab.setMotionEstimator('ToFileMotionWriter', path, {motionEstType, ...}, 'OptionName',optionValue, ...) % % ## Input % * __motionEstType__ Global 2D motion estimation methods which - % take frames as input. One of: - % * __KeypointBasedMotionEstimator__ Describes a global 2D - % motion estimation method which uses keypoints - % detection and optical flow for matching. - % * __FromFileMotionReader__ - % * __ToFileMotionWriter__ + % take frames as input. One of: + % * __KeypointBasedMotionEstimator__ Describes a global 2D + % motion estimation method which uses keypoints detection and + % optical flow for matching. + % * __FromFileMotionReader__ + % * __ToFileMotionWriter__ % * __path__ name of file for motion to read-from/write-to. % * __estType__ Global motion estimation method, which estimates - % global motion between two 2D point clouds as a 3x3 2D - % transformation matrix. One of: - % * __MotionEstimatorL1__ Describes a global 2D motion - % estimation method which minimizes L1 error. - % Note: To be able to use this method you must build - % OpenCV with CLP library support. - % * __MotionEstimatorRansacL2__ Describes a robust - % RANSAC-based global 2D motion estimation method - % which minimizes L2 error. + % global motion between two 2D point clouds as a 3x3 2D + % transformation matrix. One of: + % * __MotionEstimatorL1__ Describes a global 2D motion + % estimation method which minimizes L1 error. Note: To be able + % to use this method you must build OpenCV with CLP library + % support. + % * __MotionEstimatorRansacL2__ Describes a robust RANSAC-based + % global 2D motion estimation method which minimizes L2 error. % % ## Options % The following are options for the various algorithms: % % ### `KeypointBasedMotionEstimator`, `FromFileMotionReader`, `ToFileMotionWriter` % * __MotionModel__ Describes motion model between two point - % clouds. Default is based on the estimation method. One of: - % * __Translation__ - % * __TranslationAndScale__ - % * __Rotation__ - % * __Rigid__ - % * __Similarity__ - % * __Affine__ - % * __Homography__ - % * __Unknown__ + % clouds. Default is based on the estimation method. 
One of: + % * __Translation__ + % * __TranslationAndScale__ + % * __Rotation__ + % * __Rigid__ + % * __Similarity__ + % * __Affine__ + % * __Homography__ + % * __Unknown__ % % ### `KeypointBasedMotionEstimator` % * __Detector__ feature detector, specified in the form: - % `{detectorType, 'OptionName',optionValue, ...}`. - % See cv.FeatureDetector.FeatureDetector for a list of - % supported feature detectors. Default is `{'GFTTDetector'}`. + % `{detectorType, 'OptionName',optionValue, ...}`. See + % cv.FeatureDetector.FeatureDetector for a list of supported + % feature detectors. Default is `{'GFTTDetector'}`. % * __OpticalFlowEstimator__ sparse optical flow estimator - % specified as: `{optflowType, 'OptionName',optionValue, ...}`, - % where `optflowType` is one of: - % * __SparsePyrLkOptFlowEstimator__ (default) wrapper around - % cv.calcOpticalFlowPyrLK. - % * __SparsePyrLkOptFlowEstimatorGpu__ + % specified as: `{optflowType, 'OptionName',optionValue, ...}`, + % where `optflowType` is one of: + % * __SparsePyrLkOptFlowEstimator__ (default) wrapper around + % cv.calcOpticalFlowPyrLK. + % * __SparsePyrLkOptFlowEstimatorGpu__ % * __OutlierRejector__ outlier rejector specified as: - % `{rejectorType, 'OptionName',optionValue, ...}`, where - % `rejectorType` is one of: - % * __NullOutlierRejector__ (default) - % * __TranslationBasedLocalOutlierRejector__ + % `{rejectorType, 'OptionName',optionValue, ...}`, where + % `rejectorType` is one of: + % * __NullOutlierRejector__ (default) + % * __TranslationBasedLocalOutlierRejector__ % % ### `MotionEstimatorL1` and `MotionEstimatorRansacL2` % * __MotionModel__ Describes motion model between two point - % clouds. One of: - % * __Translation__ - % * __TranslationAndScale__ - % * __Rotation__ - % * __Rigid__ - % * __Similarity__ - % * __Affine__ (default) - % * __Homography__ - % * __Unknown__ + % clouds. One of: + % * __Translation__ + % * __TranslationAndScale__ + % * __Rotation__ + % * __Rigid__ + % * __Similarity__ + % * __Affine__ (default) + % * __Homography__ + % * __Unknown__ % % ### `MotionEstimatorRansacL2` % * __MinInlierRatio__ default 0.1 % * __RansacParams__ Describes RANSAC method parameters. A struct - % with the following fields: - % * __Size__ Subset size. - % * __Thresh__ Maximum re-projection error value to classify - % as inlier. - % * __Eps__ Maximum ratio of incorrect correspondences. - % * __Prob__ Required success probability. - % - % If a string is passed, it uses the default RANSAC - % parameters for the given motion model. Here are the - % defaults corresponding to each motion model: - % - % * __Translation__ `struct('Size',1, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __TranslationAndScale__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __Rotation__ `struct('Size',1, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __Rigid__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __Similarity__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __Affine__ `struct('Size',3, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __Homography__ `struct('Size',4, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % - % By default is it set to 'Affine'. + % with the following fields: + % * __Size__ Subset size. + % * __Thresh__ Maximum re-projection error value to classify as + % inlier. + % * __Eps__ Maximum ratio of incorrect correspondences. + % * __Prob__ Required success probability. + % + % If a string is passed, it uses the default RANSAC parameters + % for the given motion model. 
Here are the defaults + % corresponding to each motion model: + % + % * __Translation__ `struct('Size',1, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __TranslationAndScale__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __Rotation__ `struct('Size',1, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __Rigid__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __Similarity__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __Affine__ `struct('Size',3, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __Homography__ `struct('Size',4, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % + % By default it is set to 'Affine'. % % ### `SparsePyrLkOptFlowEstimator` % * __WinSize__ Size of the search window at each pyramid level. - % default [21,21] + % default [21,21] % * __MaxLevel__ 0-based maximal pyramid level number. default 3 % % ### `TranslationBasedLocalOutlierRejector` % * __CellSize__ default [50,50] % * __RansacParams__ Describes RANSAC method parameters. A struct - % with the following fields: - % * __Size__ Subset size. - % * __Thresh__ Maximum re-projection error value to classify - % as inlier. - % * __Eps__ Maximum ratio of incorrect correspondences. - % * __Prob__ Required success probability. - % - % If a string is passed, it uses the default RANSAC - % parameters for the given motion model. Here are the - % defaults corresponding to each motion model: - % - % * __Translation__ `struct('Size',1, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __TranslationAndScale__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __Rotation__ `struct('Size',1, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __Rigid__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __Similarity__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __Affine__ `struct('Size',3, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __Homography__ `struct('Size',4, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % - % By default is it set to 'Translation'. + % with the following fields: + % * __Size__ Subset size. + % * __Thresh__ Maximum re-projection error value to classify as + % inlier. + % * __Eps__ Maximum ratio of incorrect correspondences. + % * __Prob__ Required success probability. + % + % If a string is passed, it uses the default RANSAC parameters + % for the given motion model. Here are the defaults + % corresponding to each motion model: + % + % * __Translation__ `struct('Size',1, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __TranslationAndScale__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __Rotation__ `struct('Size',1, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __Rigid__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __Similarity__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __Affine__ `struct('Size',3, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __Homography__ `struct('Size',4, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % + % By default it is set to 'Translation'. % % The class uses `KeypointBasedMotionEstimator` by default with % `MotionEstimatorRansacL2`.
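As a rough sketch, the pieces documented above combine roughly as follows. The input file name `shaky.avi` and the option values are illustrative only, and the end-of-stream check via an empty frame is an assumption, not a documented guarantee:

    stab = cv.OnePassStabilizer();
    stab.setFrameSource('VideoFileSource', 'shaky.avi');
    stab.setMotionEstimator('KeypointBasedMotionEstimator', ...
        {'MotionEstimatorRansacL2', 'MotionModel','Similarity'});
    stab.setMotionFilter('GaussianMotionFilter', 'Radius',15);
    while true
        frame = stab.nextFrame('FlipChannels',true);  % RGB output frame
        if isempty(frame), break; end                 % assumed end-of-stream signal
        imshow(frame); drawnow;
    end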
@@ -307,7 +307,7 @@ function setMotionEstimator(this, motionEstType, varargin) function value = getMotionEstimator(this) %GETMOTIONESTIMATOR Get the current motion estimating algorithm % - % value = stab.getMotionEstimator() + % value = stab.getMotionEstimator() % % ## Output % * __value__ output scalar struct @@ -320,15 +320,15 @@ function setMotionEstimator(this, motionEstType, varargin) function setDeblurer(this, deblurerType, varargin) %SETDEBLURER Set the deblurring algorithm for the video stabilizer % - % stab.setDeblurer(deblurerType, ...) + % stab.setDeblurer(deblurerType, ...) % - % stab.setDeblurer('NullDeblurer') - % stab.setDeblurer('WeightingDeblurer', 'OptionName',optionValue, ...) + % stab.setDeblurer('NullDeblurer') + % stab.setDeblurer('WeightingDeblurer', 'OptionName',optionValue, ...) % % ## Input % * __deblurerType__ Deblurring method. One of: - % * __NullDeblurer__ - % * __WeightingDeblurer__ + % * __NullDeblurer__ + % * __WeightingDeblurer__ % % ## Options % * __Radius__ default 0 @@ -343,7 +343,7 @@ function setDeblurer(this, deblurerType, varargin) function value = getDeblurer(this) %GETDEBLURER Gets the current deblurring algorithm % - % value = stab.getDeblurer() + % value = stab.getDeblurer() % % ## Output % * __value__ output scalar struct @@ -356,47 +356,47 @@ function setDeblurer(this, deblurerType, varargin) function setInpainter(this, inpainterType, varargin) %SETINPAINTER Set the inpainting algorithm for the video stabilizer % - % stab.setInpainter(inpainterType, ...) + % stab.setInpainter(inpainterType, ...) % - % stab.setInpainter('NullInpainter') - % stab.setInpainter('ColorInpainter', 'OptionName',optionValue, ...) - % stab.setInpainter('InpaintingPipeline', {{inpainterType, ...}, {inpainterType, ...}, ...}, 'OptionName',optionValue, ...) - % stab.setInpainter('ConsistentMosaicInpainter', 'OptionName',optionValue, ...) - % stab.setInpainter('MotionInpainter', 'OptionName',optionValue, ...) - % stab.setInpainter('ColorAverageInpainter', 'OptionName',optionValue, ...) - % stab.setInpainter('ColorInpainter', 'OptionName',optionValue, ...) + % stab.setInpainter('NullInpainter') + % stab.setInpainter('ColorInpainter', 'OptionName',optionValue, ...) + % stab.setInpainter('InpaintingPipeline', {{inpainterType, ...}, {inpainterType, ...}, ...}, 'OptionName',optionValue, ...) + % stab.setInpainter('ConsistentMosaicInpainter', 'OptionName',optionValue, ...) + % stab.setInpainter('MotionInpainter', 'OptionName',optionValue, ...) + % stab.setInpainter('ColorAverageInpainter', 'OptionName',optionValue, ...) + % stab.setInpainter('ColorInpainter', 'OptionName',optionValue, ...) % % ## Input % * __inpainterType__ inpainting method. One of: - % * __NullInpainter__ Null inpainter. - % * __InpaintingPipeline__ A pipeline composed of other - % inpainters, applied in sequence. - % * __ConsistentMosaicInpainter__ - % * __MotionInpainter__ (requires CUDA) - % * __ColorAverageInpainter__ - % * __ColorInpainter__ + % * __NullInpainter__ Null inpainter. + % * __InpaintingPipeline__ A pipeline composed of other + % inpainters, applied in sequence. + % * __ConsistentMosaicInpainter__ + % * __MotionInpainter__ (requires CUDA) + % * __ColorAverageInpainter__ + % * __ColorInpainter__ % % ## Options % The following are options accepted by all algorithms: % % * __MotionModel__ Describes motion model between two point - % clouds. 
One of: - % * __Translation__ - % * __TranslationAndScale__ - % * __Rotation__ - % * __Rigid__ - % * __Similarity__ - % * __Affine__ - % * __Homography__ - % * __Unknown__ (default) + % clouds. One of: + % * __Translation__ + % * __TranslationAndScale__ + % * __Rotation__ + % * __Rigid__ + % * __Similarity__ + % * __Affine__ + % * __Homography__ + % * __Unknown__ (default) % * __Radius__ default 0 % % The following are options for the various algorithms: % % ### `ColorInpainter` % * __Method__ Inpainting algorithm. One of: - % * __NS__ Navier-Stokes based method - % * __Telea__ Method by Alexandru Telea (default) + % * __NS__ Navier-Stokes based method + % * __Telea__ Method by Alexandru Telea (default) % * __Radius2__ default 2.0 % % ### `ConsistentMosaicInpainter` @@ -404,9 +404,9 @@ function setInpainter(this, inpainterType, varargin) % % ### `MotionInpainter` % * __OptFlowEstimator__ dense optical flow estimator specified as - % `{optflowType, 'OptionName',optionValue, ...}`, where - % `optflowType` is one of: - % * __DensePyrLkOptFlowEstimatorGpu__ (default, requires CUDA) + % `{optflowType, 'OptionName',optionValue, ...}`, where + % `optflowType` is one of: + % * __DensePyrLkOptFlowEstimatorGpu__ (default, requires CUDA) % * __FlowErrorThreshold__ default 1e-4 % * __DistThreshold__ default 5.0 % * __BorderMode__ default 'Replicate' @@ -424,7 +424,7 @@ function setInpainter(this, inpainterType, varargin) function value = getInpainter(this) %GETINPAINTER Gets the current inpainting algorithm % - % value = stab.getInpainter() + % value = stab.getInpainter() % % ## Output % * __value__ output scalar struct @@ -440,11 +440,11 @@ function setInpainter(this, inpainterType, varargin) function setMotionFilter(this, motionFilterType, varargin) %SETMOTIONFILTER Set the motion filtering algorithm for the video stabilizer % - % stab.setMotionFilter(motionFilterType, 'OptionName',optionValue, ...) + % stab.setMotionFilter(motionFilterType, 'OptionName',optionValue, ...) % % ## Input % * __motionFilterType__ motion filtering method. One of: - % * __GaussianMotionFilter__ + % * __GaussianMotionFilter__ % % ## Options % * __Radius__ default 15 @@ -459,7 +459,7 @@ function setMotionFilter(this, motionFilterType, varargin) function value = getMotionFilter(this) %GETMOTIONFILTER Get the current motion filtering algorithm % - % value = stab.getMotionFilter() + % value = stab.getMotionFilter() % % ## Output % * __value__ output scalar struct @@ -475,26 +475,26 @@ function setMotionFilter(this, motionFilterType, varargin) function ransac = RansacParamsDefault2dMotion(model) %RANSACPARAMSDEFAULT2DMOTION Default RANSAC method parameters for a given motion model % - % ransac = cv.OnePassStabilizer.RansacParamsDefault2dMotion(model) + % ransac = cv.OnePassStabilizer.RansacParamsDefault2dMotion(model) % % ## Input % * __model__ Motion model. One of: - % * __Translation__ - % * __TranslationAndScale__ - % * __Rotation__ - % * __Rigid__ - % * __Similarity__ - % * __Affine__ - % * __Homography__ + % * __Translation__ + % * __TranslationAndScale__ + % * __Rotation__ + % * __Rigid__ + % * __Similarity__ + % * __Affine__ + % * __Homography__ % % ## Output % * __ransac__ Default RANSAC method parameters for the given - % motion model. A struct with the following fields: - % * __Size__ Subset size. - % * __Thresh__ Maximum re-projection error value to classify - % as inlier. - % * __Eps__ Maximum ratio of incorrect correspondences. - % * __Prob__ Required success probability. + % motion model. 
A struct with the following fields: + % * __Size__ Subset size. + % * __Thresh__ Maximum re-projection error value to classify as + % inlier. + % * __Eps__ Maximum ratio of incorrect correspondences. + % * __Prob__ Required success probability. % % Here are the parameters corresponding to each motion model: % diff --git a/+cv/PCA.m b/+cv/PCA.m index 80e849cc6..000a1d5d1 100644 --- a/+cv/PCA.m +++ b/+cv/PCA.m @@ -17,33 +17,34 @@ % space with a much shorter vector consisting of the projected vector's % coordinates in the subspace. Such a transformation is also known as % Karhunen-Loeve Transform, or KLT. See - % [PCA](http://en.wikipedia.org/wiki/Principal_component_analysis). + % [PCA](https://en.wikipedia.org/wiki/Principal_component_analysis). % % ## Example % The following shows a quick example of how to reduce dimensionality % of samples from 10 to 3. % - % Xtrain = randn(100,10); - % Xtest = randn(100,10); - % pca = cv.PCA(Xtrain, 'MaxComponents',3); - % Y = pca.project(Xtest); - % Xapprox = pca.backProject(Y); + % Xtrain = randn(100,10); + % Xtest = randn(100,10); + % pca = cv.PCA(Xtrain, 'MaxComponents',3); + % Y = pca.project(Xtest); + % Xapprox = pca.backProject(Y); % % The class also implements the save/load pattern to regular MAT-files, % so we can do the following: % - % pca = cv.PCA(randn(100,5)); - % save out.mat pca - % clear pca + % pca = cv.PCA(randn(100,5)); + % save out.mat pca + % clear pca % - % load out.mat - % disp(pca) + % load out.mat + % disp(pca) % - % See also: cv.PCA.PCA + % See also: cv.PCA.PCA, cv.SVD % properties (SetAccess = private) - id % Object ID + % Object ID + id end % cached properties of their C++ couterpart @@ -68,9 +69,9 @@ function this = PCA(varargin) %PCA PCA constructors % - % pca = cv.PCA() - % pca = cv.PCA(data, 'OptionName', optionValue, ...) - % pca = cv.PCA(S) + % pca = cv.PCA() + % pca = cv.PCA(data, 'OptionName', optionValue, ...) + % pca = cv.PCA(S) % % ## Input % * __data__ input samples stored as matrix rows or matrix columns @@ -103,7 +104,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.PCA % @@ -114,18 +115,18 @@ function delete(this) function read(this, fname_or_str, varargin) %READ Read PCA objects from file % - % obj.read(filename) - % obj.read(str, 'FromString',true) - % obj.read(..., 'OptionName', optionValue, ...) + % obj.read(filename) + % obj.read(str, 'FromString',true) + % obj.read(..., 'OptionName', optionValue, ...) % % ## Input % * __filename__ Name of the file to read. % * __str__ String containing serialized object you want to load. % % ## Options - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized object. - % default false + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized object. + % default false % % Loads `eigenvalues`, `eigenvectors` and `mean` from specified % storage. @@ -138,15 +139,15 @@ function read(this, fname_or_str, varargin) function varargout = write(this, filename) %WRITE Write PCA objects to file % - % obj.write(filename) - % str = obj.write(filename) + % obj.write(filename) + % str = obj.write(filename) % % ## Input % * __filename__ Name of the file to write to. % % ## Output % * __str__ optional output. If requested, the object is persisted - % to a string in memory instead of writing to disk. + % to a string in memory instead of writing to disk. 
% % Writes `eigenvalues`, `eigenvectors` and `mean` to the specified % file. @@ -159,27 +160,27 @@ function read(this, fname_or_str, varargin) function compute(this, data, varargin) %COMPUTE Performs Principal Component Analysis on the supplied dataset % - % pca.compute(data) - % pca.compute(..., 'OptionName', optionValue, ...) + % pca.compute(data) + % pca.compute(..., 'OptionName', optionValue, ...) % % ## Input % * __data__ input samples stored as the matrix rows or as the - % matrix columns + % matrix columns % % ## Options % * __DataAs__ Data layout option. Default 'Row'. One of: - % * __Row__ indicates that the input samples are stored as - % matrix rows. - % * __Col__ indicates that the input samples are stored as - % matrix columns. + % * __Row__ indicates that the input samples are stored as + % matrix rows. + % * __Col__ indicates that the input samples are stored as + % matrix columns. % * __MaxComponents__ Maximum number of components that PCA should - % retain. default 0 (all the components are retained). + % retain. default 0 (all the components are retained). % * __RetainedVariance__ Percentage of variance that PCA should - % retain. Using this parameter will let the PCA decided how - % many components to retain but it will always keep at - % least 2. default 1.0 (all the components are retained). + % retain. Using this parameter will let the PCA decide how many + % components to retain, but it will always keep at least 2. + % default 1.0 (all the components are retained). % * __Mean__ Optional mean value. By default, the mean is computed - % from the data. Not set by default. + % from the data. Not set by default. % % **Note**: `RetainedVariance` and `MaxComponents` are mutually % exclusive options, and shouldn't be used together. @@ -208,23 +209,21 @@ function compute(this, data, varargin) function Y = project(this, X) %PROJECT Projects vector(s) to the principal component subspace % - % Y = pca.project(X) + % Y = pca.project(X) % % ## Input % * __X__ input vector(s); must have the same dimensionality and - % the same layout as the input data used at PCA phase, that - % is, if 'Row' was specified, then `size(X,2)==size(data,2)` - % (vector dimensionality) and `size(X,1)` is the number of - % vectors to project, and the same is true for the 'Col' - % case. + % the same layout as the input data used at PCA phase, that is, + % if 'Row' was specified, then `size(X,2)==size(data,2)` (vector + % dimensionality) and `size(X,1)` is the number of vectors to + % project, and the same is true for the 'Col' case. % % ## Output % * __Y__ output vectors (PC coefficients); in case of 'Col', the - % output matrix has as many columns as the number of input - % vectors, this means that `size(Y,2)==size(X,2)` and the - % number of rows match the number of principal components - % (for example, `MaxComponents` parameter passed to the - % constructor). + % output matrix has as many columns as the number of input + % vectors, this means that `size(Y,2)==size(X,2)` and the number + % of rows match the number of principal components (for example, + % `MaxComponents` parameter passed to the constructor).
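A small sketch of the project/back-project round trip under the default 'Row' layout, in the spirit of the dimensionality-reduction example earlier in the class help:

    X = randn(200,8);                     % 200 samples, 8 variables
    pca = cv.PCA(X, 'MaxComponents',2);
    Y = pca.project(X);                   % size(Y) == [200 2]
    Xr = pca.backProject(Y);              % size(Xr) == [200 8]
    relerr = norm(X - Xr,'fro') / norm(X,'fro');  % reconstruction error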
% % The method project one or more vectors to the principal % component subspace, where each vector projection is @@ -238,16 +237,16 @@ function compute(this, data, varargin) function X = backProject(this, Y) %BACKPROJECT Reconstructs vectors from their PC projections % - % X = pca.backProject(Y) + % X = pca.backProject(Y) % % ## Input % * __Y__ coordinates of the vectors in the principal component - % subspace, the layout and size are the same as of - % cv.PCA.project output vectors. + % subspace, the layout and size are the same as of + % cv.PCA.project output vectors. % % ## Output % * __X__ reconstructed vectors; the layout and size are the same - % as of cv.PCA.project input vectors. + % as of cv.PCA.project input vectors. % % The method is the inverse operation to cv.PCA.project. It % takes PC coordinates of projected vectors and reconstruct the @@ -304,7 +303,7 @@ function compute(this, data, varargin) function S = struct(this) %STRUCT Converts to a struct array % - % S = struct(obj) + % S = struct(obj) % % ## Output % * __S__ output struct array @@ -319,7 +318,7 @@ function compute(this, data, varargin) function S = saveobj(this) %SAVEOBJ Serialization before save % - % S = obj.saveobj() + % S = obj.saveobj() % % ## Output % * __S__ output struct. @@ -336,7 +335,7 @@ function compute(this, data, varargin) function this = loadobj(S) %LOADOBJ Deserialization after load % - % obj = loadobj(S) + % obj = loadobj(S) % % ## Input % * __S__ input struct. diff --git a/+cv/PSNR.m b/+cv/PSNR.m index 2a9fdd5d9..162180968 100644 --- a/+cv/PSNR.m +++ b/+cv/PSNR.m @@ -1,16 +1,27 @@ -%PSNR Computes PSNR image/video quality metric +%PSNR Computes the Peak Signal-to-Noise Ratio (PSNR) image quality metric % -% psnr = cv.PSNR(src1, src2) +% psnr = cv.PSNR(src1, src2) % % ## Input -% * __src1__ first image (gray or color), 8-bit integer type. -% * __src2__ second image of same size and type as `src1`. +% * __src1__ first input array (gray or color image), 8-bit integer type. +% * __src2__ second input array of the same size and type as `src1`. % % ## Output % * __psnr__ Computed signal-to-noise ratio % -% See [PSNR](http://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio] for -% details. +% This function calculates the Peak Signal-to-Noise Ratio (PSNR) image quality +% metric in decibels (dB), between two input arrays `src1` and `src2`. Arrays +% must have `uint8` depth. +% +% The PSNR is calculated as follows: +% +% PSNR = 10 * log10(R^2 / MSE) +% +% where `R` is the maximum integer value of `uint8` depth (255) and `MSE` is +% the mean squared error between the two arrays. +% +% See [PSNR](https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio) for +% more details. % % See also: psnr, immse, ssim % diff --git a/+cv/RQDecomp3x3.m b/+cv/RQDecomp3x3.m index 2601f7e66..e8585a85b 100644 --- a/+cv/RQDecomp3x3.m +++ b/+cv/RQDecomp3x3.m @@ -1,7 +1,7 @@ %RQDECOMP3X3 Computes an RQ decomposition of 3x3 matrices % -% [R,Q] = cv.RQDecomp3x3(M) -% [R,Q,S] = cv.RQDecomp3x3(M) +% [R,Q] = cv.RQDecomp3x3(M) +% [R,Q,S] = cv.RQDecomp3x3(M) % % ## Input % * __M__ 3x3 input matrix. @@ -10,11 +10,11 @@ % * __R__ 3x3 upper-triangular matrix. % * __Q__ 3x3 orthogonal matrix. % * __S__ Optional output struct with the following fields: -% * __Qx__ 3x3 rotation matrix around x-axis. -% * __Qy__ 3x3 rotation matrix around y-axis. -% * __Qz__ 3x3 rotation matrix around z-axis. -% * __eulerAngles__ 3-element vector containing three Euler angles of -% rotation in degrees. +% * __Qx__ 3x3 rotation matrix around x-axis. 
+% * __Qy__ 3x3 rotation matrix around y-axis. +% * __Qz__ 3x3 rotation matrix around z-axis. +% * __eulerAngles__ 3-element vector containing three Euler angles of +% rotation in degrees. % % The function computes a RQ decomposition using the given rotations. This % function is used in cv.decomposeProjectionMatrix to decompose the left diff --git a/+cv/RTrees.m b/+cv/RTrees.m index 6c5a1c7ab..cd346982e 100644 --- a/+cv/RTrees.m +++ b/+cv/RTrees.m @@ -192,8 +192,8 @@ function this = RTrees(varargin) %RTREES Creates/trains a new Random Trees model % - % model = cv.RTrees() - % model = cv.RTrees(...) + % model = cv.RTrees() + % model = cv.RTrees(...) % % The first variant creates an empty model. Use cv.RTrees.train to % train the model, or cv.RTrees.load to load a pre-trained model. @@ -212,7 +212,7 @@ function delete(this) %DELETE Destructor % - % model.delete() + % model.delete() % % See also: cv.RTrees % @@ -226,7 +226,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % model.clear() + % model.clear() % % The method clear does the same job as the destructor: it % deallocates all the memory occupied by the class members. But @@ -242,11 +242,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = model.empty() + % b = model.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.RTrees.clear, cv.RTrees.load % @@ -256,17 +256,17 @@ function clear(this) function varargout = save(this, filename) %SAVE Saves the algorithm parameters to a file or a string % - % model.save(filename) - % str = model.save(filename) + % model.save(filename) + % str = model.save(filename) % % ## Input % * __filename__ Name of the file to save to. In case of string - % output, only the filename extension is used to determine - % the output format (XML or YAML). + % output, only the filename extension is used to determine the + % output format (XML or YAML). % % ## Output % * __str__ optional output. If requested, the model is persisted - % to a string in memory instead of writing to disk. + % to a string in memory instead of writing to disk. % % This method stores the complete model state to the specified % XML or YAML file (or to a string in memory, based on the number @@ -280,23 +280,22 @@ function clear(this) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % model.load(filename) - % model.load(str, 'FromString',true) - % model.load(..., 'OptionName',optionValue, ...) + % model.load(filename) + % model.load(str, 'FromString',true) + % model.load(..., 'OptionName',optionValue, ...) % % ## Input % * __filename__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model - % (switches between `Algorithm::load()` and - % `Algorithm::loadFromString()` C++ methods). - % default false + % the first top-level node will be used). 
default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model (switches + % between `Algorithm::load()` and + % `Algorithm::loadFromString()` C++ methods). default false % % This method loads the complete model state from the specified % XML or YAML file (either from disk or serialized string). The @@ -310,11 +309,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = model.getDefaultName() + % name = model.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.RTrees.save, cv.RTrees.load % @@ -327,7 +326,7 @@ function load(this, fname_or_str, varargin) function count = getVarCount(this) %GETVARCOUNT Returns the number of variables in training samples % - % count = model.getVarCount() + % count = model.getVarCount() % % ## Output % * __count__ number of variables in training samples. @@ -340,7 +339,7 @@ function load(this, fname_or_str, varargin) function b = isTrained(this) %ISTRAINED Returns true if the model is trained % - % b = model.isTrained() + % b = model.isTrained() % % ## Output % * __b__ Returns true if the model is trained, false otherwise. @@ -353,11 +352,11 @@ function load(this, fname_or_str, varargin) function b = isClassifier(this) %ISCLASSIFIER Returns true if the model is a classifier % - % b = model.isClassifier() + % b = model.isClassifier() % % ## Output % * __b__ Returns true if the model is a classifier, false if the - % model is a regressor. + % model is a regressor. % % See also: cv.RTrees.isTrained % @@ -367,116 +366,111 @@ function load(this, fname_or_str, varargin) function status = train(this, samples, responses, varargin) %TRAIN Trains the Random Trees model % - % status = model.train(samples, responses) - % status = model.train(csvFilename, []) - % [...] = model.train(..., 'OptionName', optionValue, ...) + % status = model.train(samples, responses) + % status = model.train(csvFilename, []) + % [...] = model.train(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ Row vectors of feature. % * __responses__ Output of the corresponding feature vectors. % * __csvFilename__ The input CSV file name from which to load - % dataset. In this variant, you should set the second - % argument to an empty array. + % dataset. In this variant, you should set the second argument + % to an empty array. % % ## Output % * __status__ Success flag. % % ## Options % * __Data__ Training data options, specified as a cell array of - % key/value pairs of the form `{'key',val, ...}`. See below. + % key/value pairs of the form `{'key',val, ...}`. See below. % * __Flags__ The optional training flags, model-dependent. For - % convenience, you can set the individual flag options - % below, instead of directly setting bits here. default 0 + % convenience, you can set the individual flag options below, + % instead of directly setting bits here. default 0 % * __RawOutput__ See the predict method. default false % * __PredictSum__ See the predict method. default false % * __PredictMaxVote__ See the predict method. default false % % ### Options for `Data` (first variant with samples and reponses) % * __Layout__ Sample types. Default 'Row'. One of: - % * __Row__ each training sample is a row of samples. 
- % * __Col__ each training sample occupies a column of - % samples. + % * __Row__ each training sample is a row of samples. + % * __Col__ each training sample occupies a column of samples. % * __VarIdx__ vector specifying which variables to use for - % training. It can be an integer vector (`int32`) containing - % 0-based variable indices or logical vector (`uint8` or - % `logical`) containing a mask of active variables. Not set - % by default, which uses all variables in the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based variable indices or logical vector (`uint8` or + % `logical`) containing a mask of active variables. Not set by + % default, which uses all variables in the input data. % * __SampleIdx__ vector specifying which samples to use for - % training. It can be an integer vector (`int32`) containing - % 0-based sample indices or logical vector (`uint8` or - % `logical`) containing a mask of training samples of - % interest. Not set by default, which uses all samples in - % the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based sample indices or logical vector (`uint8` or + % `logical`) containing a mask of training samples of interest. + % Not set by default, which uses all samples in the input data. % * __SampleWeights__ optional floating-point vector with weights - % for each sample. Some samples may be more important than - % others for training. You may want to raise the weight of - % certain classes to find the right balance between hit-rate - % and false-alarm rate, and so on. Not set by default, which - % effectively assigns an equal weight of 1 for all samples. + % for each sample. Some samples may be more important than + % others for training. You may want to raise the weight of + % certain classes to find the right balance between hit-rate and + % false-alarm rate, and so on. Not set by default, which + % effectively assigns an equal weight of 1 for all samples. % * __VarType__ optional vector of type `uint8` and size - % ` + `, - % containing types of each input and output variable. By - % default considers all variables as numerical (both input - % and output variables). In case there is only one output - % variable of integer type, it is considered categorical. - % You can also specify a cell-array of strings (or as one - % string of single characters, e.g 'NNNC'). Possible values: - % * __Numerical__, __N__ same as 'Ordered' - % * __Ordered__, __O__ ordered variables - % * __Categorical__, __C__ categorical variables + % ` + `, + % containing types of each input and output variable. By default + % considers all variables as numerical (both input and output + % variables). In case there is only one output variable of + % integer type, it is considered categorical. You can also + % specify a cell-array of strings (or as one string of single + % characters, e.g 'NNNC'). Possible values: + % * __Numerical__, __N__ same as 'Ordered' + % * __Ordered__, __O__ ordered variables + % * __Categorical__, __C__ categorical variables % * __MissingMask__ Indicator mask for missing observation (not - % currently implemented). Not set by default + % currently implemented). Not set by default % * __TrainTestSplitCount__ divides the dataset into train/test - % sets, by specifying number of samples to use for the test - % set. By default all samples are used for the training set. + % sets, by specifying number of samples to use for the test set. + % By default all samples are used for the training set. 
% * __TrainTestSplitRatio__ divides the dataset into train/test - % sets, by specifying ratio of samples to use for the test - % set. By default all samples are used for the training set. + % sets, by specifying ratio of samples to use for the test set. + % By default all samples are used for the training set. % * __TrainTestSplitShuffle__ when splitting dataset into - % train/test sets, specify whether to shuffle the samples. - % Otherwise samples are assigned sequentially (first train - % then test). default true + % train/test sets, specify whether to shuffle the samples. + % Otherwise samples are assigned sequentially (first train then + % test). default true % % ### Options for `Data` (second variant for loading CSV file) % * __HeaderLineCount__ The number of lines in the beginning to - % skip; besides the header, the function also skips empty - % lines and lines staring with '#'. default 1 + % skip; besides the header, the function also skips empty lines + % and lines starting with '#'. default 1 % * __ResponseStartIdx__ Index of the first output variable. If - % -1, the function considers the last variable as the - % response. If the dataset only contains input variables and - % no responses, use `ResponseStartIdx = -2` and - % `ResponseEndIdx = 0`, then the output variables vector - % will just contain zeros. default -1 + % -1, the function considers the last variable as the response. + % If the dataset only contains input variables and no responses, + % use `ResponseStartIdx = -2` and `ResponseEndIdx = 0`, then the + % output variables vector will just contain zeros. default -1 % * __ResponseEndIdx__ Index of the last output variable + 1. If - % -1, then there is single response variable at - % `ResponseStartIdx`. default -1 + % -1, then there is a single response variable at + % `ResponseStartIdx`. default -1 % * __VarTypeSpec__ The optional text string that specifies the - % variables' types. It has the format - % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, - % variables from `n1` to `n2` (inclusive range), `n3`, `n4` - % to `n5` ... are considered ordered and `n6`, `n7` to - % `n8` ... are considered as categorical. The range - % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` - % should cover all the variables. If `VarTypeSpec` is not - % specified, then algorithm uses the following rules: - % * all input variables are considered ordered by default. - % If some column contains has non- numerical values, e.g. - % 'apple', 'pear', 'apple', 'apple', 'mango', the - % corresponding variable is considered categorical. - % * if there are several output variables, they are all - % considered as ordered. Errors are reported when - % non-numerical values are used. - % * if there is a single output variable, then if its values - % are non-numerical or are all integers, then it's - % considered categorical. Otherwise, it's considered - % ordered. + % variables' types. It has the format + % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, variables + % from `n1` to `n2` (inclusive range), `n3`, `n4` to `n5` ... + % are considered ordered and `n6`, `n7` to `n8` ... are + % considered as categorical. The range + % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` should + % cover all the variables. If `VarTypeSpec` is not specified, + % then the algorithm uses the following rules: + % * all input variables are considered ordered by default. If + % some column contains non-numerical values, e.g.
+ % 'apple', 'pear', 'apple', 'apple', 'mango', the + % corresponding variable is considered categorical. + % * if there are several output variables, they are all + % considered as ordered. Errors are reported when + % non-numerical values are used. + % * if there is a single output variable, then if its values are + % non-numerical or are all integers, then it's considered + % categorical. Otherwise, it's considered ordered. % * __Delimiter__ The character used to separate values in each - % line. default ',' + % line. default ',' % * __Missing__ The character used to specify missing - % measurements. It should not be a digit. Although it's a - % non-numerical value, it surely does not affect the - % decision of whether the variable ordered or categorical. - % default '?' + % measurements. It should not be a digit. Although it's a + % non-numerical value, it surely does not affect the decision of + % whether the variable ordered or categorical. default '?' % * __TrainTestSplitCount__ same as above. % * __TrainTestSplitRatio__ same as above. % * __TrainTestSplitShuffle__ same as above. @@ -498,10 +492,10 @@ function load(this, fname_or_str, varargin) function [err,resp] = calcError(this, samples, responses, varargin) %CALCERROR Computes error on the training or test dataset % - % err = model.calcError(samples, responses) - % err = model.calcError(csvFilename, []) - % [err,resp] = model.calcError(...) - % [...] = model.calcError(..., 'OptionName', optionValue, ...) + % err = model.calcError(samples, responses) + % err = model.calcError(csvFilename, []) + % [err,resp] = model.calcError(...) + % [...] = model.calcError(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ See the train method. @@ -515,14 +509,13 @@ function load(this, fname_or_str, varargin) % ## Options % * __Data__ See the train method. % * __TestError__ if true, the error is computed over the test - % subset of the data, otherwise it's computed over the - % training subset of the data. Please note that if you - % loaded a completely different dataset to evaluate an - % already trained classifier, you will probably want not to - % set the test subset at all with `TrainTestSplitRatio` and - % specify `TestError=false`, so that the error is computed - % for the whole new set. Yes, this sounds a bit confusing. - % default false + % subset of the data, otherwise it's computed over the training + % subset of the data. Please note that if you loaded a + % completely different dataset to evaluate an already trained + % classifier, you will probably want not to set the test subset + % at all with `TrainTestSplitRatio` and specify + % `TestError=false`, so that the error is computed for the whole + % new set. Yes, this sounds a bit confusing. default false % % The method uses the predict method to compute the error. For % regression models the error is computed as RMS, for classifiers @@ -536,12 +529,12 @@ function load(this, fname_or_str, varargin) function [results,f] = predict(this, samples, varargin) %PREDICT Predicts response(s) for the provided sample(s) % - % [results,f] = model.predict(samples) - % [...] = model.predict(..., 'OptionName', optionValue, ...) + % [results,f] = model.predict(samples) + % [...] = model.predict(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ Input row vectors (one or more) stored as rows of - % a floating-point matrix. + % a floating-point matrix. % % ## Output % * __results__ Output labels or regression values. 
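To make the train/predict round trip concrete, a minimal sketch on synthetic data; the `int32` cast marks the single output variable as categorical per the `VarType` rules above, so the model trains as a classifier:

    X = [randn(50,2)+1; randn(50,2)-1];    % two well-separated classes
    Y = int32([ones(50,1); 2*ones(50,1)]); % integer responses => classification
    model = cv.RTrees();
    model.train(X, Y);
    labels = model.predict(X);             % majority-vote class labels
    acc = nnz(labels == double(Y)) / numel(Y);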
@@ -549,30 +542,29 @@ function load(this, fname_or_str, varargin) % % ## Options % * __Flags__ The optional predict flags, model-dependent. For - % convenience, you can set the individual flag options - % below, instead of directly setting bits here. default 0 + % convenience, you can set the individual flag options below, + % instead of directly setting bits here. default 0 % * __RawOutput__ makes the method return the raw results (the - % sum), not the class label. default false + % sum), not the class label. default false % * __CompressedInput__ compressed data, containing only the - % active samples/variables. default false + % active samples/variables. default false % * __PreprocessedInput__ This parameter is normally set to false, - % implying a regular input. If it is true, the method - % assumes that all the values of the discrete input - % variables have been already normalized to 0..NCategories - % ranges since the decision tree uses such normalized - % representation internally. It is useful for faster - % prediction with tree ensembles. For ordered input - % variables, the flag is not used. Default false + % implying a regular input. If it is true, the method assumes + % that all the values of the discrete input variables have been + % already normalized to 0..NCategories ranges since the decision + % tree uses such normalized representation internally. It is + % useful for faster prediction with tree ensembles. For ordered + % input variables, the flag is not used. Default false % * __PredictAuto__ Setting this to true, overrides all of the - % other `Predict*` flags. It automatically chooses between - % `PredictSum` and `PredictMaxVote` (if the model is a - % regressor or the number of classes are 2 with `RawOutput` - % set then it picks `PredictSum`, otherwise it picks - % `PredictMaxVote` by default). default true + % other `Predict*` flags. It automatically chooses between + % `PredictSum` and `PredictMaxVote` (if the model is a regressor + % or the number of classes are 2 with `RawOutput` set then it + % picks `PredictSum`, otherwise it picks `PredictMaxVote` by + % default). default true % * __PredictSum__ If true then return sum of votes instead of the - % class label. default false + % class label. default false % * __PredictMaxVote__ If true then return the class label with - % the max vote. default false + % the max vote. default false % % This method returns the cumulative result from all the trees in % the forest (the class that receives the majority of voices, or @@ -589,12 +581,12 @@ function load(this, fname_or_str, varargin) function v = getVarImportance(this) %GETVARIMPORTANCE Returns the variable importance array % - % v = model.getVarImportance() + % v = model.getVarImportance() % % ## Output % * __v__ the variable importance vector, computed at the training - % stage when `CalculateVarImportance` is set to true. If - % this flag was set to false, the empty matrix is returned. + % stage when `CalculateVarImportance` is set to true. If this + % flag was set to false, the empty matrix is returned. % % See also: cv.RTrees.CalculateVarImportance % @@ -604,20 +596,20 @@ function load(this, fname_or_str, varargin) function votes = getVotes(this, samples, varargin) %GETVOTES Returns the result of each individual tree in the forest % - % votes = model.getVotes(samples) - % votes = model.getVotes(..., 'OptionName',optionValue, ...) + % votes = model.getVotes(samples) + % votes = model.getVotes(..., 'OptionName',optionValue, ...) 
% % ## Input % * __samples__ matrix containing the samples for which votes will - % be calculated. + % be calculated. % % ## Output % * __votes__ matrix of the result of the calculation. % % ## Options % * __Flags__ Flags for defining the type of RTrees. For - % convenience, you can set the individual flag options - % below, instead of directly setting bits here. default 0 + % convenience, you can set the individual flag options below, + % instead of directly setting bits here. default 0 % * __RawOutput__ See the predict method. default false % * __CompressedInput__ See the predict method. default false % * __PreprocessedInput__ See the predict method. default false @@ -641,7 +633,7 @@ function load(this, fname_or_str, varargin) function roots = getRoots(this) %GETROOTS Returns indices of root nodes % - % roots = model.getRoots() + % roots = model.getRoots() % % ## Output % * __roots__ vector of indices. @@ -654,24 +646,22 @@ function load(this, fname_or_str, varargin) function nodes = getNodes(this) %GETNODES Returns all the nodes % - % nodes = model.getNodes() + % nodes = model.getNodes() % % ## Output % * __nodes__ Struct-array with the following fields: - % * __value__ Value at the node: a class label in case of - % classification or estimated function value in case - % of regression. - % * __classIdx__ Class index normalized to `0..class_count-1` - % range and assigned to the node. It is used - % internally in classification trees and tree - % ensembles. - % * __parent__ Index of the parent node. - % * __left__ Index of the left child node. - % * __right__ Index of right child node. - % * __defaultDir__ Default direction where to go (-1: left - % or +1: right). It helps in the case of missing - % values. - % * __split__ Index of the first split. + % * __value__ Value at the node: a class label in case of + % classification or estimated function value in case of + % regression. + % * __classIdx__ Class index normalized to `0..class_count-1` + % range and assigned to the node. It is used internally in + % classification trees and tree ensembles. + % * __parent__ Index of the parent node. + % * __left__ Index of the left child node. + % * __right__ Index of the right child node. + % * __defaultDir__ Default direction where to go (-1 left or +1 + % right). It helps in the case of missing values. + % * __split__ Index of the first split. % % all the node indices are zero-based indices in the returned % vector. @@ -684,26 +674,25 @@ function load(this, fname_or_str, varargin) function splits = getSplits(this) %GETSPLITS Returns all the splits % - % splits = model.getSplits() + % splits = model.getSplits() % % ## Output % * __splits__ Struct-array with the following fields: - % * __varIdx__ Index of variable on which the split is - % created. - % * __inversed__ If true, then the inverse split rule is - % used (i.e. left and right branches are exchanged in - % the rule expressions below). - % * __quality__ The split quality, a positive number. It is - % used to choose the best split. (It is also used to - % compute variable importance). - % * __next__ Index of the next split in the list of splits - % for the node (surrogate splits). - % * __c__ The threshold value in case of split on an ordered - % variable. The rule is: - % `if var_value < c, next_node = left; else next_node = right; end` - % * __subsetOfs__ Offset of the bitset used by the split on - % a categorical variable.
The rule is: - % `if bitset(var_value) == 1, next_node = left; else next_node = right; end` + % * __varIdx__ Index of variable on which the split is created. + % * __inversed__ If true, then the inverse split rule is used + % (i.e. left and right branches are exchanged in the rule + % expressions below). + % * __quality__ The split quality, a positive number. It is used + % to choose the best split. (It is also used to compute + % variable importance). + % * __next__ Index of the next split in the list of splits for + % the node (surrogate splits). + % * __c__ The threshold value in case of split on an ordered + % variable. The rule is: + % `if var_value < c, next_node = left; else next_node = right; end` + % * __subsetOfs__ Offset of the bitset used by the split on a + % categorical variable. The rule is: + % `if bitset(var_value) == 1, next_node = left; else next_node = right; end` % % all the split indices are zero-based indices in the returned % vector. @@ -716,7 +705,7 @@ function load(this, fname_or_str, varargin) function subsets = getSubsets(this) %GETSUBSETS Returns all the bitsets for categorical splits % - % subsets = model.getSubsets() + % subsets = model.getSubsets() % % ## Output % * __subsets__ vector of indices. diff --git a/+cv/Rect.m b/+cv/Rect.m index 30f9e0ca4..6d41afdf2 100644 --- a/+cv/Rect.m +++ b/+cv/Rect.m @@ -12,34 +12,34 @@ % are inclusive, while the right and bottom boundaries are not. For % example, the method cv.Rect.contains returns true if: % - % x <= pt.x < x + width , y <= pt.y < y + height + % x <= pt.x < x + width , y <= pt.y < y + height % % Virtually every loop over an image ROI in OpenCV (where ROI is specified % by an integer rectangle) is implemented as: % - % roi = [x,y,w,h]; - % for y=roi(2):(roi(2)+roi(4)-1) - % for x=roi(1):(roi(1)+roi(3)-1) - % %... - % end - % end + % roi = [x,y,w,h]; + % for y=roi(2):(roi(2)+roi(4)-1) + % for x=roi(1):(roi(1)+roi(3)-1) + % %... + % end + % end % % In addition, the following operations on rectangles are implemented: % % * __adjustPosition__ (`rect += point`): shifting a rectangle by a - % certain offset. + % certain offset. % * __adjustSize__ (`rect += size`): expanding or shrinking a rectangle by - % a certain amount. + % a certain amount. % * __intersect__ (`rect1 & rect2`): rectangle intersection. % * __union__ (`rect1 | rect2`): minimum area rectangle containing `rect1` - % and `rect2`. + % and `rect2`. % % This is an example how the partial ordering on rectangles can be % established (`rect1 \subseteq rect2`): % - % function b = rect_le(r1, r2) - % b = all(cv.Rect.intersect(r1,r2) == r1); - % end + % function b = rect_le(r1, r2) + % b = all(cv.Rect.intersect(r1,r2) == r1); + % end % % See also: cv.RotatedRect % @@ -48,7 +48,7 @@ function r = from2points(pt1, pt2) %FROM2POINTS Create a rectangle from 2 points % - % r = cv.Rect.from2points(pt1, pt2) + % r = cv.Rect.from2points(pt1, pt2) % % ## Input % * __pt1__ First point `[x1,y1]`. @@ -63,7 +63,7 @@ function pt = tl(r) %TL The top-left corner % - % pt = cv.Rect.tl(r) + % pt = cv.Rect.tl(r) % % ## Input % * __r__ rectangle `[x,y,w,h]`. @@ -79,7 +79,7 @@ function pt = br(r) %BR The bottom-right corner % - % pt = cv.Rect.br(r) + % pt = cv.Rect.br(r) % % ## Input % * __r__ rectangle `[x,y,w,h]`. @@ -95,7 +95,7 @@ function sz = size(r) %SIZE Size (width, height) of the rectangle % - % sz = cv.Rect.size(r) + % sz = cv.Rect.size(r) % % ## Input % * __r__ rectangle `[x,y,w,h]`. 
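A few of the rectangle operations above, sketched on plain `[x,y,w,h]` vectors; the expected results in the comments follow directly from the inclusive/exclusive boundary convention stated earlier:

    r1 = [10 20 100 50];
    r2 = [40 40 100 50];
    cv.Rect.intersect(r1, r2)      % overlapping region: [40 40 70 30]
    cv.Rect.union(r1, r2)          % minimal enclosing rect: [10 20 130 70]
    cv.Rect.contains(r1, [15 25])  % true: 10 <= 15 < 110 and 20 <= 25 < 70
    cv.Rect.br(r1)                 % bottom-right corner: [110 70]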
@@ -111,7 +111,7 @@ function a = area(r) %AREA Area (width*height) of the rectangle % - % out = cv.Rect.area(r) + % out = cv.Rect.area(r) % % ## Input % * __r__ rectangle `[x,y,w,h]`. @@ -127,7 +127,7 @@ function b = contains(r, pt) %CONTAINS Checks whether the rectangle contains the point % - % b = cv.Rect.contains(r, pt) + % b = cv.Rect.contains(r, pt) % % ## Input % * __r__ rectangle `[x,y,w,h]` @@ -144,7 +144,7 @@ function r = adjustPosition(r, pt) %ADJUSTPOSITION Shift a rectangle by a certain offset % - % r = cv.Rect.adjustPosition(r, pt) + % r = cv.Rect.adjustPosition(r, pt) % % ## Input % * __r__ input rectangle `[x,y,w,h]`. @@ -161,7 +161,7 @@ function r = adjustSize(r, sz) %ADJUSTSIZE Expand or shrink a rectangle by a certain amount % - % r = cv.Rect.adjustSize(r, sz) + % r = cv.Rect.adjustSize(r, sz) % % ## Input % * __r__ input rectangle `[x,y,w,h]`. @@ -178,7 +178,7 @@ function r = intersect(r1, r2) %INTERSECT Rectangle intersection % - % r = cv.Rect.intersect(r1, r2) + % r = cv.Rect.intersect(r1, r2) % % ## Input % * __r1__ first rectangle `[x1,y1,w1,h1]`. @@ -195,7 +195,7 @@ function r = union(r1, r2) %UNION Minimum area rectangle % - % r = cv.Rect.union(r1, r2) + % r = cv.Rect.union(r1, r2) % % ## Input % * __r1__ first rectangle `[x1,y1,w1,h1]`. @@ -212,24 +212,24 @@ function out = crop(img, r, roi) %CROP Extract region-of-interest from image % - % roi = cv.Rect.crop(img, r) - % img = cv.Rect.crop(img, r, roi) + % roi = cv.Rect.crop(img, r) + % img = cv.Rect.crop(img, r, roi) % % ## Input % * __img__ input image. % * __r__ ROI rectangle `[x,y,w,h]`. % * __roi__ input cropped image, of size `[h,w]`, and same type - % and channels as input image `img`. + % and channels as input image `img`. % % ## Output % * __roi__ output cropped image. % * __img__ output image with updated ROI region. % % In the first variant, the function gets ROI region from image, - % i.e: `roi = img(r(1)+1:r(1)+r(3), r(2)+1:r(2)+r(4), :)`. + % i.e: `roi = img(r(2)+1:r(2)+r(4), r(1)+1:r(1)+r(3), :)`. % % In the second variant, the function sets ROI region inside image, - % i.e: `img(r(1)+1:r(1)+r(3), r(2)+1:r(2)+r(4), :) = roi` + % i.e: `img(r(2)+1:r(2)+r(4), r(1)+1:r(1)+r(3), :) = roi` % % See also: cv.getRectSubPix, imcrop % diff --git a/+cv/Rodrigues.m b/+cv/Rodrigues.m index dd8aa862c..5733c6d65 100644 --- a/+cv/Rodrigues.m +++ b/+cv/Rodrigues.m @@ -1,29 +1,29 @@ %RODRIGUES Converts a rotation matrix to a rotation vector or vice versa % -% dst = cv.Rodrigues(src) -% [dst,jacobian] = cv.Rodrigues(src) +% dst = cv.Rodrigues(src) +% [dst,jacobian] = cv.Rodrigues(src) % % ## Input -% * __src__ Input rotation vector (3x1 or 1x3) or rotation matrix (3x3). -% Both single and double-precision floating-point types are supported. +% * __src__ Input rotation vector (3x1 or 1x3) or rotation matrix (3x3). Both +% single and double-precision floating-point types are supported. % % ## Output % * __dst__ Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), -% respectively. Same data type as `src`. +% respectively. Same data type as `src`. % * __jacobian__ Optional output Jacobian matrix, 3x9 or 9x3, which is a -% matrix of partial derivatives of the output array components with -% respect to the input array components. Same data type as `src`. +% matrix of partial derivatives of the output array components with respect +% to the input array components. Same data type as `src`. 
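Before the formulas below, a minimal round-trip sanity check of the conversion (a hedged sketch; the tolerance is arbitrary):

    rvec = [0; 0; pi/4];              % rotation of pi/4 about the z-axis
    R = cv.Rodrigues(rvec);           % 3x1 rotation vector -> 3x3 rotation matrix
    rvec2 = cv.Rodrigues(R);          % 3x3 rotation matrix -> rotation vector
    assert(norm(rvec(:) - rvec2(:)) < 1e-10)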
% % The function transforms a rotation matrix in the following way: % -% theta <- norm(r) -% r <- r/theta -% R = cos(theta) * I + (1 - cos(theta)) * r * r^T + sin(theta) * A -% A = [0, -rz, ry; rz, 0, -rx; -ry, rx, 0] +% theta <- norm(r) +% r <- r/theta +% R = cos(theta) * I + (1 - cos(theta)) * r * r^T + sin(theta) * A +% A = [0, -rz, ry; rz, 0, -rx; -ry, rx, 0] % % The inverse transformation can also be done easily, since % -% sin(theta) * A = (R - R^T) / 2 +% sin(theta) * A = (R - R^T) / 2 % % A rotation vector is a convenient and most compact representation of a % rotation matrix (since any rotation matrix has just 3 degrees of diff --git a/+cv/RotatedRect.m b/+cv/RotatedRect.m index 92f2813a0..5ce34e07f 100644 --- a/+cv/RotatedRect.m +++ b/+cv/RotatedRect.m @@ -14,21 +14,21 @@ function rrect = from3points(pt1, pt2, pt3) %FROM3POINTS Create a rotated rectangle from 3 points % - % rrect = cv.RotatedRect.from3points(pt1, pt2, pt3) + % rrect = cv.RotatedRect.from3points(pt1, pt2, pt3) % % ## Input % * __pt1__, __pt2__, __pt3__ Any 3 end points `[x,y]` of the - % rotated rectangle. They must be given in order (either - % clockwise or anticlockwise). + % rotated rectangle. They must be given in order (either + % clockwise or anticlockwise). % % ## Output % * __rrect__ output rotated rectangle. A structure with the - % following fields: - % * __center__ The rectangle mass center `[x,y]`. - % * __size__ Width and height of the rectangle `[w,h]`. - % * __angle__ The rotation angle in a clockwise direction. - % When the angle is 0, 90, 180, 270 etc., the - % rectangle becomes an up-right rectangle. + % following fields: + % * __center__ The rectangle mass center `[x,y]`. + % * __size__ Width and height of the rectangle `[w,h]`. + % * __angle__ The rotation angle in a clockwise direction. When + % the angle is 0, 90, 180, 270 etc., the rectangle becomes an + % up-right rectangle. % rrect = RotatedRect_('from3points', pt1, pt2, pt3); end @@ -36,20 +36,21 @@ function pts = points(rrect) %POINTS Returns 4 vertices of the rectangle % - % pts = cv.RotatedRect.points(rrect) + % pts = cv.RotatedRect.points(rrect) % % ## Input % * __rrect__ rotated rectangle. A structure with the following - % fields: - % * __center__ The rectangle mass center `[x,y]`. - % * __size__ Width and height of the rectangle `[w,h]`. - % * __angle__ The rotation angle in a clockwise direction. - % When the angle is 0, 90, 180, 270 etc., the - % rectangle becomes an up-right rectangle. + % fields: + % * __center__ The rectangle mass center `[x,y]`. + % * __size__ Width and height of the rectangle `[w,h]`. + % * __angle__ The rotation angle in a clockwise direction. When + % the angle is 0, 90, 180, 270 etc., the rectangle becomes an + % up-right rectangle. % % ## Output - % * __pts__ 4-by-2 points matrix of the rectangle vertices. - % `[x1 y1; x2 y2; x3 y3; x4 y4]` + % * __pts__ 4-by-2 points matrix of the rectangle vertices + % `[x1 y1; x2 y2; x3 y3; x4 y4]`. The order is bottom-left, + % top-left, top-right, bottom-right. % % See also: cv.boxPoints, bbox2points % @@ -59,16 +60,16 @@ function rect = boundingRect(rrect) %BOUNDINGRECT Returns the minimal up-right integer rectangle containing the rotated rectangle % - % rect = cv.RotatedRect.boundingRect(rrect) + % rect = cv.RotatedRect.boundingRect(rrect) % % ## Input % * __rrect__ rotated rectangle. A structure with the following - % fields: - % * __center__ The rectangle mass center `[x,y]`. - % * __size__ Width and height of the rectangle `[w,h]`.
- % * __angle__ The rotation angle in a clockwise direction. - % When the angle is 0, 90, 180, 270 etc., the - % rectangle becomes an up-right rectangle. + % fields: + % * __center__ The rectangle mass center `[x,y]`. + % * __size__ Width and height of the rectangle `[w,h]`. + % * __angle__ The rotation angle in a clockwise direction. When + % the angle is 0, 90, 180, 270 etc., the rectangle becomes an + % up-right rectangle. % % ## Output % * __rect__ bounding rectangle, a 1-by-4 vector `[x, y, w, h]` @@ -81,16 +82,16 @@ function rect = boundingRect2f(rrect) %BOUNDINGRECT2F Returns the minimal (exact) floating-point rectangle containing the rotated rectangle % - % rect = cv.RotatedRect.boundingRect2f(rrect) + % rect = cv.RotatedRect.boundingRect2f(rrect) % % ## Input % * __rrect__ rotated rectangle. A structure with the following - % fields: - % * __center__ The rectangle mass center `[x,y]`. - % * __size__ Width and height of the rectangle `[w,h]`. - % * __angle__ The rotation angle in a clockwise direction. - % When the angle is 0, 90, 180, 270 etc., the - % rectangle becomes an up-right rectangle. + % fields: + % * __center__ The rectangle mass center `[x,y]`. + % * __size__ Width and height of the rectangle `[w,h]`. + % * __angle__ The rotation angle in a clockwise direction. When + % the angle is 0, 90, 180, 270 etc., the rectangle becomes an + % up-right rectangle. % % ## Output % * __rect__ bounding rectangle, a 1-by-4 vector `[x, y, w, h]` diff --git a/+cv/RotationWarper.m b/+cv/RotationWarper.m index 5b9c10c2e..99c4bba07 100644 --- a/+cv/RotationWarper.m +++ b/+cv/RotationWarper.m @@ -5,7 +5,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -17,40 +18,38 @@ function this = RotationWarper(warperType, scale, varargin) %ROTATIONWARPER Constructor % - % obj = cv.RotationWarper(warperType, scale) - % obj = cv.RotationWarper(..., 'OptionName',optionValue, ...) + % obj = cv.RotationWarper(warperType, scale) + % obj = cv.RotationWarper(..., 'OptionName',optionValue, ...) % % ## Input % * __warperType__ image warper factory class type, used to create - % the rotation-based warper. One of: - % * __PlaneWarper__ Plane warper factory class. Warper that - % maps an image onto the `z = 1` plane. - % * __AffineWarper__ Affine warper factory class. Affine - % warper that uses rotations and translations. Uses - % affine transformation in homogeneous coordinates to - % represent both rotation and translation in camera - % rotation matrix. - % * __CylindricalWarper__ Cylindrical warper factory class. - % Warper that maps an image onto the `x*x + z*z = 1` - % cylinder. - % * __SphericalWarper__ Spherical warper factory class. - % Warper that maps an image onto the unit sphere - % located at the origin. Projects image onto unit - % sphere with origin at [0,0,0] and radius `scale`, - % measured in pixels. A 360 panorama would therefore - % have a resulting width of `2*scale*pi` pixels. Poles - % are located at [0,-1,0] and [0,1,0] points. - % * __PlaneWarperGpu__ (requires CUDA) - % * __CylindricalWarperGpu__ (requires CUDA) - % * __SphericalWarperGpu__ (requires CUDA) - % * __FisheyeWarper__ - % * __StereographicWarper__ - % * __CompressedRectilinearWarper__ - % * __CompressedRectilinearPortraitWarper__ - % * __PaniniWarper__ - % * __PaniniPortraitWarper__ - % * __MercatorWarper__ - % * __TransverseMercatorWarper__ + % the rotation-based warper. One of: + % * __PlaneWarper__ Plane warper factory class.
Warper that maps + % an image onto the `z = 1` plane. + % * __AffineWarper__ Affine warper factory class. Affine warper + % that uses rotations and translations. Uses affine + % transformation in homogeneous coordinates to represent both + % rotation and translation in camera rotation matrix. + % * __CylindricalWarper__ Cylindrical warper factory class. + % Warper that maps an image onto the `x*x + z*z = 1` cylinder. + % * __SphericalWarper__ Spherical warper factory class. Warper + % that maps an image onto the unit sphere located at the + % origin. Projects image onto unit sphere with origin at + % [0,0,0] and radius `scale`, measured in pixels. A 360 + % panorama would therefore have a resulting width of + % `2*scale*pi` pixels. Poles are located at [0,-1,0] and + % [0,1,0] points. + % * __PlaneWarperGpu__ (requires CUDA) + % * __CylindricalWarperGpu__ (requires CUDA) + % * __SphericalWarperGpu__ (requires CUDA) + % * __FisheyeWarper__ + % * __StereographicWarper__ + % * __CompressedRectilinearWarper__ + % * __CompressedRectilinearPortraitWarper__ + % * __PaniniWarper__ + % * __PaniniPortraitWarper__ + % * __MercatorWarper__ + % * __TransverseMercatorWarper__ % * __scale__ Projected image scale multiplier, e.g. 1.0 % % ## Options @@ -68,7 +67,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.RotationWarper % @@ -79,7 +78,10 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() + % + % ## Output + % * __typename__ Name of C++ type % typename = RotationWarper_(this.id, 'typeid'); end @@ -90,7 +92,7 @@ function delete(this) function uv = warpPoint(this, pt, K, R) %WARPPOINT Projects the image point % - % uv = obj.warpPoint(pt, K, R) + % uv = obj.warpPoint(pt, K, R) % % ## Input % * __pt__ Source point `[x,y]`. @@ -108,7 +110,7 @@ function delete(this) function [xmap,ymap,bbox] = buildMaps(this, src_size, K, R) %BUILDMAPS Builds the projection maps according to the given camera data % - % [xmap,ymap,bbox] = obj.buildMaps(src_size, K, R) + % [xmap,ymap,bbox] = obj.buildMaps(src_size, K, R) % % ## Input % * **src_size** Source image size `[w,h]`. @@ -128,8 +130,8 @@ function delete(this) function [dst,tl] = warp(this, src, K, R, varargin) %WARP Projects the image % - % [dst,tl] = obj.warp(src, K, R) - % [dst,tl] = obj.warp(src, K, R, 'OptionName',optionValue, ...) + % [dst,tl] = obj.warp(src, K, R) + % [dst,tl] = obj.warp(src, K, R, 'OptionName',optionValue, ...) % % ## Input % * __src__ Source image. @@ -142,9 +144,9 @@ function delete(this) % % ## Options % * __InterpMode__ Interpolation mode, see cv.remap. - % default 'Linear' + % default 'Linear' % * __BorderMode__ Border extrapolation mode, see cv.remap. - % default 'Constant' + % default 'Constant' % % See also: cv.RotationWarper.RotationWarper % @@ -154,8 +156,8 @@ function delete(this) function dst = warpBackward(this, src, K, R, dst_size, varargin) %WARPBACKWARD Projects the image backward % - % dst = obj.warpBackward(src, K, R, dst_size) - % dst = obj.warpBackward(src, K, R, dst_size, 'OptionName',optionValue, ...) + % dst = obj.warpBackward(src, K, R, dst_size) + % dst = obj.warpBackward(src, K, R, dst_size, 'OptionName',optionValue, ...) % % ## Input % * __src__ Projected image. @@ -168,9 +170,9 @@ function delete(this) % % ## Options % * __InterpMode__ Interpolation mode, see cv.remap. - % default 'Linear' + % default 'Linear' % * __BorderMode__ Border extrapolation mode, see cv.remap. 
- % default 'Constant' + % default 'Constant' % % See also: cv.RotationWarper.warp % @@ -180,7 +182,7 @@ function delete(this) function bbox = warpRoi(this, src_size, K, R) %WARPROI Projects image ROI % - % bbox = obj.warpRoi(src_size, K, R) + % bbox = obj.warpRoi(src_size, K, R) % % ## Input % * **src_size** Source image bounding box, `[x,y,w,h]`. diff --git a/+cv/SVD.m b/+cv/SVD.m index 26b0ad3e2..29c849601 100644 --- a/+cv/SVD.m +++ b/+cv/SVD.m @@ -11,11 +11,12 @@ % 'NoUV' flag. Another flag 'FullUV' indicates that full-size % `u` and `vt` must be computed, which is not necessary most of the time. % - % See also: cv.SVD.compute + % See also: cv.SVD.compute, cv.PCA % properties (SetAccess = protected) - id % Object ID + % Object ID + id end properties (Dependent) @@ -31,14 +32,14 @@ function this = SVD(varargin) %SVD Default constructor % - % svd = cv.SVD() - % svd = cv.SVD(src, 'OptionName', optionValue, ...) + % svd = cv.SVD() + % svd = cv.SVD(src, 'OptionName', optionValue, ...) % % ## Input % * __src__ decomposed matrix. % % ## Options - % Same option as cv.SVD.compute() method. + % Same options as the cv.SVD.compute method. % % In the first form, it initializes an empty SVD structure. In the second form, it initializes an empty SVD structure and @@ -55,7 +56,7 @@ function delete(this) %DELETE Destructor % - % svd.delete() + % svd.delete() % % See also: cv.SVD % @@ -66,8 +67,8 @@ function delete(this) function compute(this, A, varargin) %COMPUTE The operator that performs SVD % - % svd.compute(A) - % svd.compute(A, 'OptionName', optionValue, ...) + % svd.compute(A) + % svd.compute(A, 'OptionName', optionValue, ...) % % ## Input % * __A__ decomposed matrix, `A = u*diag(w)*vt` @@ -75,16 +76,16 @@ function compute(this, A, varargin) % ## Options % * __Flags__ operation flags. default 0 % * __ModifyA__ allow the algorithm to modify the decomposed - % matrix; it can save space and speed up processing. - % currently ignored. default false + % matrix; it can save space and speed up processing. Currently + % ignored. default false % * __NoUV__ indicates that only a vector of singular values `w` - % is to be processed, while `u` and `vt` will be set to - % empty matrices. default false + % is to be processed, while `u` and `vt` will be set to empty + % matrices. default false % * __FullUV__ when the matrix is not square, by default the - % algorithm produces `u` and `vt` matrices of sufficiently - % large size for the further `A` reconstruction; if, - % however, `FullUV` flag is specified, `u` and `vt` will be - % full-size square orthogonal matrices. default false + % algorithm produces `u` and `vt` matrices of sufficiently large + % size for the further `A` reconstruction; if, however, `FullUV` + % flag is specified, `u` and `vt` will be full-size square + % orthogonal matrices. default false % % The previously allocated `u`, `w` and `vt` are released. % @@ -101,12 +102,12 @@ function compute(this, A, varargin) function dst = backSubst(this, src) %BACKSUBST Performs a singular value back substitution % - % dst = svd.backSubst(src) + % dst = svd.backSubst(src) % % ## Input % * __src__ right-hand side of a linear system `(u*w*v')*dst = src` - % to be solved, where `A` has been previously decomposed - % into `u`, `w`, and `vt` (stored in class). + % to be solved, where `A` has been previously decomposed into + % `u`, `w`, and `vt` (stored in class). % % ## Output % * __dst__ found solution of the system.
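A hedged end-to-end sketch of this workflow (decompose once, then back-substitute; the matrix values are arbitrary and assumed well-conditioned):

    A = [2 1; 1 3];
    b = [1; 2];
    S = cv.SVD(A);              % constructor computes the decomposition of A
    x = S.backSubst(b);         % solves A*x = b using the stored u, w, vt
    norm(A*x - b)               % expected to be near zero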
@@ -114,15 +115,15 @@ function compute(this, A, varargin) % The method calculates a back substitution for the specified % right-hand side. % - % x = vt^T * diag(w)^-1 * u^T * src - % ~ A^-1* src + % x = vt^T * diag(w)^-1 * u^T * src + % ~ A^-1 * src % % Using this technique you can either get a very accurate % solution of the convenient linear system, or the best (in the % least-squares terms) pseudo-solution of an overdetermined % linear system. % - % ## Note + % ### Note % Explicit SVD with the further back substitution only % makes sense if you need to solve many linear systems with the % same left-hand side (for example, `src`). If all you need is to @@ -163,8 +164,8 @@ function compute(this, A, varargin) function [w, u, vt] = Compute(A, varargin) %COMPUTE Performs SVD of a matrix % - % [w, u, vt] = cv.SVD.Compute(A) - % [...] = cv.SVD.Compute(..., 'OptionName', optionValue, ...) + % [w, u, vt] = cv.SVD.Compute(A) + % [...] = cv.SVD.Compute(..., 'OptionName', optionValue, ...) % % ## Input % * __A__ Decomposed matrix, A = u*diag(w)*vt % % ## Output % * __w__ Computed singular values % * __u__ Computed left singular vectors % * __vt__ Transposed matrix of right singular vectors % % ## Options % * __NoUV__ Use only singular values `w`. The algorithm does not - % compute `u` and `vt` matrices. default false + % compute `u` and `vt` matrices. default false % * __FullUV__ When the matrix is not square, by default the - % algorithm produces `u` and `vt` matrices of sufficiently - % large size for the further `A` reconstruction. If, - % however, the 'FullUV' flag is specified, `u` and `vt` are - % full-size square orthogonal matrices. default false + % algorithm produces `u` and `vt` matrices of sufficiently large + % size for the further `A` reconstruction. If, however, the + % 'FullUV' flag is specified, `u` and `vt` are full-size square + % orthogonal matrices. default false % % The function performs SVD of a matrix. Unlike the cv.SVD.compute() % method, it returns the results in the output matrices. @@ -194,15 +195,15 @@ function compute(this, A, varargin) function dst = BackSubst(w, u, vt, src) %BACKSUBST Performs back substitution % - % dst = cv.SVD.BackSubst(w, u, vt, src) + % dst = cv.SVD.BackSubst(w, u, vt, src) % % ## Input % * __w__ Singular values % * __u__ Left singular vectors % * __vt__ Transposed matrix of right singular vectors % * __src__ Right-hand side of a linear system `(u*w*v')*dst = src` - % to be solved, where `A` has been previously decomposed - % into `u`, `w`, and `vt` (passed arguments). + % to be solved, where `A` has been previously decomposed + % into `u`, `w`, and `vt` (passed arguments). % % ## Output % * __dst__ Found solution of the system. @@ -210,15 +211,15 @@ function compute(this, A, varargin) % The method computes a back substitution for the specified % right-hand side: % - % x = vt^T * diag(w)^-1 * u^T * src - % ~ A^-1* src + % x = vt^T * diag(w)^-1 * u^T * src + % ~ A^-1 * src % % Using this technique you can either get a very accurate % solution of the convenient linear system, or the best (in the % least-squares terms) pseudo-solution of an overdetermined % linear system. % - % ## Note + % ### Note % Explicit SVD with the further back substitution only % makes sense if you need to solve many linear systems with the % same left-hand side (for example, `src`). If all you need is to @@ -234,7 +235,7 @@ function compute(this, A, varargin) function dst = SolveZ(A) %SOLVEZ Solves an under-determined singular linear system % - % dst = cv.SVD.SolveZ(A) + % dst = cv.SVD.SolveZ(A) % % ## Input % * __A__ Left-hand-side matrix.
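For orientation before the formal problem statement that follows, a hedged sketch of what SolveZ returns for a rank-deficient matrix:

    A = [1 2; 2 4];             % rank-1 matrix, so A*x = 0 has a nontrivial solution
    x = cv.SVD.SolveZ(A);       % unit-norm x minimizing ||A*x||
    [norm(x), norm(A*x)]        % -> approximately [1, 0]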
@@ -248,7 +249,7 @@ function compute(this, A, varargin) % solutions. In general, the algorithm solves the following % problem: % - % dst = argmin_{x: ||x||=1} || A * x || + % dst = argmin_{x: ||x||=1} || A * x || % % See also: cv.SVD.Compute, cv.SVD.BackSubst % diff --git a/+cv/SVM.m b/+cv/SVM.m index 1c4978065..e0815e237 100644 --- a/+cv/SVM.m +++ b/+cv/SVM.m @@ -53,42 +53,42 @@ % Default value is `C_SVC`. Possible values: % % * **C_SVC** C-Support Vector Classification. n-class classification - % (`n>=2`), allows imperfect separation of classes with penalty - % multiplier `C` for outliers. + % (`n>=2`), allows imperfect separation of classes with penalty + % multiplier `C` for outliers. % * **NU_SVC** Nu-Support Vector Classification. n-class - % classification with possible imperfect separation. Parameter - % `Nu` (in the range 0..1, the larger the value, the smoother - % the decision boundary) is used instead of `C`. + % classification with possible imperfect separation. Parameter `Nu` + % (in the range 0..1, the larger the value, the smoother the + % decision boundary) is used instead of `C`. % * **ONE_CLASS** Distribution Estimation (One-class SVM). All the - % training data are from the same class, SVM builds a boundary - % that separates the class from the rest of the feature space. + % training data are from the same class, SVM builds a boundary that + % separates the class from the rest of the feature space. % * **EPS_SVR** P-Support Vector Regression. The distance between - % feature vectors from the training set and the fitting - % hyper-plane must be less than `P`. For outliers the penalty - % multiplier `C` is used. + % feature vectors from the training set and the fitting hyper-plane + % must be less than `P`. For outliers the penalty multiplier `C` is + % used. % * **NU_SVR** Nu-Support Vector Regression. `Nu` is used instead of - % `P`. See [LibSVM] for details. + % `P`. See [LibSVM] for details. Type % Type of a SVM kernel. % % Default value is 'RBF'. One of the following predefined kernels: % % * __Custom__ Returned by property in case when custom kernel has - % been set. See cv.SVM.setCustomKernel. + % been set. See cv.SVM.setCustomKernel. % * __Linear__ Linear kernel. No mapping is done, linear - % discrimination (or regression) is done in the original feature - % space. It is the fastest option. `K(x_i,x_j) = x_i' * x_j`. + % discrimination (or regression) is done in the original feature + % space. It is the fastest option. `K(x_i,x_j) = x_i' * x_j`. % * __Poly__ Polynomial kernel. - % `K(x_i,x_j) = (gamma * x_i' * x_j + coef0)^degree, gamma>0`. + % `K(x_i,x_j) = (gamma * x_i' * x_j + coef0)^degree, gamma>0`. % * __RBF__ Radial basis function (RBF), a good choice in most cases. - % `K(x_i,x_j) = exp(-gamma * ||x_i - x_j||^2), gamma>0`. + % `K(x_i,x_j) = exp(-gamma * ||x_i - x_j||^2), gamma>0`. % * __Sigmoid__ Sigmoid kernel. - % `K(x_i,x_j) = tanh(gamma * x_i' * x_j + coef0)`. + % `K(x_i,x_j) = tanh(gamma * x_i' * x_j + coef0)`. % * __Chi2__ Exponential Chi2 kernel, similar to the RBF kernel. - % `K(x_i,x_j) = exp(-gamma * X2(x_i,x_j))`, - % `X2(x_i,x_j) = (x_i - x_j)^2 / (x_i + x_j), gamma>0`. + % `K(x_i,x_j) = exp(-gamma * X2(x_i,x_j))`, + % `X2(x_i,x_j) = (x_i - x_j)^2 / (x_i + x_j), gamma>0`. % * __Intersection__ Histogram intersection kernel. A fast kernel. - % `K(x_i,x_j) = min(x_i,x_j)`. + % `K(x_i,x_j) = min(x_i,x_j)`. KernelType % Parameter `degree` of a kernel function. 
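Putting the formulation types and kernels above together, a hedged training sketch (it assumes `samples` is a single-precision N-by-D matrix, `labels` an int32 N-by-1 vector, and uses the scalar properties `C` and `Gamma` exposed by this class; the parameter values are arbitrary):

    model = cv.SVM();
    model.Type = 'C_SVC';       % n-class classification with penalty multiplier C
    model.KernelType = 'RBF';   % a good choice in most cases
    model.C = 1;
    model.Gamma = 0.5;
    model.train(samples, labels);
    pred = model.predict(samples);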
% @@ -141,8 +141,8 @@ function this = SVM(varargin) %SVM Creates/trains a new SVM instance % - % model = cv.SVM() - % model = cv.SVM(...) + % model = cv.SVM() + % model = cv.SVM(...) % % The first variant creates an empty model. Use cv.SVM.train to % train the model. Since SVM has several parameters, you may want @@ -163,7 +163,7 @@ function delete(this) %DELETE Destructor % - % model.delete() + % model.delete() % % See also: cv.SVM % @@ -177,7 +177,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % model.clear() + % model.clear() % % The method clear does the same job as the destructor: it % deallocates all the memory occupied by the class members. But @@ -193,11 +193,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = model.empty() + % b = model.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.SVM.clear, cv.SVM.load % @@ -207,17 +207,17 @@ function clear(this) function varargout = save(this, filename) %SAVE Saves the algorithm parameters to a file or a string % - % model.save(filename) - % str = model.save(filename) + % model.save(filename) + % str = model.save(filename) % % ## Input % * __filename__ Name of the file to save to. In case of string - % output, only the filename extension is used to determine - % the output format (XML or YAML). + % output, only the filename extension is used to determine the + % output format (XML or YAML). % % ## Output % * __str__ optional output. If requested, the model is persisted - % to a string in memory instead of writing to disk. + % to a string in memory instead of writing to disk. % % This method stores the complete model state to the specified % XML or YAML file (or to a string in memory, based on the number @@ -231,23 +231,22 @@ function clear(this) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % model.load(filename) - % model.load(str, 'FromString',true) - % model.load(..., 'OptionName',optionValue, ...) + % model.load(filename) + % model.load(str, 'FromString',true) + % model.load(..., 'OptionName',optionValue, ...) % % ## Input % * __filename__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model - % (switches between `Algorithm::load()` and - % `Algorithm::loadFromString()` C++ methods). - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model (switches + % between `Algorithm::load()` and + % `Algorithm::loadFromString()` C++ methods). default false % % This method loads the complete model state from the specified % XML or YAML file (either from disk or serialized string). 
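A hedged persistence round trip using the save/load methods above (the file name is arbitrary; the extension selects XML vs. YAML):

    model.save('svm_params.xml');     % persist the trained state to disk
    model2 = cv.SVM();
    model2.load('svm_params.xml');    % restore it into a fresh object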
The @@ -261,11 +260,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = model.getDefaultName() + % name = model.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.SVM.save, cv.SVM.load % @@ -278,7 +277,7 @@ function load(this, fname_or_str, varargin) function count = getVarCount(this) %GETVARCOUNT Returns the number of variables in training samples % - % count = model.getVarCount() + % count = model.getVarCount() % % ## Output % * __count__ number of variables in training samples. @@ -291,7 +290,7 @@ function load(this, fname_or_str, varargin) function b = isTrained(this) %ISTRAINED Returns true if the model is trained % - % b = model.isTrained() + % b = model.isTrained() % % ## Output % * __b__ Returns true if the model is trained, false otherwise. @@ -304,12 +303,12 @@ function load(this, fname_or_str, varargin) function b = isClassifier(this) %ISCLASSIFIER Returns true if the model is a classifier % - % b = model.isClassifier() + % b = model.isClassifier() % % ## Output % * __b__ Returns true if the model is a classifier (`C_SVC`, - % `NU_SVC`, or `ONE_CLASS`), false if the model is a - % regressor (`EPS_SVR`, `NU_SVR`). + % `NU_SVC`, or `ONE_CLASS`), false if the model is a regressor + % (`EPS_SVR`, `NU_SVR`). % % See also: cv.SVM.isTrained % @@ -319,120 +318,115 @@ function load(this, fname_or_str, varargin) function status = train(this, samples, responses, varargin) %TRAIN Trains the statistical model % - % status = model.train(samples, responses) - % status = model.train(csvFilename, []) - % [...] = model.train(..., 'OptionName', optionValue, ...) + % status = model.train(samples, responses) + % status = model.train(csvFilename, []) + % [...] = model.train(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ matrix of training samples. It should have - % `single` type. By default, each row represents a sample - % (see the `Layout` option). + % `single` type. By default, each row represents a sample (see + % the `Layout` option). % * __responses__ matrix of associated responses. If the responses - % are scalar, they should be stored as a vector (as a single - % row or a single column matrix). The matrix should have - % type `single` or `int32` (in the former case the responses - % are considered as ordered (numerical) by default; in the - % latter case as categorical). You can override the defaults - % using the `VarType` option. + % are scalar, they should be stored as a vector (as a single row + % or a single column matrix). The matrix should have type + % `single` or `int32` (in the former case the responses are + % considered as ordered (numerical) by default; in the latter + % case as categorical). You can override the defaults using the + % `VarType` option. % * __csvFilename__ The input CSV file name from which to load - % dataset. In this variant, you should set the second - % argument to an empty array. + % dataset. In this variant, you should set the second argument + % to an empty array. % % ## Output % * __status__ Success flag. % % ## Options % * __Data__ Training data options, specified as a cell array of - % key/value pairs of the form `{'key',val, ...}`. See below. + % key/value pairs of the form `{'key',val, ...}`. See below. % * __Flags__ The optional training flags, model-dependent. 
- % Not used. default 0 + % Not used. default 0 % % ### Options for `Data` (first variant with samples and responses) % * __Layout__ Sample types. Default 'Row'. One of: - % * __Row__ each training sample is a row of samples. - % * __Col__ each training sample occupies a column of - % samples. + % * __Row__ each training sample is a row of samples. + % * __Col__ each training sample occupies a column of samples. % * __VarIdx__ vector specifying which variables to use for - % training. It can be an integer vector (`int32`) containing - % 0-based variable indices or logical vector (`uint8` or - % `logical`) containing a mask of active variables. Not set - % by default, which uses all variables in the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based variable indices or logical vector (`uint8` or + % `logical`) containing a mask of active variables. Not set by + % default, which uses all variables in the input data. % * __SampleIdx__ vector specifying which samples to use for - % training. It can be an integer vector (`int32`) containing - % 0-based sample indices or logical vector (`uint8` or - % `logical`) containing a mask of training samples of - % interest. Not set by default, which uses all samples in - % the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based sample indices or logical vector (`uint8` or + % `logical`) containing a mask of training samples of interest. + % Not set by default, which uses all samples in the input data. % * __SampleWeights__ optional floating-point vector with weights - % for each sample. Some samples may be more important than - % others for training. You may want to raise the weight of - % certain classes to find the right balance between hit-rate - % and false-alarm rate, and so on. Not set by default, which - % effectively assigns an equal weight of 1 for all samples. + % for each sample. Some samples may be more important than + % others for training. You may want to raise the weight of + % certain classes to find the right balance between hit-rate and + % false-alarm rate, and so on. Not set by default, which + % effectively assigns an equal weight of 1 for all samples. % * __VarType__ optional vector of type `uint8` and size - % ` + `, - % containing types of each input and output variable. By - % default considers all variables as numerical (both input - % and output variables). In case there is only one output - % variable of integer type, it is considered categorical. - % You can also specify a cell-array of strings (or as one - % string of single characters, e.g 'NNNC'). Possible values: - % * __Numerical__, __N__ same as 'Ordered' - % * __Ordered__, __O__ ordered variables - % * __Categorical__, __C__ categorical variables + % ` + `, + % containing types of each input and output variable. By default + % considers all variables as numerical (both input and output + % variables). In case there is only one output variable of + % integer type, it is considered categorical. You can also + % specify a cell-array of strings (or as one string of single + % characters, e.g 'NNNC'). Possible values: + % * __Numerical__, __N__ same as 'Ordered' + % * __Ordered__, __O__ ordered variables + % * __Categorical__, __C__ categorical variables % * __MissingMask__ Indicator mask for missing observation (not - % currently implemented).
Not set by default % * __TrainTestSplitCount__ divides the dataset into train/test - % sets, by specifying number of samples to use for the test - % set. By default all samples are used for the training set. + % sets, by specifying number of samples to use for the test set. + % By default all samples are used for the training set. % * __TrainTestSplitRatio__ divides the dataset into train/test - % sets, by specifying ratio of samples to use for the test - % set. By default all samples are used for the training set. + % sets, by specifying ratio of samples to use for the test set. + % By default all samples are used for the training set. % * __TrainTestSplitShuffle__ when splitting dataset into - % train/test sets, specify whether to shuffle the samples. - % Otherwise samples are assigned sequentially (first train - % then test). default true + % train/test sets, specify whether to shuffle the samples. + % Otherwise samples are assigned sequentially (first train then + % test). default true % % ### Options for `Data` (second variant for loading CSV file) % * __HeaderLineCount__ The number of lines in the beginning to - % skip; besides the header, the function also skips empty - % lines and lines staring with '#'. default 1 + % skip; besides the header, the function also skips empty lines + % and lines starting with '#'. default 1 % * __ResponseStartIdx__ Index of the first output variable. If - % -1, the function considers the last variable as the - % response. If the dataset only contains input variables and - % no responses, use `ResponseStartIdx = -2` and - % `ResponseEndIdx = 0`, then the output variables vector - % will just contain zeros. default -1 + % -1, the function considers the last variable as the response. + % If the dataset only contains input variables and no responses, + % use `ResponseStartIdx = -2` and `ResponseEndIdx = 0`, then the + % output variables vector will just contain zeros. default -1 % * __ResponseEndIdx__ Index of the last output variable + 1. If - % -1, then there is single response variable at - % `ResponseStartIdx`. default -1 + % -1, then there is a single response variable at + % `ResponseStartIdx`. default -1 % * __VarTypeSpec__ The optional text string that specifies the - % variables' types. It has the format - % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, - % variables from `n1` to `n2` (inclusive range), `n3`, `n4` - % to `n5` ... are considered ordered and `n6`, `n7` to - % `n8` ... are considered as categorical. The range - % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` - % should cover all the variables. If `VarTypeSpec` is not - % specified, then algorithm uses the following rules: - % * all input variables are considered ordered by default. - % If some column contains has non- numerical values, e.g. - % 'apple', 'pear', 'apple', 'apple', 'mango', the - % corresponding variable is considered categorical. - % * if there are several output variables, they are all - % considered as ordered. Errors are reported when - % non-numerical values are used. - % * if there is a single output variable, then if its values - % are non-numerical or are all integers, then it's - % considered categorical. Otherwise, it's considered - % ordered. + % variables' types. It has the format + % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, variables + % from `n1` to `n2` (inclusive range), `n3`, `n4` to `n5` ... + % are considered ordered and `n6`, `n7` to `n8` ... are + % considered as categorical. The range + % `[n1..n2] + [n3] + [n4..n5] + ...
+ [n6] + [n7..n8]` should + % cover all the variables. If `VarTypeSpec` is not specified, + % then the algorithm uses the following rules: + % * all input variables are considered ordered by default. If + % some column contains non-numerical values, e.g. + % 'apple', 'pear', 'apple', 'apple', 'mango', the + % corresponding variable is considered categorical. + % * if there are several output variables, they are all + % considered as ordered. Errors are reported when + % non-numerical values are used. + % * if there is a single output variable, then if its values are + % non-numerical or are all integers, then it's considered + % categorical. Otherwise, it's considered ordered. % * __Delimiter__ The character used to separate values in each - % line. default ',' + % line. default ',' % * __Missing__ The character used to specify missing - % measurements. It should not be a digit. Although it's a - % non-numerical value, it surely does not affect the - % decision of whether the variable ordered or categorical. - % default '?' + % measurements. It should not be a digit. Although it's a + % non-numerical value, it surely does not affect the decision of + % whether the variable is ordered or categorical. default '?' % * __TrainTestSplitCount__ same as above. % * __TrainTestSplitRatio__ same as above. % * __TrainTestSplitShuffle__ same as above. @@ -457,19 +451,19 @@ function load(this, fname_or_str, varargin) % the arguments, meaning that all of the variables/samples are % used for training. % - % ### Example + % ## Example % For example, an `Nx4` samples matrix of row layout with four % numerical variables and one categorical response variable `Nx1` % can be specified as: % - % model.train(samples, responses, 'Flags',0, ... - % 'Data',{'Layout','Row', 'VarType','NNNNC'}); + % model.train(samples, responses, 'Flags',0, ... + % 'Data',{'Layout','Row', 'VarType','NNNNC'}); % - % ### Example + % ## Example % You can also directly load a dataset from a CSV file: % - % model.train('C:\path\to\data.csv', [], 'Flags',0, ... - % 'Data',{'HeaderLineCount',1, 'Delimiter',','}); + % model.train('C:\path\to\data.csv', [], 'Flags',0, ... + % 'Data',{'HeaderLineCount',1, 'Delimiter',','}); % % See also: cv.SVM.trainAuto, cv.SVM.predict, cv.SVM.calcError % @@ -479,10 +473,10 @@ function load(this, fname_or_str, varargin) function [err,resp] = calcError(this, samples, responses, varargin) %CALCERROR Computes error on the training or test dataset % - % err = model.calcError(samples, responses) - % err = model.calcError(csvFilename, []) - % [err,resp] = model.calcError(...) - % [...] = model.calcError(..., 'OptionName', optionValue, ...) + % err = model.calcError(samples, responses) + % err = model.calcError(csvFilename, []) + % [err,resp] = model.calcError(...) + % [...] = model.calcError(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ See the train method. % * __responses__ See the train method. % * __csvFilename__ See the train method. % % ## Output % * __err__ computed error. % * __resp__ the optional output responses. % % ## Options % * __Data__ See the train method. % * __TestError__ if true, the error is computed over the test - % subset of the data, otherwise it's computed over the - % training subset of the data. Please note that if you - % loaded a completely different dataset to evaluate an - % already trained classifier, you will probably want not to - % set the test subset at all with `TrainTestSplitRatio` and - % specify `TestError=false`, so that the error is computed - % for the whole new set. Yes, this sounds a bit confusing.
- % default false + % subset of the data, otherwise it's computed over the training + % subset of the data. Please note that if you loaded a + % completely different dataset to evaluate an already trained + % classifier, you will probably want not to set the test subset + % at all with `TrainTestSplitRatio` and specify + % `TestError=false`, so that the error is computed for the whole + % new set. Yes, this sounds a bit confusing. default false % % The method uses the predict method to compute the error. For % regression models the error is computed as RMS, for classifiers @@ -517,8 +510,8 @@ function load(this, fname_or_str, varargin) function [results,f] = predict(this, samples, varargin) %PREDICT Predicts response(s) for the provided sample(s) % - % [results,f] = model.predict(samples) - % [...] = model.predict(..., 'OptionName', optionValue, ...) + % [results,f] = model.predict(samples) + % [...] = model.predict(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ The input samples, floating-point matrix. @@ -526,21 +519,21 @@ function load(this, fname_or_str, varargin) % ## Output % * __results__ The output matrix of results. % * __f__ If you pass one sample then prediction result is - % returned here, otherwise unused and returns 0. If you want - % to get responses for several samples then `results` stores - % all response predictions for corresponding samples. + % returned here, otherwise unused and returns 0. If you want to + % get responses for several samples then `results` stores all + % response predictions for corresponding samples. % % ## Options % * __Flags__ The optional predict flags, model-dependent. For - % convenience, you can set the individual flag options - % below, instead of directly setting bits here. default 0 + % convenience, you can set the individual flag options below, + % instead of directly setting bits here. default 0 % * __RawOutput__ makes the method return the raw results (the - % sum), not the class label. This flag specifies the type of - % the return value. If true and the problem is 2-class - % classification then the method returns the decision - % function value that is signed distance to the margin, else - % the function returns a class label (classification) or - % estimated function value (regression). default false + % sum), not the class label. This flag specifies the type of the + % return value. If true and the problem is 2-class + % classification then the method returns the decision function + % value that is signed distance to the margin, else the function + % returns a class label (classification) or estimated function + % value (regression). default false % % The function is parallelized with the TBB library. % @@ -555,9 +548,9 @@ function load(this, fname_or_str, varargin) function status = trainAuto(this, samples, responses, varargin) %TRAINAUTO Trains an SVM with optimal parameters % - % status = model.trainAuto(samples, responses) - % status = model.trainAuto(csvFilename, []) - % [...] = model.trainAuto(..., 'OptionName', optionValue, ...) + % status = model.trainAuto(samples, responses) + % status = model.trainAuto(csvFilename, []) + % [...] = model.trainAuto(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ See the train method. @@ -570,44 +563,41 @@ function load(this, fname_or_str, varargin) % ## Options % * __Data__ See the train method. % * __KFold__ Cross-validation parameter. The training set is - % divided into `KFold` subsets. 
One subset is used to test - the model, the others form the train set. So, the SVM - algorithm is executed `KFold` times. default 10 + % divided into `KFold` subsets. One subset is used to test the + % model, the others form the train set. So, the SVM algorithm is + % executed `KFold` times. default 10 % * __Balanced__ If true and the problem is 2-class classification - % then the method creates more balanced cross-validation - % subsets that is proportions between classes in subsets are - % close to such proportion in the whole train dataset. - % default false + % then the method creates more balanced cross-validation subsets, + % that is, the proportions between classes in the subsets are + % close to such proportions in the whole train dataset. default false % * __CGrid__, __GammaGrid__, __NuGrid__, % __PGrid__, __CoeffGrid__, __DegreeGrid__ - % Iteration grid for the corresponding SVM parameter. - % A structure that represents the logarithmic grid range of - % SVM parameters. It is used for optimizing model accuracy - % by varying model parameters, the accuracy estimate being - % computed by cross-validation. It accepts a struct having - % the fields below. It also accepts a 3-element vector in - % which each parameter is specified in the same order as the - % supported struct: - % - % * __minVal__ Minimum value of the model parameter - % * __maxVal__ Maximum value of the model parameter - % * __logStep__ Logarithmic step for iterating the model - % parameter - % - % The grid determines the following iteration sequence of - % the model parameter values: `minVal * logStep.^(0:n)`, - % where `n` is the maximal index satisfying - % `minVal*logStep^n < maxVal`. The grid is logarithmic, so - % `logStep` must always be greater then 1. - % - % Defaults are: - % - % * 'CGrid' : `struct('minVal',0.1, 'maxVal',500, 'logStep',5 )` - % * 'GammaGrid' : `struct('minVal',1e-5, 'maxVal',0.6, 'logStep',15)` - % * 'PGrid' : `struct('minVal',0.01, 'maxVal',100, 'logStep',7 )` - % * 'NuGrid' : `struct('minVal',0.01, 'maxVal',0.2, 'logStep',3 )` - % * 'CoeffGrid' : `struct('minVal',0.1, 'maxVal',300, 'logStep',14)` - % * 'DegreeGrid': `struct('minVal',0.01, 'maxVal',4, 'logStep',7 )` + % Iteration grid for the corresponding SVM parameter. + % A structure that represents the logarithmic grid range of SVM + % parameters. It is used for optimizing model accuracy by + % varying model parameters, the accuracy estimate being computed + % by cross-validation. It accepts a struct having the fields + % below. It also accepts a 3-element vector in which each + % parameter is specified in the same order as the supported + % struct: + % + % * __minVal__ Minimum value of the model parameter + % * __maxVal__ Maximum value of the model parameter + % * __logStep__ Logarithmic step for iterating the model + % parameter + % + % The grid determines the following iteration sequence of the + % model parameter values: `minVal * logStep.^(0:n)`, where `n` + % is the maximal index satisfying `minVal*logStep^n < maxVal`. + % The grid is logarithmic, so `logStep` must always be greater + % than 1.
Defaults are: + % + % * 'CGrid' : `struct('minVal',0.1, 'maxVal',500, 'logStep',5 )` + % * 'GammaGrid' : `struct('minVal',1e-5, 'maxVal',0.6, 'logStep',15)` + % * 'PGrid' : `struct('minVal',0.01, 'maxVal',100, 'logStep',7 )` + % * 'NuGrid' : `struct('minVal',0.01, 'maxVal',0.2, 'logStep',3 )` + % * 'CoeffGrid' : `struct('minVal',0.1, 'maxVal',300, 'logStep',14)` + % * 'DegreeGrid': `struct('minVal',0.01, 'maxVal',4, 'logStep',7 )` % % The method trains the SVM model automatically by choosing the % optimal parameters `C`, `Gamma`, `P`, `Nu`, `Coef0`, `Degree` of @@ -639,27 +629,27 @@ function load(this, fname_or_str, varargin) function [alpha,svidx,rho] = getDecisionFunction(this, index) %GETDECISIONFUNCTION Retrieves the decision function % - % [alpha,svidx,rho] = model.getDecisionFunction(index) + % [alpha,svidx,rho] = model.getDecisionFunction(index) % % ## Input - % * __index__ the index of the decision function (0-based). If - % the problem solved is regression, 1-class or 2-class - % classification, then there will be just one decision - % function and the index should always be 0. Otherwise, in - % the case of N-class classification, there will be - % `N(N-1)/2` decision functions. + % * __index__ the index of the decision function (0-based). If the + % problem solved is regression, 1-class or 2-class + % classification, then there will be just one decision function + % and the index should always be 0. Otherwise, in the case of + % N-class classification, there will be `N(N-1)/2` decision + % functions. % % ## Output % * __alpha__ the optional output vector for weights, - % corresponding to different support vectors. In the case of - % linear SVM all the alpha's will be 1's. + % corresponding to different support vectors. In the case of + % linear SVM all the alpha's will be 1's. % * __svidx__ the optional output vector of indices of support - % vectors within the matrix of support vectors (which can be - % retrieved by cv.SVM.getSupportVectors. In the case of - % linear SVM each decision function consists of a single - % "compressed" support vector. + % vectors within the matrix of support vectors (which can be + % retrieved by cv.SVM.getSupportVectors). In the case of linear + % SVM each decision function consists of a single "compressed" + % support vector. % * __rho__ `rho` parameter of the decision function, a scalar - % subtracted from the weighted sum of kernel responses. + % subtracted from the weighted sum of kernel responses. % % See also: cv.SVM.getSupportVectors % @@ -669,7 +659,7 @@ function load(this, fname_or_str, varargin) function sv = getSupportVectors(this) %GETSUPPORTVECTORS Retrieves all the support vectors % - % sv = model.getSupportVectors() + % sv = model.getSupportVectors() % % ## Output % * __sv__ Support vectors. @@ -686,7 +676,7 @@ function load(this, fname_or_str, varargin) function sv = getUncompressedSupportVectors(this) %GETUNCOMPRESSEDSUPPORTVECTORS Retrieves all the uncompressed support vectors of a linear SVM % - % sv = model.getUncompressedSupportVectors() + % sv = model.getUncompressedSupportVectors() % % ## Output % * __sv__ Uncompressed support vectors. @@ -705,13 +695,13 @@ function load(this, fname_or_str, varargin) function setCustomKernel(this, kernelFunc) %SETCUSTOMKERNEL Initialize with custom kernel % - % model.setCustomKernel(kernelFunc) + % model.setCustomKernel(kernelFunc) % % ## Input % * __kernelFunc__ string, name of an M-function that implements a - % kernel function. See example below.
% - % ## Note + % ### Note % Parts of `cv::ml::SVM` implementation are thread-parallelized % (for example `SVM::predict` runs a `ParallelLoopBody`). By using % a custom kernel, we would be calling a MATLAB function @@ -730,35 +720,35 @@ function setCustomKernel(this, kernelFunc) % sample in "vecs" against "another". It will be called during % training and prediction by the SVM class. % - % function results = my_custom_kernel(vecs, another) - % [vcount,n] = size(vecs); - % results = zeros(vcount, 1, 'single'); - % for i=1:vcount - % results(i) = dot(vecs(i,:), another); - % end + % function results = my_custom_kernel(vecs, another) + % [vcount,n] = size(vecs); + % results = zeros(vcount, 1, 'single'); + % for i=1:vcount + % results(i) = dot(vecs(i,:), another); + % end % - % % or computed in a vectorized manner as - % %results = sum(bsxfun(@times, vecs, another), 2); + % % or computed in a vectorized manner as + % %results = sum(bsxfun(@times, vecs, another), 2); % - % % or simply written as matrix-vector product - % %results = vecs * another.'; - % end + % % or simply written as matrix-vector product + % %results = vecs * another.'; + % end % % We use the custom kernel in the following manner: % - % % load some data for classification - % load fisheriris - % samples = meas; - % responses = int32(grp2idx(species)); + % % load some data for classification + % load fisheriris + % samples = meas; + % responses = int32(grp2idx(species)); % - % cv.Utils.setNumThreads(1) % see above note + % cv.Utils.setNumThreads(1) % see above note % - % model = cv.SVM(); - % model.setCustomKernel('my_custom_kernel'); - % model.train(samples, responses) - % nnz(model.predict(samples) == responses) + % model = cv.SVM(); + % model.setCustomKernel('my_custom_kernel'); + % model.train(samples, responses) + % nnz(model.predict(samples) == responses) % - % cv.Utils.setNumThreads(cv.Utils.getNumberOfCPUs()) + % cv.Utils.setNumThreads(cv.Utils.getNumberOfCPUs()) % % See also: cv.SVM.KernelType % diff --git a/+cv/SVMSGD.m b/+cv/SVMSGD.m index 20b8919bc..ec2c9bd78 100644 --- a/+cv/SVMSGD.m +++ b/+cv/SVMSGD.m @@ -19,7 +19,7 @@ % * __SGD__ is the classic version of SVMSGD classifier: every next step % is calculated by the formula: % - % w_{t+1} = w_t - gamma(t) * (dQ_i/dw)|_{w = w_t} + % w_{t+1} = w_t - gamma(t) * (dQ_i/dw)|_{w = w_t} % % where % @@ -80,12 +80,12 @@ % % Example: % - % % Create empty object - % svmsgd = cv.SVMSGD(); - % % Train the Stochastic Gradient Descent SVM - % svmsgd.train(trainData); - % % Predict labels for the new samples - % svmsgd.predict(samples, responses); + % % Create empty object + % svmsgd = cv.SVMSGD(); + % % Train the Stochastic Gradient Descent SVM + % svmsgd.train(trainData); + % % Predict labels for the new samples + % svmsgd.predict(samples, responses); % % ## References % [bottou2010large]: @@ -113,9 +113,9 @@ % Default 'SoftMargin'. One of: % % * __SoftMargin__ General case, suits to the case of non-linearly - % separable sets, allows outliers. + % separable sets, allows outliers. % * __HardMargin__ More accurate for the case of linearly separable - % sets. + % sets. MarginType % Parameter margin regularization of a SVMSGD optimization problem. MarginRegularization @@ -141,8 +141,8 @@ function this = SVMSGD(varargin) %SVMSGD Creates empty model % - % model = cv.SVMSGD() - % model = cv.SVMSGD(...) + % model = cv.SVMSGD() + % model = cv.SVMSGD(...) % % The first variant creates an empty model. Use cv.SVMSGD.train to % train the model. 
Since SVMSGD has several parameters, you may @@ -163,7 +163,7 @@ function delete(this) %DELETE Destructor % - % model.delete() + % model.delete() % % See also: cv.SVMSGD % @@ -177,7 +177,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % model.clear() + % model.clear() % % The method clear does the same job as the destructor: it % deallocates all the memory occupied by the class members. But @@ -193,11 +193,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = model.empty() + % b = model.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.SVMSGD.clear, cv.SVMSGD.load % @@ -207,17 +207,17 @@ function clear(this) function varargout = save(this, filename) %SAVE Saves the algorithm parameters to a file or a string % - % model.save(filename) - % str = model.save(filename) + % model.save(filename) + % str = model.save(filename) % % ## Input % * __filename__ Name of the file to save to. In case of string - % output, only the filename extension is used to determine - % the output format (XML or YAML). + % output, only the filename extension is used to determine the + % output format (XML or YAML). % % ## Output % * __str__ optional output. If requested, the model is persisted - % to a string in memory instead of writing to disk. + % to a string in memory instead of writing to disk. % % This method stores the complete model state to the specified % XML or YAML file (or to a string in memory, based on the number @@ -231,23 +231,22 @@ function clear(this) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % model.load(filename) - % model.load(str, 'FromString',true) - % model.load(..., 'OptionName',optionValue, ...) + % model.load(filename) + % model.load(str, 'FromString',true) + % model.load(..., 'OptionName',optionValue, ...) % % ## Input % * __filename__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model - % (switches between `Algorithm::load()` and - % `Algorithm::loadFromString()` C++ methods). - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model (switches + % between `Algorithm::load()` and + % `Algorithm::loadFromString()` C++ methods). default false % % This method loads the complete model state from the specified % XML or YAML file (either from disk or serialized string). The @@ -261,11 +260,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = model.getDefaultName() + % name = model.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. 
% % See also: cv.SVMSGD.save, cv.SVMSGD.load % @@ -278,7 +277,7 @@ function load(this, fname_or_str, varargin) function count = getVarCount(this) %GETVARCOUNT Returns the number of variables in training samples % - % count = model.getVarCount() + % count = model.getVarCount() % % ## Output % * __count__ number of variables in training samples. @@ -291,7 +290,7 @@ function load(this, fname_or_str, varargin) function b = isTrained(this) %ISTRAINED Returns true if the model is trained % - % b = model.isTrained() + % b = model.isTrained() % % ## Output % * __b__ Returns true if the model is trained, false otherwise. @@ -304,11 +303,11 @@ function load(this, fname_or_str, varargin) function b = isClassifier(this) %ISCLASSIFIER Returns true if the model is a classifier % - % b = model.isClassifier() + % b = model.isClassifier() % % ## Output % * __b__ Returns true if the model is a classifier, false if the - % model is a regressor. + % model is a regressor. % % See also: cv.SVMSGD.isTrained % @@ -318,120 +317,115 @@ function load(this, fname_or_str, varargin) function status = train(this, samples, responses, varargin) %TRAIN Trains the statistical model % - % status = model.train(samples, responses) - % status = model.train(csvFilename, []) - % [...] = model.train(..., 'OptionName', optionValue, ...) + % status = model.train(samples, responses) + % status = model.train(csvFilename, []) + % [...] = model.train(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ matrix of training samples. It should have - % `single` type. By default, each row represents a sample - % (see the `Layout` option). + % `single` type. By default, each row represents a sample (see + % the `Layout` option). % * __responses__ matrix of associated responses. If the responses - % are scalar, they should be stored as a vector (as a single - % row or a single column matrix). The matrix should have - % type `single` or `int32` (in the former case the responses - % are considered as ordered (numerical) by default; in the - % latter case as categorical). You can override the defaults - % using the `VarType` option. + % are scalar, they should be stored as a vector (as a single row + % or a single column matrix). The matrix should have type + % `single` or `int32` (in the former case the responses are + % considered as ordered (numerical) by default; in the latter + % case as categorical). You can override the defaults using the + % `VarType` option. % * __csvFilename__ The input CSV file name from which to load - % dataset. In this variant, you should set the second - % argument to an empty array. + % dataset. In this variant, you should set the second argument + % to an empty array. % % ## Output % * __status__ Success flag. % % ## Options % * __Data__ Training data options, specified as a cell array of - % key/value pairs of the form `{'key',val, ...}`. See below. + % key/value pairs of the form `{'key',val, ...}`. See below. % * __Flags__ The optional training flags, model-dependent. - % Not used. default 0 + % Not used. default 0 % % ### Options for `Data` (first variant with samples and responses) % * __Layout__ Sample types. Default 'Row'. One of: - % * __Row__ each training sample is a row of samples. - % * __Col__ each training sample occupies a column of - % samples. + % * __Row__ each training sample is a row of samples. + % * __Col__ each training sample occupies a column of samples. % * __VarIdx__ vector specifying which variables to use for - % training.
It can be an integer vector (`int32`) containing - % 0-based variable indices or logical vector (`uint8` or - % `logical`) containing a mask of active variables. Not set - % by default, which uses all variables in the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based variable indices or logical vector (`uint8` or + % `logical`) containing a mask of active variables. Not set by + % default, which uses all variables in the input data. % * __SampleIdx__ vector specifying which samples to use for - % training. It can be an integer vector (`int32`) containing - % 0-based sample indices or logical vector (`uint8` or - % `logical`) containing a mask of training samples of - % interest. Not set by default, which uses all samples in - % the input data. + % training. It can be an integer vector (`int32`) containing + % 0-based sample indices or logical vector (`uint8` or + % `logical`) containing a mask of training samples of interest. + % Not set by default, which uses all samples in the input data. % * __SampleWeights__ optional floating-point vector with weights - % for each sample. Some samples may be more important than - % others for training. You may want to raise the weight of - % certain classes to find the right balance between hit-rate - % and false-alarm rate, and so on. Not set by default, which - % effectively assigns an equal weight of 1 for all samples. + % for each sample. Some samples may be more important than + % others for training. You may want to raise the weight of + % certain classes to find the right balance between hit-rate and + % false-alarm rate, and so on. Not set by default, which + % effectively assigns an equal weight of 1 for all samples. % * __VarType__ optional vector of type `uint8` and size - % `<number_of_variables_in_samples> + <number_of_variables_in_responses>`, - % containing types of each input and output variable. By - % default considers all variables as numerical (both input - % and output variables). In case there is only one output - % variable of integer type, it is considered categorical. - % You can also specify a cell-array of strings (or as one - % string of single characters, e.g 'NNNC'). Possible values: - % * __Numerical__, __N__ same as 'Ordered' - % * __Ordered__, __O__ ordered variables - % * __Categorical__, __C__ categorical variables + % `<number_of_variables_in_samples> + <number_of_variables_in_responses>`, + % containing types of each input and output variable. By default + % considers all variables as numerical (both input and output + % variables). In case there is only one output variable of + % integer type, it is considered categorical. You can also + % specify a cell-array of strings (or as one string of single + % characters, e.g 'NNNC'). Possible values: + % * __Numerical__, __N__ same as 'Ordered' + % * __Ordered__, __O__ ordered variables + % * __Categorical__, __C__ categorical variables % * __MissingMask__ Indicator mask for missing observation (not - % currently implemented). Not set by default + % currently implemented). Not set by default % * __TrainTestSplitCount__ divides the dataset into train/test - % sets, by specifying number of samples to use for the test - % set. By default all samples are used for the training set. + % sets, by specifying number of samples to use for the test set. + % By default all samples are used for the training set. % * __TrainTestSplitRatio__ divides the dataset into train/test - % sets, by specifying ratio of samples to use for the test - % set. By default all samples are used for the training set. + % sets, by specifying ratio of samples to use for the test set.
+ % By default all samples are used for the training set. % * __TrainTestSplitShuffle__ when splitting dataset into - % train/test sets, specify whether to shuffle the samples. - % Otherwise samples are assigned sequentially (first train - % then test). default true + % train/test sets, specify whether to shuffle the samples. + % Otherwise samples are assigned sequentially (first train then + % test). default true % % ### Options for `Data` (second variant for loading CSV file) % * __HeaderLineCount__ The number of lines in the beginning to - % skip; besides the header, the function also skips empty - % lines and lines staring with '#'. default 1 + % skip; besides the header, the function also skips empty lines + % and lines starting with '#'. default 1 % * __ResponseStartIdx__ Index of the first output variable. If - % -1, the function considers the last variable as the - % response. If the dataset only contains input variables and - % no responses, use `ResponseStartIdx = -2` and - % `ResponseEndIdx = 0`, then the output variables vector - % will just contain zeros. default -1 + % -1, the function considers the last variable as the response. + % If the dataset only contains input variables and no responses, + % use `ResponseStartIdx = -2` and `ResponseEndIdx = 0`, then the + % output variables vector will just contain zeros. default -1 % * __ResponseEndIdx__ Index of the last output variable + 1. If - % -1, then there is single response variable at - % `ResponseStartIdx`. default -1 + % -1, then there is a single response variable at + % `ResponseStartIdx`. default -1 % * __VarTypeSpec__ The optional text string that specifies the - % variables' types. It has the format - % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, - % variables from `n1` to `n2` (inclusive range), `n3`, `n4` - % to `n5` ... are considered ordered and `n6`, `n7` to - % `n8` ... are considered as categorical. The range - % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` - % should cover all the variables. If `VarTypeSpec` is not - % specified, then algorithm uses the following rules: - % * all input variables are considered ordered by default. - % If some column contains has non- numerical values, e.g. - % 'apple', 'pear', 'apple', 'apple', 'mango', the - % corresponding variable is considered categorical. - % * if there are several output variables, they are all - % considered as ordered. Errors are reported when - % non-numerical values are used. - % * if there is a single output variable, then if its values - % are non-numerical or are all integers, then it's - % considered categorical. Otherwise, it's considered - % ordered. + % variables' types. It has the format + % `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, variables + % from `n1` to `n2` (inclusive range), `n3`, `n4` to `n5` ... + % are considered ordered and `n6`, `n7` to `n8` ... are + % considered as categorical. The range + % `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` should + % cover all the variables. If `VarTypeSpec` is not specified, + % then the algorithm uses the following rules: + % * all input variables are considered ordered by default. If + % some column contains non-numerical values, e.g. + % 'apple', 'pear', 'apple', 'apple', 'mango', the + % corresponding variable is considered categorical. + % * if there are several output variables, they are all + % considered as ordered. Errors are reported when + % non-numerical values are used.
+ % * if there is a single output variable, then if its values are + % non-numerical or are all integers, then it's considered + % categorical. Otherwise, it's considered ordered. % * __Delimiter__ The character used to separate values in each - % line. default ',' + % line. default ',' % * __Missing__ The character used to specify missing - % measurements. It should not be a digit. Although it's a - % non-numerical value, it surely does not affect the - % decision of whether the variable ordered or categorical. - % default '?' + % measurements. It should not be a digit. Although it's a + % non-numerical value, it surely does not affect the decision of + % whether the variable is ordered or categorical. default '?' % * __TrainTestSplitCount__ same as above. % * __TrainTestSplitRatio__ same as above. % * __TrainTestSplitShuffle__ same as above. @@ -444,10 +438,10 @@ function load(this, fname_or_str, varargin) function [err,resp] = calcError(this, samples, responses, varargin) %CALCERROR Computes error on the training or test dataset % - % err = model.calcError(samples, responses) - % err = model.calcError(csvFilename, []) - % [err,resp] = model.calcError(...) - % [...] = model.calcError(..., 'OptionName', optionValue, ...) + % err = model.calcError(samples, responses) + % err = model.calcError(csvFilename, []) + % [err,resp] = model.calcError(...) + % [...] = model.calcError(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ See the train method. @@ -461,14 +455,13 @@ function load(this, fname_or_str, varargin) % ## Options % * __Data__ See the train method. % * __TestError__ if true, the error is computed over the test - % subset of the data, otherwise it's computed over the - % training subset of the data. Please note that if you - % loaded a completely different dataset to evaluate an - % already trained classifier, you will probably want not to - % set the test subset at all with `TrainTestSplitRatio` and - % specify `TestError=false`, so that the error is computed - % for the whole new set. Yes, this sounds a bit confusing. - % default false + % subset of the data, otherwise it's computed over the training + % subset of the data. Please note that if you loaded a + % completely different dataset to evaluate an already trained + % classifier, you will probably not want to set the test subset + % at all with `TrainTestSplitRatio` and specify + % `TestError=false`, so that the error is computed for the whole + % new set. Yes, this sounds a bit confusing. default false % % The method uses the predict method to compute the error. For % regression models the error is computed as RMS, for classifiers @@ -482,8 +475,8 @@ function load(this, fname_or_str, varargin) function [results,f] = predict(this, samples, varargin) %PREDICT Predicts response(s) for the provided sample(s) % - % [results,f] = model.predict(samples) - % [...] = model.predict(..., 'OptionName', optionValue, ...) + % [results,f] = model.predict(samples) + % [...] = model.predict(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ The input samples, floating-point matrix. @@ -491,13 +484,13 @@ function load(this, fname_or_str, varargin) % ## Output % * __results__ The output matrix of results. % * __f__ If you pass one sample then prediction result is - % returned here, otherwise unused and returns 0.
If you want to + % get responses for several samples then `results` stores all + % response predictions for corresponding samples. % % ## Options % * __Flags__ The optional predict flags, model-dependent. - % Not used. default 0 + % Not used. default 0 % % See also: cv.SVMSGD.train, cv.SVMSGD.calcError % @@ -510,11 +503,11 @@ function load(this, fname_or_str, varargin) function weights = getWeights(this) %GETWEIGHTS Get model weights % - % weights = model.getWeights() + % weights = model.getWeights() % % ## Output % * __weights__ the weights of the trained model (decision - % function `f(x) = weights * x + shift`). + % function `f(x) = weights * x + shift`). % % See also: cv.SVMSGD.getShift % @@ -524,11 +517,11 @@ function load(this, fname_or_str, varargin) function shift = getShift(this) %GETSHIFT Get model shift % - % shift = model.getShift() + % shift = model.getShift() % % ## Output % * __shift__ the shift of the trained model (decision function - % `f(x) = weights * x + shift`). + % `f(x) = weights * x + shift`). % % See also: cv.SVMSGD.getWeights % @@ -538,16 +531,16 @@ function load(this, fname_or_str, varargin) function setOptimalParameters(this, varargin) %SETOPTIMALPARAMETERS Function sets optimal parameter values for the chosen SVM SGD model % - % model.setOptimalParameters('OptionName',optionValue, ...) + % model.setOptimalParameters('OptionName',optionValue, ...) % % ## Options % * __SvmsgdType__ the type of SVMSGD classifier, default 'ASGD'. % * __MarginType__ the type of margin constraint, default - % 'SoftMargin'. + % 'SoftMargin'. % % This sets the properties `MarginRegularization`, - % `InitialStepSize`, `StepDecreasingPower`, and `TermCriteria` - % to optimal values according to model type. + % `InitialStepSize`, `StepDecreasingPower`, and `TermCriteria` to + % optimal values according to model type. % % See also: cv.SVMSGD.SvmsgdType, cv.SVMSGD.MarginType % diff --git a/+cv/Scharr.m b/+cv/Scharr.m index a3a8e5113..e9422ba32 100644 --- a/+cv/Scharr.m +++ b/+cv/Scharr.m @@ -1,46 +1,46 @@ %SCHARR Calculates the first x- or y- image derivative using Scharr operator % -% dst = cv.Scharr(src) -% dst = cv.Scharr(src, 'OptionName',optionValue, ...) +% dst = cv.Scharr(src) +% dst = cv.Scharr(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ input image. % % ## Output % * __dst__ output image of the same size and the same number of channels as -% `src`. +% `src`. % % ## Options % * __XOrder__ Order of the derivative x. default 1 % * __YOrder__ Order of the derivative y. default 0 % * __Scale__ Optional scale factor for the computed derivative values. By -% default, no scaling is applied (see cv.getDerivKernels for details). -% default 1 +% default, no scaling is applied (see cv.getDerivKernels for details). +% default 1 % * __Delta__ Optional delta value that is added to the results prior to -% storing them in `dst`. default 0 +% storing them in `dst`. default 0 % * __BorderType__ Pixel extrapolation method, see cv.copyMakeBorder. -% Default 'Default' +% Default 'Default' % * __DDepth__ output image depth. default -1. When `DDepth=-1`, the output -% image will have the same depth as the source. The following -% combinations are supported: -% * SDepth = 'uint8' --> DDepth = -1, 'int16', 'single', 'double' -% * SDepth = 'uint16', 'int16' --> DDepth = -1, 'single', 'double' -% * SDepth = 'single' --> DDepth = -1, 'single', 'double' -% * SDepth = 'double' --> DDepth = -1, 'double' +% image will have the same depth as the source.
The following combinations +% are supported: +% * `SDepth = uint8 --> DDepth = -1, int16, single, double` +% * `SDepth = uint16, int16 --> DDepth = -1, single, double` +% * `SDepth = single --> DDepth = -1, single, double` +% * `SDepth = double --> DDepth = -1, double` % % The function computes the first x- or y- spatial image derivative using the % Scharr operator. % % The call: % -% dst = cv.Scharr(src, 'DDepth',ddepth, 'XOrder',dx, 'YOrder',dy, ... -% 'Scale',scale, 'Delta',delta, 'BorderType',borderType) +% dst = cv.Scharr(src, 'DDepth',ddepth, 'XOrder',dx, 'YOrder',dy, ... +% 'Scale',scale, 'Delta',delta, 'BorderType',borderType) % % is equivalent to: % -% dst = cv.Sobel(src, 'DDepth',ddepth, 'XOrder',dx, 'YOrder',dy, ... -% 'KSize','Scharr', ... -% 'Scale',scale, 'Delta',delta, 'BorderType',borderType) +% dst = cv.Sobel(src, 'DDepth',ddepth, 'XOrder',dx, 'YOrder',dy, ... +% 'KSize','Scharr', ... +% 'Scale',scale, 'Delta',delta, 'BorderType',borderType) % % See also: cv.Sobel, cv.cartToPolar % diff --git a/+cv/SeamFinder.m b/+cv/SeamFinder.m index 00499f2f2..21f660d9e 100644 --- a/+cv/SeamFinder.m +++ b/+cv/SeamFinder.m @@ -5,38 +5,39 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = SeamFinder(seamType, varargin) %SEAMFINDER Constructor % - % obj = cv.SeamFinder(seamType) - % obj = cv.SeamFinder(seamType, 'OptionName',optionValue, ...) + % obj = cv.SeamFinder(seamType) + % obj = cv.SeamFinder(seamType, 'OptionName',optionValue, ...) % % ## Input % * __seamType__ seam estimator type. One of: - % * __NoSeamFinder__ Stub seam estimator which does nothing. - % * __VoronoiSeamFinder__ Voronoi diagram-based pairwise - % seam estimator. - % * __DpSeamFinder__ - % * __GraphCutSeamFinder__ Minimum graph cut-based seam - % estimator. See details in [V03]. - % * __GraphCutSeamFinderGpu__ (requires CUDA) + % * __NoSeamFinder__ Stub seam estimator which does nothing. + % * __VoronoiSeamFinder__ Voronoi diagram-based pairwise seam + % estimator. + % * __DpSeamFinder__ + % * __GraphCutSeamFinder__ Minimum graph cut-based seam + % estimator. See details in [V03]. + % * __GraphCutSeamFinderGpu__ (requires CUDA) % % ## Options % The following are options for the various algorithms: % % ### `DpSeamFinder` % * __CostFunction__ default 'Color'. One of: - % * __Color__ - % * __ColorGrad__ + % * __Color__ + % * __ColorGrad__ % % ### `GraphCutSeamFinder` % * __CostType__ default 'ColorGrad'. One of: - % * __Color__ - % * __ColorGrad__ + % * __Color__ + % * __ColorGrad__ % * __TerminalCost__ default 10000.0 % * __BadRegionPenaly__ default 1000.0 % @@ -55,7 +56,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.SeamFinder % @@ -66,7 +67,10 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() + % + % ## Output + % * __typename__ Name of C++ type % typename = SeamFinder_(this.id, 'typeid'); end @@ -77,12 +81,12 @@ function delete(this) function masks = find(this, src, corners, masks) %FIND Estimates seams % - % masks = obj.find(src, corners, masks) + % masks = obj.find(src, corners, masks) % % ## Input % * __src__ Source cell-array of images. % * __corners__ Source image top-left corners, cell-array of 2D - % points `{[x,y], ...}`. + % points `{[x,y], ...}`. % * __masks__ Source cell-array of image masks to update. 
% % ## Output diff --git a/+cv/ShapeContextDistanceExtractor.m b/+cv/ShapeContextDistanceExtractor.m index e4e87f889..f0184ff80 100644 --- a/+cv/ShapeContextDistanceExtractor.m +++ b/+cv/ShapeContextDistanceExtractor.m @@ -15,7 +15,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -80,36 +81,36 @@ function this = ShapeContextDistanceExtractor(varargin) %SHAPECONTEXTDISTANCEEXTRACTOR Constructor % - % obj = cv.ShapeContextDistanceExtractor() - % obj = cv.ShapeContextDistanceExtractor('OptionName',optionValue, ...) + % obj = cv.ShapeContextDistanceExtractor() + % obj = cv.ShapeContextDistanceExtractor('OptionName',optionValue, ...) % % ## Options % * __AngularBins__ see - % cv.ShapeContextDistanceExtractor.AngularBins, default 12 + % cv.ShapeContextDistanceExtractor.AngularBins, default 12 % * __RadialBins__ see - % cv.ShapeContextDistanceExtractor.RadialBins, default 4 + % cv.ShapeContextDistanceExtractor.RadialBins, default 4 % * __InnerRadius__ see - % cv.ShapeContextDistanceExtractor.InnerRadius, default 0.2 + % cv.ShapeContextDistanceExtractor.InnerRadius, default 0.2 % * __OuterRadius__ see - % cv.ShapeContextDistanceExtractor.OuterRadius, default 2 + % cv.ShapeContextDistanceExtractor.OuterRadius, default 2 % * __Iterations__ see - % cv.ShapeContextDistanceExtractor.Iterations, default 3 + % cv.ShapeContextDistanceExtractor.Iterations, default 3 % * __CostExtractor__ an algorithm that defines the cost matrix - % between descriptors, specified as - % `{comparerType, 'OptionName',optionValue, ...}`. - % See cv.ShapeContextDistanceExtractor.setCostExtractor, - % where `comparerType` is one of: - % * __NormHistogramCostExtractor__ - % * __EMDHistogramCostExtractor__ - % * __ChiHistogramCostExtractor__ (default) - % * __EMDL1HistogramCostExtractor__ + % between descriptors, specified as + % `{comparerType, 'OptionName',optionValue, ...}`. See + % cv.ShapeContextDistanceExtractor.setCostExtractor, where + % `comparerType` is one of: + % * __NormHistogramCostExtractor__ + % * __EMDHistogramCostExtractor__ + % * __ChiHistogramCostExtractor__ (default) + % * __EMDL1HistogramCostExtractor__ % * __TransformAlgorithm__ an algorithm that defines the aligning - % transformation, specified as - % `{transformerType, 'OptionName',optionValue, ...}`. - % See cv.ShapeContextDistanceExtractor.setTransformAlgorithm, - % where `transformerType` is one of: - % * __ThinPlateSplineShapeTransformer__ (default) - % * __AffineTransformer__ + % transformation, specified as + % `{transformerType, 'OptionName',optionValue, ...}`. See + % cv.ShapeContextDistanceExtractor.setTransformAlgorithm, where + % `transformerType` is one of: + % * __ThinPlateSplineShapeTransformer__ (default) + % * __AffineTransformer__ % % See also: cv.ShapeContextDistanceExtractor.computeDistance % @@ -119,7 +120,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.ShapeContextDistanceExtractor % @@ -130,13 +131,13 @@ function delete(this) function setImages(this, image1, image2) %SETIMAGES Set the images that correspond to each shape, used in the calculation of the Image Appearance cost % - % obj.setImages(image1, image2) + % obj.setImages(image1, image2) % % ## Input % * __image1__ Image corresponding to the shape defined by - % `contours1`. + % `contours1`. % * __image2__ Image corresponding to the shape defined by - % `contours2`. + % `contours2`. 
% % See also: cv.ShapeContextDistanceExtractor.getImages % @@ -146,20 +147,19 @@ function setImages(this, image1, image2) function setCostExtractor(this, comparerType, varargin) %SETCOSTEXTRACTOR Set the algorithm used for building the shape context descriptor cost matrix % - % obj.setCostExtractor(comparerType) - % obj.setCostExtractor(comparerType, 'OptionName',optionValue,...) + % obj.setCostExtractor(comparerType) + % obj.setCostExtractor(comparerType, 'OptionName',optionValue,...) % % ## Input % * __comparerType__ an algorithm that defines the cost matrix - % between descriptors. One of: - % * __NormHistogramCostExtractor__ A norm based cost - % extraction. See cv.norm - % * __EMDHistogramCostExtractor__ An EMD based cost - % extraction. See cv.EMD - % * __ChiHistogramCostExtractor__ An Chi based cost - % extraction. - % * __EMDL1HistogramCostExtractor__ An EMD-L1 based - % cost extraction. See cv.EMDL1 + % between descriptors. One of: + % * __NormHistogramCostExtractor__ A norm based cost extraction. + % See cv.norm + % * __EMDHistogramCostExtractor__ An EMD based cost extraction. + % See cv.EMD + % * __ChiHistogramCostExtractor__ A Chi based cost extraction. + % * __EMDL1HistogramCostExtractor__ An EMD-L1 based cost + % extraction. See cv.EMDL1 % % ## Options % The following are options accepted by all algorithms: @@ -171,8 +171,8 @@ function setCostExtractor(this, comparerType, varargin) % % ### `NormHistogramCostExtractor`, `EMDHistogramCostExtractor` % * __NormFlag__ default 'L2'. This parameter matches the - % `NormType` and `DistType` flags of cv.norm and cv.EMD - % respectively. + % `NormType` and `DistType` flags of cv.norm and cv.EMD + % respectively. % % See also: cv.ShapeContextDistanceExtractor.getCostExtractor % @@ -182,25 +182,24 @@ function setCostExtractor(this, comparerType, varargin) function setTransformAlgorithm(this, transformerType, varargin) %SETTRANSFORMALGORITHM Set the algorithm used for aligning the shapes % - % obj.setTransformAlgorithm(transformerType) - % obj.setTransformAlgorithm(transformerType, 'OptionName',optionValue,...) + % obj.setTransformAlgorithm(transformerType) + % obj.setTransformAlgorithm(transformerType, 'OptionName',optionValue,...) % % ## Input % * __transformerType__ an algorithm that defines the aligning - % transformation. One of: - % * __ThinPlateSplineShapeTransformer__ Definition of the - % transformation occupied in the paper [Bookstein89]. - % * __AffineTransformer__ Wrapper class for the OpenCV - % Affine Transformation algorithm. - % See cv.estimateRigidTransform + % transformation. One of: + % * __ThinPlateSplineShapeTransformer__ Definition of the + % transformation described in the paper [Bookstein89]. + % * __AffineTransformer__ Wrapper class for the OpenCV Affine + % Transformation algorithm. See cv.estimateRigidTransform % % ## Options % The following are options for the various algorithms: % % ### `ThinPlateSplineShapeTransformer` % * __RegularizationParameter__ The regularization parameter for - % relaxing the exact interpolation requirements of the TPS - % algorithm.
default 0 % % ### `AffineTransformer` % * __FullAffine__ see cv.estimateRigidTransform, default true @@ -218,13 +217,13 @@ function setTransformAlgorithm(this, transformerType, varargin) function [image1, image2] = getImages(this) %GETIMAGES Get the images that correspond to each shape, used in the calculation of the Image Appearance cost % - % [image1, image2] = obj.getImages() + % [image1, image2] = obj.getImages() % % ## Output % * __image1__ Image corresponding to the shape defined by - % `contours1`. + % `contours1`. % * __image2__ Image corresponding to the shape defined by - % `contours2`. + % `contours2`. % % See also: cv.ShapeContextDistanceExtractor.setImages % @@ -234,7 +233,7 @@ function setTransformAlgorithm(this, transformerType, varargin) function value = getCostExtractor(this) %GETCOSTEXTRACTOR Get the current algorithm used for building the shape context descriptor cost matrix % - % value = obj.getCostExtractor() + % value = obj.getCostExtractor() % % ## Output % * __value__ output scalar struct @@ -247,7 +246,7 @@ function setTransformAlgorithm(this, transformerType, varargin) function value = getTransformAlgorithm(this) %GETTRANSFORMALGORITHM Get the current algorithm used for aligning the shapes % - % value = obj.getTransformAlgorithm() + % value = obj.getTransformAlgorithm() % % ## Output % * __value__ output scalar struct @@ -263,14 +262,14 @@ function setTransformAlgorithm(this, transformerType, varargin) function dist = computeDistance(this, contour1, contour2) %COMPUTEDISTANCE Compute the shape distance between two shapes defined by their contours % - % dist = obj.computeDistance(contour1, contour2) + % dist = obj.computeDistance(contour1, contour2) % % ## Input % * __contour1__ Contour defining first shape. A numeric - % Nx2/Nx1x2/1xNx2 array or a cell-array of 2D points - % `{[x,y], ...}` + % Nx2/Nx1x2/1xNx2 array or a cell-array of 2D points + % `{[x,y], ...}` % * __contour2__ Contour defining second shape. Same format as - % `contours1`. + % `contour1`. % % ## Output % * __dist__ output distance. @@ -286,7 +285,7 @@ function setTransformAlgorithm(this, transformerType, varargin) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.ShapeContextDistanceExtractor.empty, % cv.ShapeContextDistanceExtractor.load @@ -297,11 +296,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.ShapeContextDistanceExtractor.clear, % cv.ShapeContextDistanceExtractor.load @@ -312,7 +311,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -328,21 +327,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load.
% % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -356,11 +355,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.ShapeContextDistanceExtractor.save, % cv.ShapeContextDistanceExtractor.load diff --git a/+cv/ShapeTransformer.m b/+cv/ShapeTransformer.m index b2865685a..e4497d915 100644 --- a/+cv/ShapeTransformer.m +++ b/+cv/ShapeTransformer.m @@ -6,7 +6,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent, Hidden) @@ -25,26 +26,25 @@ function this = ShapeTransformer(transformerType, varargin) %SHAPETRANSFORMER Constructor % - % obj = cv.ShapeTransformer(transformerType) - % obj = cv.ShapeTransformer(transformerType, 'OptionName',optionValue, ...) + % obj = cv.ShapeTransformer(transformerType) + % obj = cv.ShapeTransformer(transformerType, 'OptionName',optionValue, ...) % % ## Input % * __transformerType__ an algorithm that defines the aligning - % transformation. One of: - % * __ThinPlateSplineShapeTransformer__ Definition of the - % transformation occupied in the paper [Bookstein89]. - % * __AffineTransformer__ Wrapper class for the OpenCV - % Affine Transformation algorithm. - % See cv.estimateRigidTransform, cv.warpAffine, and - % cv.transform + % transformation. One of: + % * __ThinPlateSplineShapeTransformer__ Definition of the + % transformation described in the paper [Bookstein89]. + % * __AffineTransformer__ Wrapper class for the OpenCV Affine + % Transformation algorithm. See cv.estimateRigidTransform, + % cv.warpAffine, and cv.transform % % ## Options % The following are options for the various algorithms: % % ### `ThinPlateSplineShapeTransformer` % * __RegularizationParameter__ The regularization parameter for - % relaxing the exact interpolation requirements of the TPS - % algorithm. default 0 + % relaxing the exact interpolation requirements of the TPS + % algorithm. default 0 % % ### `AffineTransformer` % * __FullAffine__ see cv.estimateRigidTransform, default true @@ -62,7 +62,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.ShapeTransformer % @@ -76,14 +76,14 @@ function delete(this) function estimateTransformation(this, transformingShape, targetShape, matches) %ESTIMATETRANSFORMATION Estimate the transformation parameters of the current transformer algorithm, based on point matches % - % obj.estimateTransformation(transformingShape, targetShape, matches) + % obj.estimateTransformation(transformingShape, targetShape, matches) % % ## Input % * __transformingShape__ Contour defining first shape.
A numeric - % Nx2/Nx1x2/1xNx2 array or a cell-array of 2D points - % `{[x,y], ...}` + % Nx2/Nx1x2/1xNx2 array or a cell-array of 2D points + % `{[x,y], ...}` % * __targetShape__ Contour defining second shape (target). Same - % format as `transformingShape`. + % format as `transformingShape`. % * __matches__ Standard vector of matches between points. % % See also: cv.ShapeTransformer.applyTransformation, @@ -95,7 +95,7 @@ function estimateTransformation(this, transformingShape, targetShape, matches) function [cost, output] = applyTransformation(this, input) %APPLYTRANSFORMATION Apply a transformation, given pre-estimated transformation parameters % - % [cost, output] = obj.applyTransformation(input) + % [cost, output] = obj.applyTransformation(input) % % ## Input % * __input__ Contour (set of points) to apply the transformation. @@ -112,8 +112,8 @@ function estimateTransformation(this, transformingShape, targetShape, matches) function output = warpImage(this, transformingImage, varargin) %WARPIMAGE Apply a transformation, given pre-estimated transformation parameters, to an image % - % output = obj.warpImage(transformingImage) - % output = obj.warpImage(..., 'OptionName',optionValue, ...) + % output = obj.warpImage(transformingImage) + % output = obj.warpImage(..., 'OptionName',optionValue, ...) % % ## Input % * __transformingImage__ Input image. @@ -139,7 +139,7 @@ function estimateTransformation(this, transformingShape, targetShape, matches) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.ShapeTransformer.empty, cv.ShapeTransformer.load % @@ -149,11 +149,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.ShapeTransformer.clear, cv.ShapeTransformer.load % @@ -163,7 +163,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -179,21 +179,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string).
The previous @@ -207,11 +207,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.ShapeTransformer.save, cv.ShapeTransformer.load % diff --git a/+cv/SimilarRects.m b/+cv/SimilarRects.m index 026e29bd1..b6efa0ac0 100644 --- a/+cv/SimilarRects.m +++ b/+cv/SimilarRects.m @@ -1,7 +1,7 @@ %SIMILARRECTS Class for grouping object candidates, detected by Cascade Classifier, HOG etc. % -% b = cv.SimilarRects(r1, r2) -% b = cv.SimilarRects(r1, r2, 'OptionName',optionValue, ...) +% b = cv.SimilarRects(r1, r2) +% b = cv.SimilarRects(r1, r2, 'OptionName',optionValue, ...) % % ## Input % * __r1__ First input rectangle, 4-element vector `[x,y,w,h]`. @@ -12,7 +12,7 @@ % % ## Options % * __EPS__ Relative difference between sides of the rectangles to consider -% similar. default 0.2 +% similar. default 0.2 % % See also: cv.groupRectangles, cv.groupRectangles_meanshift % diff --git a/+cv/SimpleBlobDetector.m b/+cv/SimpleBlobDetector.m index 188e469f7..9e890f80d 100644 --- a/+cv/SimpleBlobDetector.m +++ b/+cv/SimpleBlobDetector.m @@ -42,15 +42,16 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = SimpleBlobDetector(varargin) %SIMPLEBLOBDETECTOR Constructor % - % obj = cv.SimpleBlobDetector() - % obj = cv.SimpleBlobDetector(..., 'OptionName',optionValue, ...) + % obj = cv.SimpleBlobDetector() + % obj = cv.SimpleBlobDetector(..., 'OptionName',optionValue, ...) % % ## Options % * __ThresholdStep__ default 10 @@ -81,7 +82,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.SimpleBlobDetector % @@ -92,7 +93,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -106,7 +107,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.SimpleBlobDetector.empty, % cv.SimpleBlobDetector.load @@ -117,11 +118,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.SimpleBlobDetector.clear, % cv.SimpleBlobDetector.load @@ -132,7 +133,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -148,21 +149,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. 
% % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -176,11 +177,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.SimpleBlobDetector.save, cv.SimpleBlobDetector.load % @@ -193,28 +194,26 @@ function load(this, fname_or_str, varargin) function keypoints = detect(this, img, varargin) %DETECT Detects keypoints in an image or image set % - % keypoints = obj.detect(img) - % keypoints = obj.detect(imgs) - % [...] = obj.detect(..., 'OptionName',optionValue, ...) + % keypoints = obj.detect(img) + % keypoints = obj.detect(imgs) + % [...] = obj.detect(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Image (first variant), 8-bit grayscale image where - % keypoints (blobs) are detected. + % keypoints (blobs) are detected. % * __imgs__ Image set (second variant), cell array of images. % % ## Output - % * __keypoints__ The detected keypoints. In the first variant, - % a 1-by-N structure array. In the second variant of the - % method, `keypoints{i}` is a set of keypoints detected in - % `imgs{i}`. + % * __keypoints__ The detected keypoints. In the first variant, a + % 1-by-N structure array. In the second variant of the method, + % `keypoints{i}` is a set of keypoints detected in `imgs{i}`. % % ## Options % * __Mask__ A mask specifying where to look for keypoints - % (optional). It must be a logical or 8-bit integer matrix - % with non-zero values in the region of interest. In the - % second variant, it is a cell-array of masks for each input - % image, `masks{i}` is a mask for `imgs{i}`. - % Not set by default. + % (optional). It must be a logical or 8-bit integer matrix with + % non-zero values in the region of interest. In the second + % variant, it is a cell-array of masks for each input image, + % `masks{i}` is a mask for `imgs{i}`. Not set by default. % % See also: cv.SimpleBlobDetector.SimpleBlobDetector % diff --git a/+cv/Sobel.m b/+cv/Sobel.m index cefe328ed..f1fed5df4 100644 --- a/+cv/Sobel.m +++ b/+cv/Sobel.m @@ -1,36 +1,36 @@ %SOBEL Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator % -% dst = cv.Sobel(src) -% dst = cv.Sobel(..., 'OptionName', optionValue, ...) +% dst = cv.Sobel(src) +% dst = cv.Sobel(..., 'OptionName', optionValue, ...) % % ## Input % * __src__ input image. % % ## Output % * __dst__ output image of the same size and the same number of channels as -% `src`. +% `src`. % % ## Options % * __KSize__ size of the extended Sobel kernel. Aperture size used to compute -% the second-derivative filters. The size must be positive and odd; it -% must be 1, 3, 5, or 7. default 3 +% the second-derivative filters. 
The size must be positive and odd; it must +% be 1, 3, 5, or 7. default 3 % * __XOrder__ Order of the derivative x. default 1 % * __YOrder__ Order of the derivative y. default 1 % * __Scale__ Optional scale factor for the computed derivative values. By -% default, no scaling is applied (see cv.getDerivKernels for -% details). default 1 +% default, no scaling is applied (see cv.getDerivKernels for details). +% default 1 % * __Delta__ Optional delta value that is added to the results prior to -% storing them in `dst`. default 0 +% storing them in `dst`. default 0 % * __BorderType__ Pixel extrapolation method. See cv.copyMakeBorder. -% default 'Default' +% default 'Default' % * __DDepth__ output image depth, see combinations below; in the case of -% 8-bit input images it will result in truncated derivatives. -% When `DDepth=-1` (default), the output image will have the same depth -% as the source. -% * SDepth = 'uint8' --> DDepth = -1, 'int16', 'single', 'double' -% * SDepth = 'uint16', 'int16' --> DDepth = -1, 'single', 'double' -% * SDepth = 'single' --> DDepth = -1, 'single', 'double' -% * SDepth = 'double' --> DDepth = -1, 'double' +% 8-bit input images it will result in truncated derivatives. When +% `DDepth=-1` (default), the output image will have the same depth as the +% source. +% * `SDepth = uint8 --> DDepth = -1, int16, single, double` +% * `SDepth = uint16, int16 --> DDepth = -1, single, double` +% * `SDepth = single --> DDepth = -1, single, double` +% * `SDepth = double --> DDepth = -1, double` % % In all cases except one, the `KSize x KSize` separable kernel is used to % calculate the derivative. When `KSize=1`, the `3x1` or `1x3` kernel is used @@ -41,16 +41,16 @@ % corresponds to the `3x3` Scharr filter that may give more accurate results % than the `3x3` Sobel. The Scharr aperture is: % -% [-3 0 3; -10 0 10; -3 0 3] +% [-3 0 3; -10 0 10; -3 0 3] % % for the x-derivative, or transposed for the y-derivative. % % The function calculates an image derivative by convolving the image with the % appropriate kernel: % -% partial^(XOrder+YOrder) src -% dst = --------------------------------------- -% partial x^(XOrder) partial y^(YOrder) +% partial^(XOrder+YOrder) src +% dst = --------------------------------------- +% partial x^(XOrder) partial y^(YOrder) % % The Sobel operators combine Gaussian smoothing and differentiation, so the % result is more or less resistant to the noise. Most often, the function is @@ -58,11 +58,11 @@ % `YOrder = 1`, `KSize = 3`) to calculate the first x- or y- image derivative. 
% The first case corresponds to a kernel of: % -% [-1 0 1; -2 0 2; -1 0 1] +% [-1 0 1; -2 0 2; -1 0 1] % % The second case corresponds to a kernel of: % -% [-1 -2 -1; 0 0 0; 1 2 1] +% [-1 -2 -1; 0 0 0; 1 2 1] % % See also: cv.Scharr, cv.Laplacian, cv.sepFilter2D, cv.filter2D, % cv.GaussianBlur, cv.cartToPolar, edge, imfilter, fspecial diff --git a/+cv/SparsePyrLKOpticalFlow.m b/+cv/SparsePyrLKOpticalFlow.m index 82275638a..8e0cebe9c 100644 --- a/+cv/SparsePyrLKOpticalFlow.m +++ b/+cv/SparsePyrLKOpticalFlow.m @@ -8,7 +8,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -29,7 +30,7 @@ function this = SparsePyrLKOpticalFlow() %SPARSEPYRLKOPTICALFLOW Creates instance of SparsePyrLKOpticalFlow % - % obj = cv.SparsePyrLKOpticalFlow() + % obj = cv.SparsePyrLKOpticalFlow() % % See also: cv.SparsePyrLKOpticalFlow.calc % @@ -39,7 +40,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.SparsePyrLKOpticalFlow % @@ -53,30 +54,29 @@ function delete(this) function varargout = calc(this, prevImg, nextImg, prevPts, varargin) %CALC Calculates a sparse optical flow % - % nextPts = obj.calc(prevImg, nextImg, prevPts) - % [nextPts, status, err] = obj.calc(...) - % [...] = obj.calc(..., 'OptionName', optionValue, ...) + % nextPts = obj.calc(prevImg, nextImg, prevPts) + % [nextPts, status, err] = obj.calc(...) + % [...] = obj.calc(..., 'OptionName', optionValue, ...) % % ## Input % * __prevImg__ First input image. % * __nextImg__ Second input image of the same size and the same - % type as `prevImg`. + % type as `prevImg`. % * __prevPts__ Vector of 2D points for which the flow needs to be - % found. + % found. % % ## Output % * __nextPts__ Output vector of 2D points containing the - % calculated new positions of input features in the second - % image. + % calculated new positions of input features in the second image. % * __status__ Output status vector. Each element of the vector is - % set to 1 if the flow for the corresponding features has - % been found. Otherwise, it is set to 0. + % set to 1 if the flow for the corresponding features has been + % found. Otherwise, it is set to 0. % * __err__ Optional output vector that contains error response - % for each point (inverse confidence). + % for each point (inverse confidence). % % ## Options % * __InitialFlow__ Vector of 2D points to be used for the initial - % estimate of `nextPts`. Not set by default. + % estimate of `nextPts`. Not set by default. % % See also: cv.SparsePyrLKOpticalFlow, cv.calcOpticalFlowPyrLK % @@ -89,7 +89,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.SparsePyrLKOpticalFlow.empty % @@ -99,11 +99,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.SparsePyrLKOpticalFlow.clear % @@ -113,11 +113,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. 
% % See also: cv.SparsePyrLKOpticalFlow.save, cv.SparsePyrLKOpticalFlow.load % @@ -127,7 +127,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -142,21 +142,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from a file storage. % The previous model state is discarded. diff --git a/+cv/StereoBM.m b/+cv/StereoBM.m index 0ec5b360d..dddd3d2ce 100644 --- a/+cv/StereoBM.m +++ b/+cv/StereoBM.m @@ -6,9 +6,9 @@ % % ## Usage % - % bm = cv.StereoBM('NumDisparities',64, ...); - % bm.MinDisparity = 0; - % disparity = bm.compute(left, right); + % bm = cv.StereoBM('NumDisparities',64, ...); + % bm.MinDisparity = 0; + % disparity = bm.compute(left, right); % % See also: cv.StereoBM.StereoBM, cv.StereoBM.compute, cv.StereoSGBM, % cv.getValidDisparityROI, cv.filterSpeckles, cv.validateDisparity, @@ -68,22 +68,21 @@ function this = StereoBM(varargin) %STEREOBM Creates StereoBM object % - % bm = cv.StereoBM() - % bm = cv.StereoBM('OptionName', optionValue, ...) + % bm = cv.StereoBM() + % bm = cv.StereoBM('OptionName', optionValue, ...) % % ## Options % * __NumDisparities__ the disparity search range. For each pixel - % algorithm will find the best disparity from 0 (default - % minimum disparity) to `NumDisparities`. The search range - % can then be shifted by changing the minimum disparity. - % default 0 (which uses 64 by default). + % the algorithm will find the best disparity from 0 (default minimum + % disparity) to `NumDisparities`. The search range can then be + % shifted by changing the minimum disparity. default 0 (which + % uses 64 by default). % * __BlockSize__ the linear size of the blocks compared by the - % algorithm. The size should be odd (as the block is - % centered at the current pixel). Larger block size implies - % smoother, though less accurate disparity map. Smaller - % block size gives more detailed disparity map, but there is - % is higher chance for algorithm to find a wrong - % correspondence. default 21. + % algorithm. The size should be odd (as the block is centered at + % the current pixel). Larger block size implies smoother, though + % less accurate disparity map. Smaller block size gives more + % detailed disparity map, but there is a higher chance for the + % algorithm to find a wrong correspondence. default 21. % % The function creates a cv.StereoBM object.
You can then call % cv.StereoBM.compute to compute disparity for a specific stereo @@ -97,7 +96,7 @@ function delete(this) %DELETE Destructor % - % bm.delete() + % bm.delete() % % See also: cv.StereoBM % @@ -108,20 +107,19 @@ function delete(this) function disparity = compute(this, left, right) %COMPUTE Computes disparity map for the specified stereo pair % - % disparity = bm.compute(left, right) + % disparity = bm.compute(left, right) % % ## Input % * __left__ Left 8-bit single-channel image. % * __right__ Right image of the same size and the same type as - % the left one. + % the left one. % % ## Output % * __disparity__ Output disparity map. It has the same size as - % the input images. Some algorithms, like cv.StereoBM or - % cv.StereoSGBM compute 16-bit fixed-point disparity map - % (where each disparity value has 4 fractional bits), - % whereas other algorithms output 32-bit floating-point - % disparity map. + % the input images. Some algorithms, like cv.StereoBM or + % cv.StereoSGBM compute 16-bit fixed-point disparity map (where + % each disparity value has 4 fractional bits), whereas other + % algorithms output 32-bit floating-point disparity map. % % The method executes the BM algorithm on a rectified stereo pair. % Note that the method is not constant, thus you should not use @@ -139,7 +137,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.StereoBM.empty % @@ -149,11 +147,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if algorithm object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the algorithm object is empty - % (e.g. in the very beginning or after unsuccessful read). + % (e.g. in the very beginning or after unsuccessful read). % % See also: cv.StereoBM.clear % @@ -163,11 +161,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.StereoBM.save, cv.StereoBM.load % @@ -177,7 +175,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -192,21 +190,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from a file storage. 
% The previous model state is discarded. diff --git a/+cv/StereoSGBM.m b/+cv/StereoSGBM.m index 81c4e99b4..6a1827adf 100644 --- a/+cv/StereoSGBM.m +++ b/+cv/StereoSGBM.m @@ -23,9 +23,9 @@ % % ## Usage % - % bm = cv.StereoSGBM('MinDisparity',0, ...); - % bm.BlockSize = ...; - % disparity = bm.compute(left, right); + % bm = cv.StereoSGBM('MinDisparity',0, ...); + % bm.BlockSize = ...; + % disparity = bm.compute(left, right); % % ## References % [HH08]: @@ -83,7 +83,7 @@ % % * __SGBM__ 5-directional version of the algorithm % * __HH__ 8-dir mode, runs the full-scale two-pass dynamic - % programming algorithm (slowest, memory intensive) + % programming algorithm (slowest, memory intensive) % * __SGBM3Way__ 3-dir mode (fastest) % * __HH4__ 4-directional variation of SGBM Mode @@ -93,60 +93,58 @@ function this = StereoSGBM(varargin) %STEREOSGBM Creates StereoSGBM object % - % bm = cv.StereoSGBM() - % bm = cv.StereoSGBM('OptionName', optionValue, ...) + % bm = cv.StereoSGBM() + % bm = cv.StereoSGBM('OptionName', optionValue, ...) % % ## Options % * __MinDisparity__ Minimum possible disparity value. Normally, - % it is zero but sometimes rectification algorithms can - % shift images, so this parameter needs to be adjusted - % accordingly. default 0 + % it is zero but sometimes rectification algorithms can shift + % images, so this parameter needs to be adjusted accordingly. + % default 0 % * __NumDisparities__ Maximum disparity minus minimum disparity. - % The value is always greater than zero. In the current - % implementation, this parameter must be divisible by 16. - % default 16 + % The value is always greater than zero. In the current + % implementation, this parameter must be divisible by 16. + % default 16 % * __BlockSize__ Matched block size. It must be an odd number >=1. - % Normally, it should be somewhere in the 3..11 range. - % default 3 + % Normally, it should be somewhere in the 3..11 range. default 3 % * __P1__ The first parameter controlling the disparity - % smoothness. See `P2`. default 0 (which uses 2). + % smoothness. See `P2`. default 0 (which uses 2). % * __P2__ The second parameter controlling the disparity - % smoothness. The larger the values are, the smoother the - % disparity is. `P1` is the penalty on the disparity change - % by plus or minus 1 between neighbor pixels. `P2` is the - % penalty on the disparity change by more than 1 between - % neighbor pixels. The algorithm requires `P2 > P1`. - % (Reasonably good `P1` and `P2` values are like - % `8*number_of_image_channels*BlockSize*BlockSize` and - % `32*number_of_image_channels*BlockSize*BlockSize` - % respectively). default 0 (which uses `max(5,P1+1)`). + % smoothness. The larger the values are, the smoother the + % disparity is. `P1` is the penalty on the disparity change by + % plus or minus 1 between neighbor pixels. `P2` is the penalty + % on the disparity change by more than 1 between neighbor + % pixels. The algorithm requires `P2 > P1`. (Reasonably good + % `P1` and `P2` values are like + % `8*number_of_image_channels*BlockSize*BlockSize` and + % `32*number_of_image_channels*BlockSize*BlockSize` + % respectively). default 0 (which uses `max(5,P1+1)`). % * __Disp12MaxDiff__ Maximum allowed difference (in integer pixel - % units) in the left-right disparity check. Set it to a - % non-positive value to disable the check. default 0 + % units) in the left-right disparity check. Set it to a + % non-positive value to disable the check. 
default 0 % * __PreFilterCap__ Truncation value for the prefiltered image - % pixels. The algorithm first computes x-derivative at each - % pixel and clips its value by `[-PreFilterCap,PreFilterCap]` - % interval. The result values are passed to the - % Birchfield-Tomasi pixel cost function. default 0 + % pixels. The algorithm first computes x-derivative at each + % pixel and clips its value by `[-PreFilterCap,PreFilterCap]` + % interval. The result values are passed to the + % Birchfield-Tomasi pixel cost function. default 0 % * __UniquenessRatio__ Margin in percentage by which the best - % (minimum) computed cost function value should "win" the - % second best value to consider the found match correct. - % Normally, a value within the 5-15 range is good enough. - % A negative value uses 10. default 0 + % (minimum) computed cost function value should "win" the second + % best value to consider the found match correct. Normally, a + % value within the 5-15 range is good enough. A negative value + % uses 10. default 0 % * __SpeckleWindowSize__ Maximum size of smooth disparity regions - % to consider their noise speckles and invalidate. Set it to - % 0 to disable speckle filtering. Otherwise, set it - % somewhere in the 50-200 range. default 0 + % to consider their noise speckles and invalidate. Set it to 0 + % to disable speckle filtering. Otherwise, set it somewhere in + % the 50-200 range. default 0 % * __SpeckleRange__ Maximum disparity variation within each - % connected component. If you do speckle filtering, set the - % parameter to a positive value, it will be implicitly - % multiplied by 16. Normally, 1 or 2 is good enough. - % default 0 + % connected component. If you do speckle filtering, set the + % parameter to a positive value, it will be implicitly + % multiplied by 16. Normally, 1 or 2 is good enough. default 0 % * __Mode__ Set it to 'HH' to run the full-scale two-pass dynamic - % programming algorithm. It will consume - % `O(W * H * NumDisparities)` bytes, which is large for - % 640x480 stereo and huge for HD-size pictures. By default, - % it is set to 'SGBM'. + % programming algorithm. It will consume + % `O(W * H * NumDisparities)` bytes, which is large for 640x480 + % stereo and huge for HD-size pictures. By default, it is set to + % 'SGBM'. % % See also: cv.StereoSGBM.compute % @@ -156,7 +154,7 @@ function delete(this) %DELETE Destructor % - % bm.delete() + % bm.delete() % % See also: cv.StereoSGBM % @@ -167,20 +165,19 @@ function delete(this) function disparity = compute(this, left, right) %COMPUTE Computes disparity map for the specified stereo pair % - % disparity = bm.compute(left, right) + % disparity = bm.compute(left, right) % % ## Input % * __left__ Left 8-bit single-channel or 3-channel image. % * __right__ Right image of the same size and the same type as - % the left one. + % the left one. % % ## Output % * __disparity__ Output disparity map. It has the same size as - % the input images. Some algorithms, like cv.StereoBM or - % cv.StereoSGBM compute 16-bit fixed-point disparity map - % (where each disparity value has 4 fractional bits), - % whereas other algorithms output 32-bit floating-point - % disparity map. + % the input images. Some algorithms, like cv.StereoBM or + % cv.StereoSGBM compute 16-bit fixed-point disparity map (where + % each disparity value has 4 fractional bits), whereas other + % algorithms output 32-bit floating-point disparity map. % % The method executes the SGBM algorithm on a rectified stereo % pair. 
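For reference, a minimal sketch of the disparity workflow documented above (the image file names and parameter values are illustrative assumptions, not part of the class API). Since the returned map is 16-bit fixed-point with 4 fractional bits, dividing by 16 recovers disparities in pixels:

    % assumes left.png/right.png are a rectified stereo pair (placeholder names)
    left = rgb2gray(imread('left.png'));
    right = rgb2gray(imread('right.png'));
    bm = cv.StereoSGBM('MinDisparity',0, 'NumDisparities',64, 'BlockSize',5);
    D = bm.compute(left, right);   % int16 fixed-point disparity, 4 fractional bits
    imshow(double(D) / 16, [])     % convert to pixels for display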
@@ -196,7 +193,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.StereoSGBM.empty % @@ -206,11 +203,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if algorithm object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the algorithm object is empty - % (e.g. in the very beginning or after unsuccessful read). + % (e.g. in the very beginning or after unsuccessful read). % % See also: cv.StereoSGBM.clear % @@ -220,11 +217,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.StereoSGBM.save, cv.StereoSGBM.load % @@ -234,7 +231,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -249,21 +246,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from a file storage. % The previous model state is discarded. diff --git a/+cv/Stitcher.m b/+cv/Stitcher.m index c77629b0d..596421aac 100644 --- a/+cv/Stitcher.m +++ b/+cv/Stitcher.m @@ -6,7 +6,7 @@ % stability and quality of the final images at least being familiar with % the theory is recommended. % - % ![image](http://docs.opencv.org/3.1.0/StitchingPipeline.jpg) + % ![image](https://docs.opencv.org/3.3.1/StitchingPipeline.jpg) % % This figure illustrates the stitching module pipeline implemented in the % cv.Stitcher class. Using that class it's possible to configure/remove @@ -54,7 +54,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -77,24 +78,24 @@ function this = Stitcher(varargin) %STITCHER Creates a Stitcher configured in one of the stitching modes % - % obj = cv.Stitcher() - % obj = cv.Stitcher('OptionName',optionValue, ...) + % obj = cv.Stitcher() + % obj = cv.Stitcher('OptionName',optionValue, ...) % % ## Options % * __Mode__ Scenario for stitcher operation. This is usually - % determined by source of images to stitch and their - % transformation. Default parameters will be chosen for - % operation in given scenario. Default 'Panorama'. One of: - % * __Panorama__ Mode for creating photo panoramas. 
Expects - % images under perspective transformation and projects - % resulting pano to sphere. See also - % `BestOf2NearestMatcher`, `SphericalWarper`. - % * __Scans__ Mode for composing scans. Expects images under - % affine transformation does not compensate exposure - % by default. See also `AffineBestOf2NearestMatcher`, - % `AffineWarper` + % determined by source of images to stitch and their + % transformation. Default parameters will be chosen for + % operation in the given scenario. Default 'Panorama'. One of: + % * __Panorama__ Mode for creating photo panoramas. Expects + % images under perspective transformation and projects + % resulting pano to sphere. See also `BestOf2NearestMatcher`, + % `SphericalWarper`. + % * __Scans__ Mode for composing scans. Expects images under + % affine transformation and does not compensate exposure by + % default. See also `AffineBestOf2NearestMatcher`, + % `AffineWarper` % * __TryUseGPU__ Flag indicating whether GPU should be used - % whenever it's possible. default false + % whenever it's possible. default false % % See also: cv.Stitcher.stitch % @@ -104,7 +105,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.Stitcher % @@ -117,28 +118,28 @@ function delete(this) function pano = stitch(this, images, varargin) %STITCH Tries to stitch the given images % - % pano = obj.stitch(images) - % pano = obj.stitch(images, rois) - % [pano,status] = obj.stitch(...) + % pano = obj.stitch(images) + % pano = obj.stitch(images, rois) + % [pano, status] = obj.stitch(...) % % ## Input % * __images__ Input cell-array of images. % * __rois__ Optional region of interest rectangles, a cell-array - % of cell-arrays of 4-elements vectors - % `{{[x,y,w,h], ...}, ...}` or `{[x,y,w,h; ...], ...}`. + % of cell-arrays of 4-element vectors `{{[x,y,w,h], ...}, ...}` + % or `{[x,y,w,h; ...], ...}`. % % ## Output % * __pano__ Final pano. % * __status__ optional output status code. If not requested, the - % function throws an error if the operation fails. A string - % one of: - % * __OK__ - % * **ERR_NEED_MORE_IMGS** - % * **ERR_HOMOGRAPHY_EST_FAIL** - % * **ERR_CAMERA_PARAMS_ADJUST_FAIL** + % function throws an error if the operation fails. A string one + % of: + % * __OK__ + % * **ERR_NEED_MORE_IMGS** + % * **ERR_HOMOGRAPHY_EST_FAIL** + % * **ERR_CAMERA_PARAMS_ADJUST_FAIL** % - % The function throws an error if the stitch function returns - % a non-OK status code. + % The function throws an error if the stitch function returns a + % non-OK status code. % % See also: cv.Stitcher.estimateTransform, % cv.Stitcher.composePanorama % @@ -149,24 +150,24 @@ function delete(this) function estimateTransform(this, images, varargin) %ESTIMATETRANSFORM Estimate transformation % - % obj.estimateTransform(images) - % obj.estimateTransform(images, rois) - % status = obj.estimateTransform(...) + % obj.estimateTransform(images) + % obj.estimateTransform(images, rois) + % status = obj.estimateTransform(...) % % ## Input % * __images__ Input cell-array of images. % * __rois__ Optional region of interest rectangles, a cell-array - % of cell-arrays of 4-elements vectors - % `{{[x,y,w,h], ...}, ...}` or `{[x,y,w,h; ...], ...}`. + % of cell-arrays of 4-element vectors `{{[x,y,w,h], ...}, ...}` + % or `{[x,y,w,h; ...], ...}`. % % ## Output % * __status__ optional output status code. If not requested, the - % function throws an error if the operation fails. 
A string - % one of: - % * __OK__ - % * **ERR_NEED_MORE_IMGS** - % * **ERR_HOMOGRAPHY_EST_FAIL** - % * **ERR_CAMERA_PARAMS_ADJUST_FAIL** + % function throws an error if the operation fails. A string one + % of: + % * __OK__ + % * **ERR_NEED_MORE_IMGS** + % * **ERR_HOMOGRAPHY_EST_FAIL** + % * **ERR_CAMERA_PARAMS_ADJUST_FAIL** % % This function tries to match the given images and to estimate % rotations of each camera. @@ -182,9 +183,9 @@ function estimateTransform(this, images, varargin) function pano = composePanorama(this, varargin) %COMPOSEPANORAMA Compose panorama % - % pano = obj.composePanorama() - % pano = obj.composePanorama(images) - % [pano,status] = obj.composePanorama(...) + % pano = obj.composePanorama() + % pano = obj.composePanorama(images) + % [pano, status] = obj.composePanorama(...) % % ## Input % * __images__ Input cell-array of images. @@ -192,12 +193,12 @@ function estimateTransform(this, images, varargin) % ## Output % * __pano__ Final pano. % * __status__ optional output status code. If not requested, the - % function throws an error if the operation fails. A string - % one of: - % * __OK__ - % * **ERR_NEED_MORE_IMGS** - % * **ERR_HOMOGRAPHY_EST_FAIL** - % * **ERR_CAMERA_PARAMS_ADJUST_FAIL** + % function throws an error if the operation fails. A string one + % of: + % * __OK__ + % * **ERR_NEED_MORE_IMGS** + % * **ERR_HOMOGRAPHY_EST_FAIL** + % * **ERR_CAMERA_PARAMS_ADJUST_FAIL** % % This function tries to compose the given images (or images % stored internally from the other function calls) into the final @@ -215,7 +216,7 @@ function estimateTransform(this, images, varargin) function indices = component(this) %COMPONENT Image indices % - % indices = obj.component() + % indices = obj.component() % % ## Output % * __indices__ Vector of integer indices (0-based). @@ -228,18 +229,18 @@ function estimateTransform(this, images, varargin) function params = cameras(this) %CAMERAS Estimates camera parameters % - % params = obj.cameras() + % params = obj.cameras() % % ## Output % * __params__ Describes camera parameters, a struct-array with - % the following fields: - % * __aspect__ Aspect ratio. - % * __focal__ Focal length. - % * __ppx__ Principal point X. - % * __ppy__ Principal point Y. - % * __R__ 3x3 camera rotation matrix. - % * __t__ 3x1 camera translation vector. - % * __K__ 3x3 camera intrinsic parameters. + % the following fields: + % * __aspect__ Aspect ratio. + % * __focal__ Focal length. + % * __ppx__ Principal point X. + % * __ppy__ Principal point Y. + % * __R__ 3x3 camera rotation matrix. + % * __t__ 3x1 camera translation vector. + % * __K__ 3x3 camera intrinsic parameters. % % Note: Translation is assumed to be zero during the whole % stitching pipeline. @@ -252,7 +253,7 @@ function estimateTransform(this, images, varargin) function wscale = workScale(this) %WORKSCALE Work scale % - % wscale = obj.workScale() + % wscale = obj.workScale() % % ## Output % * __wscale__ scalar double value. 
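To make the stitch/status contract above concrete, a hedged usage sketch (the image file names are placeholders; only methods documented here are used):

    % assumes a.jpg/b.jpg/c.jpg are overlapping photos of one scene
    imgs = cellfun(@imread, {'a.jpg','b.jpg','c.jpg'}, 'UniformOutput',false);
    stitcher = cv.Stitcher('Mode','Panorama');
    [pano, status] = stitcher.stitch(imgs);
    if strcmp(status, 'OK')
        imshow(pano)
    else
        warning('stitching failed with status: %s', status);
    end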
@@ -267,7 +268,7 @@ function estimateTransform(this, images, varargin) function mask = getMatchingMask(this) %GETMATCHINGMASK % - % mask = obj.getMatchingMask() + % mask = obj.getMatchingMask() % % ## Output % * __mask__ @@ -280,7 +281,7 @@ function estimateTransform(this, images, varargin) function setMatchingMask(this, mask) %SETMATCHINGMASK % - % obj.setMatchingMask(mask) + % obj.setMatchingMask(mask) % % ## Input % * __mask__ @@ -293,7 +294,7 @@ function setMatchingMask(this, mask) function value = getFeaturesFinder(this) %GETFEATURESFINDER Get the features finder % - % value = obj.getFeaturesFinder() + % value = obj.getFeaturesFinder() % % ## Output % * __value__ output scalar struct. @@ -306,18 +307,17 @@ function setMatchingMask(this, mask) function setFeaturesFinder(this, finderType, varargin) %SETFEATURESFINDER Set the features finder % - % obj.setFeaturesFinder(finderType) - % obj.setFeaturesFinder(finderType, 'OptionName',optionValue, ...) + % obj.setFeaturesFinder(finderType) + % obj.setFeaturesFinder(finderType, 'OptionName',optionValue, ...) % % ## Input % * __finderType__ Feature finder type. One of: - % * __OrbFeaturesFinder__ ORB features finder. See cv.ORB - % * __AKAZEFeaturesFinder__ AKAZE features finder. See - % cv.AKAZE - % * __SurfFeaturesFinder__ SURF features finder. See cv.SURF - % (requires `xfeatures2d` module) - % * __SurfFeaturesFinderGpu__ (requires CUDA and - % `xfeatures2d` module) + % * __OrbFeaturesFinder__ ORB features finder. See cv.ORB + % * __AKAZEFeaturesFinder__ AKAZE features finder. See cv.AKAZE + % * __SurfFeaturesFinder__ SURF features finder. See cv.SURF + % (requires `xfeatures2d` module) + % * __SurfFeaturesFinderGpu__ (requires CUDA and `xfeatures2d` + % module) % % ## Options % The following are options for the various finders: @@ -355,7 +355,7 @@ function setFeaturesFinder(this, finderType, varargin) function value = getFeaturesMatcher(this) %GETFEATURESMATCHER Get the features matcher % - % value = obj.getFeaturesMatcher() + % value = obj.getFeaturesMatcher() % % ## Output % * __value__ output scalar struct. @@ -368,27 +368,26 @@ function setFeaturesFinder(this, finderType, varargin) function setFeaturesMatcher(this, matcherType, varargin) %SETFEATURESMATCHER Set the features matcher % - % obj.setFeaturesMatcher(matcherType) - % obj.setFeaturesMatcher(matcherType, 'OptionName',optionValue, ...) + % obj.setFeaturesMatcher(matcherType) + % obj.setFeaturesMatcher(matcherType, 'OptionName',optionValue, ...) % % ## Input % * __matcherType__ Feature matcher type. One of: - % * __BestOf2NearestMatcher__ A "best of 2 nearest" matcher. - % Features matcher which finds two best matches for - % each feature and leaves the best one only if the - % ratio between descriptor distances is greater than - % the threshold `MatchConf`. - % * __BestOf2NearestRangeMatcher__ - % * __AffineBestOf2NearestMatcher__ A "best of 2 nearest" - % matcher that expects affine trasformation between - % images. Features matcher similar to - % `BestOf2NearestMatcher` which finds two best matches - % for each feature and leaves the best one only if the - % ratio between descriptor distances is greater than - % the threshold `MatchConf`. - % Unlike `BestOf2NearestMatcher` this matcher uses - % affine transformation (affine trasformation estimate - % will be placed in `matches_info`). + % * __BestOf2NearestMatcher__ A "best of 2 nearest" matcher. 
+ % Features matcher which finds two best matches for each + % feature and leaves the best one only if the ratio between + % descriptor distances is greater than the threshold + % `MatchConf`. + % * __BestOf2NearestRangeMatcher__ + % * __AffineBestOf2NearestMatcher__ A "best of 2 nearest" + % matcher that expects affine transformation between images. + % Features matcher similar to `BestOf2NearestMatcher` which + % finds two best matches for each feature and leaves the best + % one only if the ratio between descriptor distances is + % greater than the threshold `MatchConf`. Unlike + % `BestOf2NearestMatcher` this matcher uses affine + % transformation (affine transformation estimate will be placed + % in `matches_info`). % % ## Options % The following are options accepted by all matchers: % @@ -396,11 +395,11 @@ function setFeaturesMatcher(this, matcherType, varargin) % * __TryUseGPU__ Should try to use GPU or not. default false % * __MatchConf__ Match distances ratio threshold. default 0.3 % * __NumMatchesThresh1__ Minimum number of matches required for - % the 2D projective transform estimation used in the inliers - % classification step. default 6 + % the 2D projective transform estimation used in the inliers + % classification step. default 6 % * __NumMatchesThresh2__ Minimum number of matches required for - % the 2D projective transform re-estimation on inliers. - % default 6 + % the 2D projective transform re-estimation on inliers. + % default 6 % % The following are options for the various algorithms: % % ### `BestOf2NearestRangeMatcher` % * __RangeWidth__ default 5 % % ### `AffineBestOf2NearestMatcher` % * __FullAffine__ whether to use full affine transformation with - % 6 degress of freedom or reduced transformation with - % 4 degrees of freedom using only rotation, translation and - % uniform scaling. default false + % 6 degrees of freedom or reduced transformation with 4 degrees + % of freedom using only rotation, translation and uniform + % scaling. default false % % The class uses `BestOf2NearestMatcher` by default. % @@ -424,7 +423,7 @@ function setFeaturesMatcher(this, matcherType, varargin) function value = getEstimator(this) %GETESTIMATOR Get the estimator % - % value = obj.getEstimator() + % value = obj.getEstimator() % % ## Output % * __value__ output scalar struct. @@ -437,17 +436,17 @@ function setFeaturesMatcher(this, matcherType, varargin) function setEstimator(this, estimatorType, varargin) %SETESTIMATOR Set the estimator % - % obj.setEstimator(estimatorType) - % obj.setEstimator(estimatorType, 'OptionName',optionValue, ...) + % obj.setEstimator(estimatorType) + % obj.setEstimator(estimatorType, 'OptionName',optionValue, ...) % % ## Input % * __estimatorType__ Estimator type. One of: - % * __HomographyBasedEstimator__ Homography based rotation - % estimator. - % * __AffineBasedEstimator__ Affine transformation based - % estimator. This estimator uses pairwise - % tranformations estimated by matcher to estimate - % final transformation for each camera. + % * __HomographyBasedEstimator__ Homography based rotation + % estimator. + % * __AffineBasedEstimator__ Affine transformation based + % estimator. This estimator uses pairwise transformations + % estimated by matcher to estimate final transformation for + % each camera. 
% % The following are options for the various algorithms: % @@ -463,7 +462,7 @@ function setEstimator(this, estimatorType, varargin) function value = getBundleAdjuster(this) %GETBUNDLEADJUSTER Get the bundle adjuster % - % value = obj.getBundleAdjuster() + % value = obj.getBundleAdjuster() % % ## Output % * __value__ output scalar struct. @@ -476,42 +475,35 @@ function setEstimator(this, estimatorType, varargin) function setBundleAdjuster(this, adjusterType, varargin) %SETBUNDLEADJUSTER Set the bundle adjuster % - % obj.setBundleAdjuster(adjusterType) - % obj.setBundleAdjuster(adjusterType, 'OptionName',optionValue, ...) + % obj.setBundleAdjuster(adjusterType) + % obj.setBundleAdjuster(adjusterType, 'OptionName',optionValue, ...) % % ## Input % * __adjusterType__ camera parameters refinement method. One of: - % * __NoBundleAdjuster__ Stub bundle adjuster that does - % nothing. - % * __BundleAdjusterRay__ Implementation of the camera - % parameters refinement algorithm which minimizes sum - % of the distances between the rays passing through - % the camera center and a feature. - % It can estimate focal length. It ignores the - % refinement mask for now. - % * __BundleAdjusterReproj__ Implementation of the camera - % parameters refinement algorithm which minimizes sum - % of the reprojection error squares. - % It can estimate focal length, aspect ratio, - % principal point. You can affect only on them via the - % refinement mask. - % * __BundleAdjusterAffine__ Bundle adjuster that expects - % affine transformation represented in homogeneous - % coordinates in R for each camera param. Implements - % camera parameters refinement algorithm which - % minimizes sum of the reprojection error squares. - % It estimates all transformation parameters. - % Refinement mask is ignored. See also - % cv.AffineBasedEstimator, - % `AffineBestOf2NearestMatcher`. - % * __BundleAdjusterAffinePartial__ Bundle adjuster that - % expects affine transformation with 4 DOF represented - % in homogeneous coordinates in R for each camera - % param. Implements camera parameters refinement - % algorithm which minimizes sum of the reprojection - % error squares. - % It estimates all transformation parameters. - % Refinement mask is ignored. + % * __NoBundleAdjuster__ Stub bundle adjuster that does nothing. + % * __BundleAdjusterRay__ Implementation of the camera + % parameters refinement algorithm which minimizes sum of the + % distances between the rays passing through the camera center + % and a feature. It can estimate focal length. It ignores the + % refinement mask for now. + % * __BundleAdjusterReproj__ Implementation of the camera + % parameters refinement algorithm which minimizes sum of the + % reprojection error squares. It can estimate focal length, + % aspect ratio, principal point. Only these can be affected + % via the refinement mask. + % * __BundleAdjusterAffine__ Bundle adjuster that expects affine + % transformation represented in homogeneous coordinates in R + % for each camera param. Implements camera parameters + % refinement algorithm which minimizes sum of the reprojection + % error squares. It estimates all transformation parameters. + % Refinement mask is ignored. See also + % cv.AffineBasedEstimator, `AffineBestOf2NearestMatcher`. + % * __BundleAdjusterAffinePartial__ Bundle adjuster that expects + % affine transformation with 4 DOF represented in homogeneous + % coordinates in R for each camera param. 
Implements camera + % parameters refinement algorithm which minimizes sum of the + % reprojection error squares. It estimates all transformation + % parameters. Refinement mask is ignored. % % ## Options % The following are options accepted by all adjusters: @@ -519,7 +511,7 @@ function setBundleAdjuster(this, adjusterType, varargin) % * __ConfThresh__ default 1 % * __RefinementMask__ default `eye(3)` % * __TermCriteria__ default - % `struct('type','Count+EPS', 'maxCount',1000, 'epsilon',eps)` + % `struct('type','Count+EPS', 'maxCount',1000, 'epsilon',eps)` % % The class uses `BundleAdjusterRay` by default. % @@ -531,7 +523,7 @@ function setBundleAdjuster(this, adjusterType, varargin) function value = getWarper(this) %GETWARPER Get the warper % - % value = obj.getWarper() + % value = obj.getWarper() % % ## Output % * __value__ output scalar struct. @@ -544,40 +536,37 @@ function setBundleAdjuster(this, adjusterType, varargin) function setWarper(this, warperType, varargin) %SETWARPER Set the image warper % - % obj.setWarper(warperType) - % obj.setWarper(warperType, 'OptionName',optionValue, ...) + % obj.setWarper(warperType) + % obj.setWarper(warperType, 'OptionName',optionValue, ...) % % ## Input % * __warperType__ image warper factory class type, used to create - % the rotation-based warper. One of: - % * __PlaneWarper__ Plane warper factory class. Warper that - % maps an image onto the `z = 1` plane. - % * __PlaneWarperGpu__ (requires CUDA) - % * __AffineWarper__ Affine warper factory class. Affine - % warper that uses rotations and translations. Uses - % affine transformation in homogeneous coordinates to - % represent both rotation and translation in camera - % rotation matrix. - % * __CylindricalWarper__ Cylindrical warper factory class. - % Warper that maps an image onto the `x*x + z*z = 1` - % cylinder. - % * __CylindricalWarperGpu__ (requires CUDA) - % * __SphericalWarper__ Warper that maps an image onto the - % unit sphere located at the origin. Projects image - % onto unit sphere with origin at [0,0,0] and radius - % `scale`, measured in pixels. A 360 panorama would - % therefore have a resulting width of `2*scale*pi` - % pixels. Poles are located at [0,-1,0] and [0,1,0] - % points. - % * __SphericalWarperGpu__ (requires CUDA) - % * __FisheyeWarper__ - % * __StereographicWarper__ - % * __CompressedRectilinearWarper__ - % * __CompressedRectilinearPortraitWarper__ - % * __PaniniWarper__ - % * __PaniniPortraitWarper__ - % * __MercatorWarper__ - % * __TransverseMercatorWarper__ + % the rotation-based warper. One of: + % * __PlaneWarper__ Plane warper factory class. Warper that maps + % an image onto the `z = 1` plane. + % * __PlaneWarperGpu__ (requires CUDA) + % * __AffineWarper__ Affine warper factory class. Affine warper + % that uses rotations and translations. Uses affine + % transformation in homogeneous coordinates to represent both + % rotation and translation in camera rotation matrix. + % * __CylindricalWarper__ Cylindrical warper factory class. + % Warper that maps an image onto the `x*x + z*z = 1` cylinder. + % * __CylindricalWarperGpu__ (requires CUDA) + % * __SphericalWarper__ Warper that maps an image onto the unit + % sphere located at the origin. Projects image onto unit + % sphere with origin at [0,0,0] and radius `scale`, measured + % in pixels. A 360 panorama would therefore have a resulting + % width of `2*scale*pi` pixels. Poles are located at [0,-1,0] + % and [0,1,0] points. 
+ % * __SphericalWarperGpu__ (requires CUDA) + % * __FisheyeWarper__ + % * __StereographicWarper__ + % * __CompressedRectilinearWarper__ + % * __CompressedRectilinearPortraitWarper__ + % * __PaniniWarper__ + % * __PaniniPortraitWarper__ + % * __MercatorWarper__ + % * __TransverseMercatorWarper__ % % ## Options % The following are options for the various warpers: @@ -596,7 +585,7 @@ function setWarper(this, warperType, varargin) function value = getExposureCompensator(this) %GETEXPOSURECOMPENSATOR Get the exposure compensator % - % value = obj.getExposureCompensator() + % value = obj.getExposureCompensator() % % ## Output % * __value__ output scalar struct. @@ -609,20 +598,19 @@ function setWarper(this, warperType, varargin) function setExposureCompensator(this, compensatorType, varargin) %SETEXPOSURECOMPENSATOR Set the exposure compensator % - % obj.setExposureCompensator(compensatorType) - % obj.setExposureCompensator(compensatorType, 'OptionName',optionValue, ...) + % obj.setExposureCompensator(compensatorType) + % obj.setExposureCompensator(compensatorType, 'OptionName',optionValue, ...) % % ## Input % * __compensatorType__ exposure compensator type. One of: - % * __NoExposureCompensator__ Stub exposure compensator - % which does nothing. - % * __GainCompensator__ Exposure compensator which tries to - % remove exposure related artifacts by adjusting image - % intensities, see [BL07] and [WJ10] for details. - % * __BlocksGainCompensator__ Exposure compensator which - % tries to remove exposure related artifacts by - % adjusting image block intensities, see [UES01] for - % details. + % * __NoExposureCompensator__ Stub exposure compensator which + % does nothing. + % * __GainCompensator__ Exposure compensator which tries to + % remove exposure related artifacts by adjusting image + % intensities, see [BL07] and [WJ10] for details. + % * __BlocksGainCompensator__ Exposure compensator which tries + % to remove exposure related artifacts by adjusting image + % block intensities, see [UES01] for details. % % ## Options % The following are options for the various compensators: @@ -661,7 +649,7 @@ function setExposureCompensator(this, compensatorType, varargin) function value = getSeamFinder(this) %GETSEAMFINDER Get the seam finder % - % value = obj.getSeamFinder() + % value = obj.getSeamFinder() % % ## Output % * __value__ output scalar struct. @@ -674,31 +662,31 @@ function setExposureCompensator(this, compensatorType, varargin) function setSeamFinder(this, seamType, varargin) %SETSEAMFINDER Set the seam finder % - % obj.setSeamFinder(seamType) - % obj.setSeamFinder(seamType, 'OptionName',optionValue, ...) + % obj.setSeamFinder(seamType) + % obj.setSeamFinder(seamType, 'OptionName',optionValue, ...) % % ## Input % * __seamType__ seam estimator type. One of: - % * __NoSeamFinder__ Stub seam estimator which does nothing. - % * __VoronoiSeamFinder__ Voronoi diagram-based pairwise - % seam estimator. - % * __DpSeamFinder__ - % * __GraphCutSeamFinder__ Minimum graph cut-based seam - % estimator. See details in [V03]. - % * __GraphCutSeamFinderGpu__ (requires CUDA) + % * __NoSeamFinder__ Stub seam estimator which does nothing. + % * __VoronoiSeamFinder__ Voronoi diagram-based pairwise seam + % estimator. + % * __DpSeamFinder__ + % * __GraphCutSeamFinder__ Minimum graph cut-based seam + % estimator. See details in [V03]. + % * __GraphCutSeamFinderGpu__ (requires CUDA) % % ## Options % The following are options for the various seam finders: % % ### `DpSeamFinder` % * __CostFunction__ default 'Color'. 
One of: - % * __Color__ - % * __ColorGrad__ + % * __Color__ + % * __ColorGrad__ % % ### `GraphCutSeamFinder` % * __CostType__ default 'ColorGrad'. One of: - % * __Color__ - % * __ColorGrad__ + % * __Color__ + % * __ColorGrad__ % * __TerminalCost__ default 10000.0 % * __BadRegionPenaly__ default 1000.0 % @@ -719,7 +707,7 @@ function setSeamFinder(this, seamType, varargin) function value = getBlender(this) %GETBLENDER Get the blender % - % value = obj.getBlender() + % value = obj.getBlender() % % ## Output % * __value__ output scalar struct. @@ -732,17 +720,17 @@ function setSeamFinder(this, seamType, varargin) function setBlender(this, blenderType, varargin) %SETBLENDER Set the blender % - % obj.setBlender(blenderType) - % obj.setBlender(blenderType, 'OptionName',optionValue, ...) + % obj.setBlender(blenderType) + % obj.setBlender(blenderType, 'OptionName',optionValue, ...) % % ## Input % * __blenderType__ image blender type. One of: - % * __NoBlender__ Simple blender which puts one image over - % another. - % * __FeatherBlender__ Simple blender which mixes images at - % its borders. - % * __MultiBandBlender__ Blender which uses multi-band - % blending algorithm (see [BA83]). + % * __NoBlender__ Simple blender which puts one image over + % another. + % * __FeatherBlender__ Simple blender which mixes images at its + % borders. + % * __MultiBandBlender__ Blender which uses multi-band blending + % algorithm (see [BA83]). % % ## Options % The following are options for the various blenders: @@ -754,8 +742,8 @@ function setBlender(this, blenderType, varargin) % * __TryGPU__ default false % * __NumBands__ default 5 % * __WeightType__ One of: - % * __single__ (default) - % * __int16__ + % * __single__ (default) + % * __int16__ % % The class uses `MultiBandBlender` by default. % diff --git a/+cv/Subdiv2D.m b/+cv/Subdiv2D.m index 7b5070c3c..a727b0a05 100644 --- a/+cv/Subdiv2D.m +++ b/+cv/Subdiv2D.m @@ -21,19 +21,20 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = Subdiv2D(varargin) %SUBDIV2D Constructor % - % obj = cv.Subdiv2D() - % obj = cv.Subdiv2D(rect) + % obj = cv.Subdiv2D() + % obj = cv.Subdiv2D(rect) % % ## Input % * __rect__ Rectangle `[x,y,w,h]` that includes all of the 2D - % points that are to be added to the subdivision. + % points that are to be added to the subdivision. % % Creates an empty Delaunay subdivision object. % The second form is equivalent to creating an empty object, then @@ -52,7 +53,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.Subdiv2D % @@ -63,11 +64,11 @@ function delete(this) function initDelaunay(this, rect) %INITDELAUNAY Initialize % - % obj.initDelaunay(rect) + % obj.initDelaunay(rect) % % ## Input % * __rect__ Rectangle `[x,y,w,h]` that includes all of the 2D - % points that are to be added to the subdivision. + % points that are to be added to the subdivision. 
% % See also: cv.Subdiv2D.Subdiv2D % @@ -77,9 +78,9 @@ function initDelaunay(this, rect) function varargout = insert(this, pt) %INSERT Insert a single point or multiple points into a Delaunay triangulation % - % obj.insert(pt) - % curr_point = obj.insert(pt) - % obj.insert(pts) + % obj.insert(pt) + % curr_point = obj.insert(pt) + % obj.insert(pts) % % ## Input % * __pt__ Point to insert `[x,y]` @@ -108,33 +109,32 @@ function initDelaunay(this, rect) function [location, edge, vertex] = locate(this, pt) %LOCATE Returns the location of a point within a Delaunay triangulation % - % [location, edge, vertex] = obj.locate(pt) + % [location, edge, vertex] = obj.locate(pt) % % ## Input % * __pt__ Point `[x,y]` to locate. % % ## Output % * __location__ a string which specifies one of the following - % five cases for point location: - % * __Inside__ The point falls into some facet. The function - % returns 'Inside' and edge will contain one of edges - % of the facet. - % * __OnEdge__ The point falls onto the edge. The function - % returns 'OnEdge' and edge will contain this edge. - % * __Vertex__ The point coincides with one of the - % subdivision vertices. The function returns 'Vertex' - % and vertex will contain a pointer to the vertex. - % * __OutsideRect__ The point is outside the subdivision - % reference rectangle. The function returns - % 'OutsideRect' and no other outputs are filled. - % * __Error__ Point location error. One of input arguments - % is invalid. A runtime error is raised or, if silent - % or "parent" error processing mode is selected, - % 'Error' is returned. + % five cases for point location: + % * __Inside__ The point falls into some facet. The function + % returns 'Inside' and edge will contain one of the edges of + % the facet. + % * __OnEdge__ The point falls onto the edge. The function + % returns 'OnEdge' and edge will contain this edge. + % * __Vertex__ The point coincides with one of the subdivision + % vertices. The function returns 'Vertex' and vertex will + % contain a pointer to the vertex. + % * __OutsideRect__ The point is outside the subdivision + % reference rectangle. The function returns 'OutsideRect' and + % no other outputs are filled. + % * __Error__ Point location error. One of the input arguments + % is invalid. A runtime error is raised or, if silent or + % "parent" error processing mode is selected, 'Error' is + % returned. % * __edge__ Output edge that the point belongs to or is located - % to the right of it. + % to the right of it. % * __vertex__ Optional output vertex the input point coincides - % with. + % with. % % The function locates the input point within the subdivision and % gives one of the triangle edges or vertices. @@ -147,7 +147,7 @@ function initDelaunay(this, rect) function [vertex, nearestPt] = findNearest(this, pt) %FINDNEAREST Finds the subdivision vertex closest to the given point % - % [vertex, nearestPt] = obj.findNearest(pt) + % [vertex, nearestPt] = obj.findNearest(pt) % % ## Input % * __pt__ Input point `[x,y]` @@ -171,7 +171,7 @@ function initDelaunay(this, rect) function edgeList = getEdgeList(this) %GETEDGELIST Returns a list of all edges % - % edgeList = obj.getEdgeList() + % edgeList = obj.getEdgeList() % % ## Output % * __edgeList__ Output vector `{[p1x,p1y, p2x,p2y], ...}`. 
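A short sketch tying the Subdiv2D methods above together (the rectangle and random point set are arbitrary illustrative values):

    pts = rand(20,2) * 100;            % arbitrary 2D points inside the rect
    sd = cv.Subdiv2D([0 0 100 100]);   % rect must contain all inserted points
    for i=1:size(pts,1)
        sd.insert(pts(i,:));           % insert one point at a time
    end
    edges = sd.getEdgeList();          % {[p1x,p1y, p2x,p2y], ...}
    tri = sd.getTriangleList();        % {[p1x,p1y, p2x,p2y, p3x,p3y], ...}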
@@ -188,7 +188,7 @@ function initDelaunay(this, rect) function leadingEdgeList = getLeadingEdgeList(this) %GETLEADINGEDGELIST Returns a list of the leading edge ID connected to each triangle % - % leadingEdgeList = obj.getLeadingEdgeList() + % leadingEdgeList = obj.getLeadingEdgeList() % % ## Output % * __leadingEdgeList__ Output vector. @@ -203,11 +203,11 @@ function initDelaunay(this, rect) function triangleList = getTriangleList(this) %GETTRIANGLELIST Returns a list of all triangles % - % triangleList = obj.getTriangleList() + % triangleList = obj.getTriangleList() % % ## Output % * __triangleList__ Output vector - % `{[p1x,p1y, p2x,p2y, p3x,p3y], ...}`. + % `{[p1x,p1y, p2x,p2y, p3x,p3y], ...}`. % % The function gives each triangle as a vector of 6 numbers, where % each pair is one of the triangle vertices, i.e. `p1_x = v[0]`, @@ -222,17 +222,17 @@ function initDelaunay(this, rect) function [facetList, facetCenters] = getVoronoiFacetList(this, idx) %GETVORONOIFACETLIST Returns a list of all Voronoi facets % - % [facetList, facetCenters] = obj.getVoronoiFacetList(idx) + % [facetList, facetCenters] = obj.getVoronoiFacetList(idx) % % ## Input % * __idx__ Vector of vertex IDs to consider. For all vertices - % you can pass empty vector. + % you can pass an empty vector. % % ## Output % * __facetList__ Output vector of the Voronoi facets, an array of - % array of points `{{[x,y], ...}, ...}`. + % arrays of points `{{[x,y], ...}, ...}`. % * __facetCenters__ Output vector of the Voronoi facets center - % points, a vector of points `{[x,y], ...}`. + % points, a vector of points `{[x,y], ...}`. % % See also: cv.Subdiv2D.getEdgeList, cv.Subdiv2D.getTriangleList % @@ -242,7 +242,7 @@ function initDelaunay(this, rect) function [pt, firstEdge] = getVertex(this, vertex) %GETVERTEX Returns vertex location from vertex ID % - % [pt, firstEdge] = obj.getVertex(vertex) + % [pt, firstEdge] = obj.getVertex(vertex) % % ## Input % * __vertex__ vertex ID. @@ -250,7 +250,7 @@ function initDelaunay(this, rect) % ## Output % * __pt__ vertex `[x,y]`. % * __firstEdge__ Optional. The first edge ID which is connected - % to the vertex. + % to the vertex. % % See also: cv.Subdiv2D.getEdge % @@ -260,30 +260,29 @@ function initDelaunay(this, rect) function e = getEdge(this, edge, nextEdgeType) %GETEDGE Returns one of the edges related to the given edge % - % e = obj.getEdge(edge, nextEdgeType) + % e = obj.getEdge(edge, nextEdgeType) % % ## Input % * __edge__ Subdivision edge ID. % * __nextEdgeType__ Parameter specifying which of the related - % edges to return. The following edge type navigation values - % are possible: - % * __NextAroundOrg__ next around the edge origin (`eOnext` - % on the picture below if `e` is the input edge). - % * __NextAroundDst__ next around the edge vertex (`eDnext`). - % * __PrevAroundOrg__ previous around the edge origin - % (reversed `eRnext`). - % * __PrevAroundDst__ previous around the edge destination - % (reversed `eLnext`). - % * __NextAroundLeft__ next around the left facet (`eLnext`). - % * __NextAroundRight__ next around the right facet - % (`eRnext`). - % * __PrevAroundLeft__ previous around the left facet - % (reversed `eOnext`). - % * __PrevAroundRight__ previous around the right facet - % (reversed `eDnext`). + % edges to return. The following edge type navigation values are + % possible: + % * __NextAroundOrg__ next around the edge origin (`eOnext` on + % the picture below if `e` is the input edge). + % * __NextAroundDst__ next around the edge vertex (`eDnext`). 
+ % * __PrevAroundOrg__ previous around the edge origin + % (reversed `eRnext`). + % * __PrevAroundDst__ previous around the edge destination + % (reversed `eLnext`). + % * __NextAroundLeft__ next around the left facet (`eLnext`). + % * __NextAroundRight__ next around the right facet (`eRnext`). + % * __PrevAroundLeft__ previous around the left facet + % (reversed `eOnext`). + % * __PrevAroundRight__ previous around the right facet + % (reversed `eDnext`). % % ## Output - % * __e__ edge ID related to the input edge. + % * __e__ edge ID related to the input edge. % % A sample output is shown below: % @@ -297,14 +296,14 @@ function initDelaunay(this, rect) function e = nextEdge(this, edge) %NEXTEDGE Returns next edge around the edge origin % - % e = obj.nextEdge(edge) + % e = obj.nextEdge(edge) % % ## Input % * __edge__ Subdivision edge ID. % % ## Output % * __e__ an integer which is next edge ID around the edge origin - % (`eOnext` on the picture shown if `e` is the input edge). + % (`eOnext` on the picture shown if `e` is the input edge). % % See also: cv.Subdiv2D.getEdge % @@ -314,23 +313,22 @@ function initDelaunay(this, rect) function e = rotateEdge(this, edge, rotate) %ROTATEEDGE Returns another edge of the same quad-edge % - % e = obj.rotateEdge(edge, rotate) + % e = obj.rotateEdge(edge, rotate) % % ## Input % * __edge__ Subdivision edge ID. % * __rotate__ Parameter specifying which of the edges of the same - % quad-edge as the input one to return. The following values - % are possible: - % * __0__ the input edge (`e` on the picture shown if `e` is - % the input edge). - % * __1__ the rotated edge (`eRot`). - % * __2__ the reversed edge (reversed `e` (in green)). - % * __3__ the reversed rotated edge (reversed `eRot` - % (in green)). + % quad-edge as the input one to return. The following values are + % possible: + % * __0__ the input edge (`e` on the picture shown if `e` is the + % input edge). + % * __1__ the rotated edge (`eRot`). + % * __2__ the reversed edge (reversed `e` (in green)). + % * __3__ the reversed rotated edge (reversed `eRot` (in green)). % % ## Output % * __e__ one of the edges ID of the same quad-edge as the input - % edge. + % edge. % % See also: cv.Subdiv2D.getEdge % @@ -340,7 +338,7 @@ function initDelaunay(this, rect) function e = symEdge(this, edge) %SYMEDGE Sym edge % - % e = obj.symEdge(edge) + % e = obj.symEdge(edge) % % ## Input % * __edge__ Subdivision edge ID. @@ -356,7 +354,7 @@ function initDelaunay(this, rect) function [e, orgpt] = edgeOrg(this, edge) %EDGEORG Returns the edge origin % - % [e, orgpt] = obj.edgeOrg(edge) + % [e, orgpt] = obj.edgeOrg(edge) % % ## Input % * __edge__ Subdivision edge ID. @@ -373,7 +371,7 @@ function initDelaunay(this, rect) function [e, dstpt] = edgeDst(this, edge) %EDGEDST Returns the edge destination % - % [e, dstpt] = obj.edgeDst(edge) + % [e, dstpt] = obj.edgeDst(edge) % % ## Input % * __edge__ Subdivision edge ID. diff --git a/+cv/SuperResolution.m b/+cv/SuperResolution.m index a03e50218..95d292439 100644 --- a/+cv/SuperResolution.m +++ b/+cv/SuperResolution.m @@ -8,16 +8,16 @@ % % ## Example % - % superres = cv.SuperResolution(); - % superres.Scale = 2; % 2x scale - % superres.Iterations = 10; % careful alg is computationally expensive! 
- % superres.setOpticalFlow('FarnebackOpticalFlow', 'LevelsNumber',3); - % superres.setInput('Video', 'video.avi'); - % while true - % tic, frame = superres.nextFrame(); toc - % if isempty(frame), break; end - % imshow(frame), drawnow - % end + % superres = cv.SuperResolution(); + % superres.Scale = 2; % 2x scale + % superres.Iterations = 10; % careful, the algorithm is computationally expensive! + % superres.setOpticalFlow('FarnebackOpticalFlow', 'LevelsNumber',3); + % superres.setInput('Video', 'video.avi'); + % while true + % tic, frame = superres.nextFrame(); toc + % if isempty(frame), break; end + % imshow(frame), drawnow + % end % % ## References % [Farsiu03]: @@ -35,7 +35,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -64,12 +65,12 @@ function this = SuperResolution(superresType) %SUPERRESOLUTION Create Bilateral TV-L1 Super Resolution % - % obj = cv.SuperResolution(superresType) + % obj = cv.SuperResolution(superresType) % % ## Input % * __superresType__ Super resolution algorithm type, one of: - % * __BTVL1__ Bilateral TV-L1 on CPU. This is the default. - % * **BTVL1_CUDA** Bilateral TV-L1 on GPU (requires CUDA). + % * __BTVL1__ Bilateral TV-L1 on CPU. This is the default. + % * **BTVL1_CUDA** Bilateral TV-L1 on GPU (requires CUDA). % % This class implements Super Resolution algorithm described in % the papers [Farsiu03] and [Mitzel09]. @@ -100,7 +101,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.SuperResolution % @@ -111,7 +112,7 @@ function delete(this) function collectGarbage(this) %COLLECTGARBAGE Clear all inner buffers % - % obj.collectGarbage() + % obj.collectGarbage() % % See also: cv.SuperResolution.SuperResolution % @@ -121,23 +122,23 @@ function collectGarbage(this) function setInput(this, frameSourceType, varargin) %SETINPUT Set input frame source for Super Resolution algorithm % - % obj.setInput(frameSourceType, ...) + % obj.setInput(frameSourceType, ...) % - % obj.setInput('Camera', deviceId) - % obj.setInput('Video', filename) + % obj.setInput('Camera', deviceId) + % obj.setInput('Video', filename) % % ## Input % * __frameSourceType__ Input frame source type. One of: - % * __Camera__ wrapper around cv.VideoCapture with a camera - % device as source. - % * __Video__ wrapper around cv.VideoCapture with a video - % file as source. + % * __Camera__ wrapper around cv.VideoCapture with a camera + % device as source. + % * __Video__ wrapper around cv.VideoCapture with a video file + % as source. % * __deviceId__ id of the opened video capturing device (i.e. a - % camera index). If there is a single camera connected, just - % pass 0. default 0 + % camera index). If there is a single camera connected, just + % pass 0. default 0 % * __filename__ name of the opened video file (e.g. `video.avi`) - % or image sequence (eg. `img_%02d.jpg`, which will read - % samples like `img_00.jpg`, `img_01.jpg`, `img_02.jpg`, ...) + % or image sequence (e.g. `img_%02d.jpg`, which will read samples + % like `img_00.jpg`, `img_01.jpg`, `img_02.jpg`, ...) % % See also: cv.SuperResolution.nextFrame % @@ -147,23 +148,23 @@ function setInput(this, frameSourceType, varargin) function setOpticalFlow(this, optFlowType, varargin) %SETOPTICALFLOW Dense optical flow algorithm % - % obj.setOpticalFlow(optFlowType) - % obj.setOpticalFlow(optFlowType, 'OptionName',optionValue, ...) + % obj.setOpticalFlow(optFlowType) + % obj.setOpticalFlow(optFlowType, 'OptionName',optionValue, ...) 
% % ## Input % * __optFlowType__ Dense optical flow algorithm. One of: - % * __FarnebackOpticalFlow__ wrapper for - % cv.calcOpticalFlowFarneback function. - % * __DualTVL1OpticalFlow__ wrapper for - % cv.DualTVL1OpticalFlow class. - % * __FarnebackOpticalFlowCUDA__ wrapper for - % `cv::cuda::FarnebackOpticalFlow` (requires CUDA) - % * __DualTVL1OpticalFlowCUDA__ wrapper for - % `cv::cuda::OpticalFlowDual_TVL1` (requires CUDA) - % * __BroxOpticalFlowCUDA__ wrapper for - % `cv::cuda::BroxOpticalFlow` (requires CUDA) - % * __PyrLKOpticalFlowCUDA__ wrapper for - % `cv::cuda::DensePyrLKOpticalFlow` (requires CUDA) + % * __FarnebackOpticalFlow__ wrapper for + % cv.calcOpticalFlowFarneback function. + % * __DualTVL1OpticalFlow__ wrapper for cv.DualTVL1OpticalFlow + % class. + % * __FarnebackOpticalFlowCUDA__ wrapper for + % `cv::cuda::FarnebackOpticalFlow` (requires CUDA) + % * __DualTVL1OpticalFlowCUDA__ wrapper for + % `cv::cuda::OpticalFlowDual_TVL1` (requires CUDA) + % * __BroxOpticalFlowCUDA__ wrapper for + % `cv::cuda::BroxOpticalFlow` (requires CUDA) + % * __PyrLKOpticalFlowCUDA__ wrapper for + % `cv::cuda::DensePyrLKOpticalFlow` (requires CUDA) % % ## Options % The following are options for the various algorithms: @@ -215,11 +216,11 @@ function setOpticalFlow(this, optFlowType, varargin) function optFlow = getOpticalFlow(this) %GETOPTICALFLOW Dense optical flow algorithm % - % optFlow = obj.getOpticalFlow() + % optFlow = obj.getOpticalFlow() % % ## Output - % * __optFlow__ output struct containing properties of the - % optical flow algorithm. + % * __optFlow__ output struct containing properties of the optical + % flow algorithm. % % See also: cv.SuperResolution.setOpticalFlow, % cv.calcOpticalFlowPyrLK, cv.DualTVL1OpticalFlow % @@ -233,13 +234,13 @@ function setOpticalFlow(this, optFlowType, varargin) function frame = nextFrame(this, varargin) %NEXTFRAME Process next frame from input and return output result % - % frame = obj.nexFrame() - % frame = obj.nexFrame('OptionName',optionValue, ...) + % frame = obj.nextFrame() + % frame = obj.nextFrame('OptionName',optionValue, ...) % % ## Options % * __FlipChannels__ in case the output is a color image, flips the - % color order from OpenCV's BGR/BGRA to MATLAB's RGB/RGBA - % order. default false + % color order from OpenCV's BGR/BGRA to MATLAB's RGB/RGBA order. + % default false % % ## Output % * __frame__ Output result @@ -252,7 +253,7 @@ function setOpticalFlow(this, optFlowType, varargin) function reset(this) %RESET Reset the frame source % - % obj.reset() + % obj.reset() % % See also: cv.SuperResolution.nextFrame % @@ -265,7 +266,7 @@ function reset(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.SuperResolution.empty, cv.SuperResolution.load % @@ -275,11 +276,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g. in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.SuperResolution.clear, cv.SuperResolution.load % @@ -289,11 +290,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. 
+ % when the object is saved to a file or string. % % See also: cv.SuperResolution.save, cv.SuperResolution.load % @@ -303,7 +304,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -319,21 +320,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous diff --git a/+cv/TickMeter.m b/+cv/TickMeter.m index 4d9462194..bcbd25606 100644 --- a/+cv/TickMeter.m +++ b/+cv/TickMeter.m @@ -5,11 +5,22 @@ % second. That is, the following code computes the execution time in % seconds: % - % tm = cv.TickMeter(); - % tm.start(); - % % do something ... - % tm.stop(); - % disp(tm.TimeSec) + % tm = cv.TickMeter(); + % tm.start(); + % % do something ... + % tm.stop(); + % disp(tm.TimeSec) + % + % It is also possible to compute the average time over multiple runs: + % + % tm = cv.TickMeter(); + % for i=1:100 + % tm.start(); + % % do something ... + % tm.stop(); + % end + % fprintf('Average time in seconds per iteration is: %f\n', ... + % tm.TimeSec / double(tm.Counter)) % % See also: tic, toc, cputime, timeit % @@ -36,7 +47,7 @@ function this = TickMeter() %TICKMETER the default constructor % - % obj = cv.TickMeter() + % obj = cv.TickMeter() % % See also: cv.TickMeter.start, cv.TickMeter.reset % @@ -46,6 +57,8 @@ function delete(this) %DELETE Destructor % + % obj.delete() + % % See also: cv.TickMeter % if isempty(this.id), return; end @@ -55,7 +68,7 @@ function delete(this) function start(this) %START Starts counting ticks % - % obj.start() + % obj.start() % % See also: cv.TickMeter.stop % @@ -65,7 +78,7 @@ function start(this) function stop(this) %STOP Stops counting ticks % - % obj.stop() + % obj.stop() % % See also: cv.TickMeter.start % @@ -75,7 +88,7 @@ function stop(this) function reset(this) %RESET Resets internal values % - % obj.reset() + % obj.reset() % % See also: cv.TickMeter.start % @@ -105,4 +118,72 @@ function reset(this) end end + methods (Static) + function t = getTickCount() + %GETTICKCOUNT Returns the number of ticks + % + % t = cv.TickMeter.getTickCount() + % + % ## Output + % * __t__ number of ticks. + % + % The function returns the number of ticks after a certain event + % (for example, when the machine was turned on). It can be used to + % initialize RNG or to measure a function execution time by + % reading the tick count before and after the function call. 
+ % + % See also: cv.TickMeter.getTickFrequency, cv.TickMeter + % + t = TickMeter_(0, 'getTickCount'); + end + + function f = getTickFrequency() + %GETTICKFREQUENCY Returns the number of ticks per second + % + % f = cv.TickMeter.getTickFrequency() + % + % ## Output + % * __f__ number of ticks per second. + % + % The function returns the number of ticks per second. That is, + % the following code computes the execution time in seconds: + % + % t = double(cv.TickMeter.getTickCount()); + % % do something ... + % t = (double(cv.TickMeter.getTickCount()) - t) / cv.TickMeter.getTickFrequency(); + % + % See also: cv.TickMeter.getTickCount, cv.TickMeter + % + f = TickMeter_(0, 'getTickFrequency'); + end + + function t = getCPUTickCount() + %GETCPUTICKCOUNT Returns the number of CPU ticks + % + % t = cv.TickMeter.getCPUTickCount() + % + % ## Output + % * __t__ number of CPU ticks. + % + % The function returns the current number of CPU ticks on some + % architectures (such as x86, x64, PowerPC). On other platforms + % the function is equivalent to cv.TickMeter.getTickCount. It can + % also be used for very accurate time measurements, as well as for + % RNG initialization. Note that in case of multi-CPU systems a + % thread, from which cv.TickMeter.getCPUTickCount is called, can + % be suspended and resumed on another CPU with its own counter. + % So, theoretically (and practically) the subsequent calls to the + % function do not necessarily return monotonically increasing + % values. Also, since modern CPUs vary the CPU frequency + % depending on the load, the number of CPU clocks spent in some + % code cannot be directly converted to time units. Therefore, + % cv.TickMeter.getTickCount is generally a preferable solution for + % measuring execution time. + % + % See also: cv.TickMeter.getTickFrequency + % + t = TickMeter_(0, 'getCPUTickCount'); + end + end + end diff --git a/+cv/Timelapser.m b/+cv/Timelapser.m index eb57cb29a..22ccb7979 100644 --- a/+cv/Timelapser.m +++ b/+cv/Timelapser.m @@ -8,19 +8,20 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = Timelapser(ttype) %TIMELAPSER Constructor % - % obj = cv.Timelapser(ttype) + % obj = cv.Timelapser(ttype) % % ## Input % * __ttype__ Timelapsing method. One of: - % * __AsIs__ `Timelapser`. - % * __Crop__ `TimelapserCrop`. + % * __AsIs__ `Timelapser`. + % * __Crop__ `TimelapserCrop`. % % See also: cv.Timelapser.process % @@ -30,7 +31,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.Timelapser % @@ -41,7 +42,10 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() + % + % ## Output + % * __typename__ Name of C++ type % typename = Timelapser_(this.id, 'typeid'); end @@ -52,7 +56,7 @@ function delete(this) function initialize(this, corners, sizes) %INITIALIZE Initialize % - % obj.initialize(corners, sizes) + % obj.initialize(corners, sizes) % % ## Input % * __corners__ cell array of points `{[x,y], ...}`. @@ -66,7 +70,7 @@ function initialize(this, corners, sizes) function process(this, img, mask, tl) %PROCESS Process % - % obj.process(img, mask, tl) + % obj.process(img, mask, tl) % % ## Input % * __img__ input image, of type `int16`. @@ -81,7 +85,7 @@ function process(this, img, mask, tl) function dst = getDst(this) %GETDST Get Destination % - % dst = obj.getDst() + % dst = obj.getDst() % % ## Output % * __dst__ output destination of type `int16`.
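The cv.Timelapser API documented in the hunk above carries no usage example; the following is a minimal sketch under assumed inputs (the two frame corners, the 320x240 sizes, and the random pixel data are invented for illustration; the `int16`/`uint8` types follow the documented signatures):

    % compose two 320x240 frames side by side on the timelapse canvas
    corners = {[0,0], [320,0]};        % top-left [x,y] of each frame
    sizes   = {[320,240], [320,240]};  % [w,h] of each frame

    tlap = cv.Timelapser('AsIs');
    tlap.initialize(corners, sizes);

    img  = int16(randi([0 255], [240 320 3]));  % frame pixels, int16 as documented
    mask = 255 * ones(240, 320, 'uint8');       % full-coverage mask, uint8
    tlap.process(img, mask, [0,0]);             % place the frame at its corner

    dst = tlap.getDst();                        % composed int16 destination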
diff --git a/+cv/Tonemap.m b/+cv/Tonemap.m index 0cc22ae7b..f4e78d090 100644 --- a/+cv/Tonemap.m +++ b/+cv/Tonemap.m @@ -6,7 +6,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -23,8 +24,8 @@ function this = Tonemap(varargin) %TONEMAP Creates simple linear mapper with gamma correction % - % obj = cv.Tonemap() - % obj = cv.Tonemap('OptionName',optionValue, ...) + % obj = cv.Tonemap() + % obj = cv.Tonemap('OptionName',optionValue, ...) % % ## Options % * __Gamma__ default 1.0 @@ -37,7 +38,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.Tonemap % @@ -51,14 +52,14 @@ function delete(this) function dst = process(this, src) %PROCESS Tonemaps image % - % dst = obj.process(src) + % dst = obj.process(src) % % ## Input % * __src__ source RGB image, 32-bit `single` 3-channel array. % % ## Output % * __dst__ destination image of same size as `src`, 32-bit - % `single` 3-channel array with values in [0,1] range. + % `single` 3-channel array with values in [0,1] range. % % See also: cv.Tonemap.Tonemap % @@ -71,7 +72,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.Tonemap.empty, cv.Tonemap.load % @@ -81,11 +82,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.Tonemap.clear, cv.Tonemap.load % @@ -95,11 +96,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.Tonemap.save, cv.Tonemap.load % @@ -109,7 +110,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -125,21 +126,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). 
The previous diff --git a/+cv/TonemapDrago.m b/+cv/TonemapDrago.m index 9a88d367b..e6119de59 100644 --- a/+cv/TonemapDrago.m +++ b/+cv/TonemapDrago.m @@ -28,7 +28,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -55,8 +56,8 @@ function this = TonemapDrago(varargin) %TONEMAPDRAGO Creates TonemapDrago object % - % obj = cv.TonemapDrago() - % obj = cv.TonemapDrago('OptionName',optionValue, ...) + % obj = cv.TonemapDrago() + % obj = cv.TonemapDrago('OptionName',optionValue, ...) % % ## Options % * __Gamma__ default 1.0 @@ -71,7 +72,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.TonemapDrago % @@ -85,14 +86,14 @@ function delete(this) function dst = process(this, src) %PROCESS Tonemaps image % - % dst = obj.process(src) + % dst = obj.process(src) % % ## Input % * __src__ source RGB image, 32-bit `single` 3-channel array. % % ## Output % * __dst__ destination image of same size as `src`, 32-bit - % `single` 3-channel array with values in [0,1] range. + % `single` 3-channel array with values in [0,1] range. % % See also: cv.TonemapDrago.TonemapDrago % @@ -105,7 +106,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.TonemapDrago.empty, cv.TonemapDrago.load % @@ -115,11 +116,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.TonemapDrago.clear, cv.TonemapDrago.load % @@ -129,11 +130,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.TonemapDrago.save, cv.TonemapDrago.load % @@ -143,7 +144,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -159,21 +160,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). 
The previous diff --git a/+cv/TonemapDurand.m b/+cv/TonemapDurand.m index de4e5f0cf..1abce869f 100644 --- a/+cv/TonemapDurand.m +++ b/+cv/TonemapDurand.m @@ -22,7 +22,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -53,8 +54,8 @@ function this = TonemapDurand(varargin) %TONEMAPDURAND Creates TonemapDurand object % - % obj = cv.TonemapDurand() - % obj = cv.TonemapDurand('OptionName',optionValue, ...) + % obj = cv.TonemapDurand() + % obj = cv.TonemapDurand('OptionName',optionValue, ...) % % ## Options % * __Gamma__ default 1.0 @@ -71,7 +72,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.TonemapDurand % @@ -85,14 +86,14 @@ function delete(this) function dst = process(this, src) %PROCESS Tonemaps image % - % dst = obj.process(src) + % dst = obj.process(src) % % ## Input % * __src__ source RGB image, 32-bit `single` 3-channel array. % % ## Output % * __dst__ destination image of same size as `src`, 32-bit - % `single` 3-channel array with values in [0,1] range. + % `single` 3-channel array with values in [0,1] range. % % See also: cv.TonemapDurand.TonemapDurand % @@ -105,7 +106,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.TonemapDurand.empty, cv.TonemapDurand.load % @@ -115,11 +116,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.TonemapDurand.clear, cv.TonemapDurand.load % @@ -129,11 +130,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.TonemapDurand.save, cv.TonemapDurand.load % @@ -143,7 +144,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -159,21 +160,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). 
The previous diff --git a/+cv/TonemapMantiuk.m b/+cv/TonemapMantiuk.m index 0aadb56fd..6ae3e8f99 100644 --- a/+cv/TonemapMantiuk.m +++ b/+cv/TonemapMantiuk.m @@ -19,7 +19,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -46,8 +47,8 @@ function this = TonemapMantiuk(varargin) %TONEMAPMANTIUK Creates TonemapMantiuk object % - % obj = cv.TonemapMantiuk() - % obj = cv.TonemapMantiuk('OptionName',optionValue, ...) + % obj = cv.TonemapMantiuk() + % obj = cv.TonemapMantiuk('OptionName',optionValue, ...) % % ## Options % * __Gamma__ default 1.0 @@ -62,7 +63,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.TonemapMantiuk % @@ -76,14 +77,14 @@ function delete(this) function dst = process(this, src) %PROCESS Tonemaps image % - % dst = obj.process(src) + % dst = obj.process(src) % % ## Input % * __src__ source RGB image, 32-bit `single` 3-channel array. % % ## Output % * __dst__ destination image of same size as `src`, 32-bit - % `single` 3-channel array with values in [0,1] range. + % `single` 3-channel array with values in [0,1] range. % % See also: cv.TonemapMantiuk.TonemapMantiuk % @@ -96,7 +97,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.TonemapMantiuk.empty, cv.TonemapMantiuk.load % @@ -106,11 +107,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.TonemapMantiuk.clear, cv.TonemapMantiuk.load % @@ -120,11 +121,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.TonemapMantiuk.save, cv.TonemapMantiuk.load % @@ -134,7 +135,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -150,21 +151,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). 
The previous diff --git a/+cv/TonemapReinhard.m b/+cv/TonemapReinhard.m index 955c07f45..5b9cc1c8b 100644 --- a/+cv/TonemapReinhard.m +++ b/+cv/TonemapReinhard.m @@ -20,7 +20,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -51,8 +52,8 @@ function this = TonemapReinhard(varargin) %TONEMAPREINHARD Creates TonemapReinhard object % - % obj = cv.TonemapReinhard() - % obj = cv.TonemapReinhard('OptionName',optionValue, ...) + % obj = cv.TonemapReinhard() + % obj = cv.TonemapReinhard('OptionName',optionValue, ...) % % ## Options % * __Gamma__ default 1.0 @@ -68,7 +69,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.TonemapReinhard % @@ -82,14 +83,14 @@ function delete(this) function dst = process(this, src) %PROCESS Tonemaps image % - % dst = obj.process(src) + % dst = obj.process(src) % % ## Input % * __src__ source RGB image, 32-bit `single` 3-channel array. % % ## Output % * __dst__ destination image of same size as `src`, 32-bit - % `single` 3-channel array with values in [0,1] range. + % `single` 3-channel array with values in [0,1] range. % % See also: cv.TonemapReinhard.TonemapReinhard % @@ -102,7 +103,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.TonemapReinhard.empty, cv.TonemapReinhard.load % @@ -112,11 +113,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.TonemapReinhard.clear, cv.TonemapReinhard.load % @@ -126,11 +127,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.TonemapReinhard.save, cv.TonemapReinhard.load % @@ -140,7 +141,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -156,21 +157,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. 
+ % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous diff --git a/+cv/TwoPassStabilizer.m b/+cv/TwoPassStabilizer.m index a27857695..d52c3d016 100644 --- a/+cv/TwoPassStabilizer.m +++ b/+cv/TwoPassStabilizer.m @@ -28,7 +28,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -48,7 +49,7 @@ function this = TwoPassStabilizer() %TWOPASSSTABILIZER Constructor % - % obj = cv.TwoPassStabilizer() + % obj = cv.TwoPassStabilizer() % % See also: cv.TwoPassStabilizer.nextFrame % @@ -58,7 +59,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.TwoPassStabilizer % @@ -72,13 +73,13 @@ function delete(this) function frame = nextFrame(this, varargin) %NEXTFRAME Process next frame from input and return output result % - % frame = obj.nexFrame() - % frame = obj.nexFrame('OptionName',optionValue, ...) + % frame = obj.nextFrame() + % frame = obj.nextFrame('OptionName',optionValue, ...) % % ## Options % * __FlipChannels__ in case the output is color image, flips the - % color order from OpenCV's BGR/BGRA to MATLAB's RGB/RGBA - % order. default false + % color order from OpenCV's BGR/BGRA to MATLAB's RGB/RGBA order. + % default false % % ## Output % * __frame__ Output result @@ -91,7 +92,7 @@ function delete(this) function reset(this) %RESET Reset the frame source % - % obj.reset() + % obj.reset() % % See also: cv.TwoPassStabilizer.nextFrame % @@ -104,15 +105,15 @@ function reset(this) function setLog(this, logType) %SETLOG Set logger class for the video stabilizer % - % stab.setLog(logType) + % stab.setLog(logType) % % ## Input % * __logType__ Logging type. One of: - % * __NullLog__ no logging. - % * __LogToStdout__ (default) log messages to standard - % output. Note that standard output is not displayed - % in MATLAB, you should use `LogToMATLAB` instead. - % * __LogToMATLAB__ log messages to MATLAB command window. + % * __NullLog__ no logging. + % * __LogToStdout__ (default) log messages to standard output. + % Note that standard output is not displayed in MATLAB, you + % should use `LogToMATLAB` instead. + % * __LogToMATLAB__ log messages to MATLAB command window. % % The class uses `LogToStdout` by default. % @@ -123,7 +124,7 @@ function setLog(this, logType) function value = getLog(this) %GETLOG Get the current logger class % - % value = stab.getLog() + % value = stab.getLog() % % ## Output % * __value__ output scalar struct @@ -136,20 +137,20 @@ function setLog(this, logType) function setFrameSource(this, frameSourceType, varargin) %SETFRAMESOURCE Set input frame source for the video stabilizer % - % stab.setInput(frameSourceType, ...) + % stab.setFrameSource(frameSourceType, ...) % - % stab.setFrameSource('NullFrameSource') - % stab.setFrameSource('VideoFileSource', filename) - % stab.setFrameSource('VideoFileSource', filename, 'OptionName',optionValue, ...) + % stab.setFrameSource('NullFrameSource') + % stab.setFrameSource('VideoFileSource', filename) + % stab.setFrameSource('VideoFileSource', filename, 'OptionName',optionValue, ...) % % ## Input % * __frameSourceType__ Input frames source type. One of: - % * __NullFrameSource__ - % * __VideoFileSource__ wrapper around cv.VideoCapture with - % a video file or image sequence as source. + % * __NullFrameSource__ + % * __VideoFileSource__ wrapper around cv.VideoCapture with a + % video file or image sequence as source.
% * __filename__ name of the opened video file (eg. `video.avi`) - % or image sequence (eg. `img_%02d.jpg`, which will read - % samples like `img_00.jpg`, `img_01.jpg`, `img_02.jpg`, ...) + % or image sequence (eg. `img_%02d.jpg`, which will read samples + % like `img_00.jpg`, `img_01.jpg`, `img_02.jpg`, ...) % % ## Options % * __VolatileFrame__ default false @@ -163,7 +164,7 @@ function setFrameSource(this, frameSourceType, varargin) function value = getFrameSource(this) %GETFRAMESOURCE Get the current input frame source % - % value = stab.getFrameSource() + % value = stab.getFrameSource() % % ## Output % * __value__ output scalar struct @@ -176,128 +177,127 @@ function setFrameSource(this, frameSourceType, varargin) function setMotionEstimator(this, motionEstType, varargin) %SETMOTIONESTIMATOR Set the motion estimating algorithm for the video stabilizer % - % stab.setMotionEstimator(motionEstType, ...) + % stab.setMotionEstimator(motionEstType, ...) % - % stab.setMotionEstimator('KeypointBasedMotionEstimator', {estType, ...}, 'OptionName',optionValue, ...) - % stab.setMotionEstimator('FromFileMotionReader', path, 'OptionName',optionValue, ...) - % stab.setMotionEstimator('ToFileMotionWriter', path, {motionEstType, ...}, 'OptionName',optionValue, ...) + % stab.setMotionEstimator('KeypointBasedMotionEstimator', {estType, ...}, 'OptionName',optionValue, ...) + % stab.setMotionEstimator('FromFileMotionReader', path, 'OptionName',optionValue, ...) + % stab.setMotionEstimator('ToFileMotionWriter', path, {motionEstType, ...}, 'OptionName',optionValue, ...) % % ## Input % * __motionEstType__ Global 2D motion estimation methods which - % take frames as input. One of: - % * __KeypointBasedMotionEstimator__ Describes a global 2D - % motion estimation method which uses keypoints - % detection and optical flow for matching. - % * __FromFileMotionReader__ - % * __ToFileMotionWriter__ + % take frames as input. One of: + % * __KeypointBasedMotionEstimator__ Describes a global 2D + % motion estimation method which uses keypoints detection and + % optical flow for matching. + % * __FromFileMotionReader__ + % * __ToFileMotionWriter__ % * __path__ name of file for motion to read-from/write-to. % * __estType__ Global motion estimation method, which estimates - % global motion between two 2D point clouds as a 3x3 2D - % transformation matrix. One of: - % * __MotionEstimatorL1__ Describes a global 2D motion - % estimation method which minimizes L1 error. - % Note: To be able to use this method you must build - % OpenCV with CLP library support. - % * __MotionEstimatorRansacL2__ Describes a robust - % RANSAC-based global 2D motion estimation method - % which minimizes L2 error. + % global motion between two 2D point clouds as a 3x3 2D + % transformation matrix. One of: + % * __MotionEstimatorL1__ Describes a global 2D motion + % estimation method which minimizes L1 error. Note: To be able + % to use this method you must build OpenCV with CLP library + % support. + % * __MotionEstimatorRansacL2__ Describes a robust RANSAC-based + % global 2D motion estimation method which minimizes L2 error. % % ## Options % The following are options for the various algorithms: % % ### `KeypointBasedMotionEstimator`, `FromFileMotionReader`, `ToFileMotionWriter` % * __MotionModel__ Describes motion model between two point - % clouds. Default is based on the estimation method. 
One of: - % * __Translation__ - % * __TranslationAndScale__ - % * __Rotation__ - % * __Rigid__ - % * __Similarity__ - % * __Affine__ - % * __Homography__ - % * __Unknown__ + % clouds. Default is based on the estimation method. One of: + % * __Translation__ + % * __TranslationAndScale__ + % * __Rotation__ + % * __Rigid__ + % * __Similarity__ + % * __Affine__ + % * __Homography__ + % * __Unknown__ % % ### `KeypointBasedMotionEstimator` % * __Detector__ feature detector, specified in the form: - % `{detectorType, 'OptionName',optionValue, ...}`. - % See cv.FeatureDetector.FeatureDetector for a list of - % supported feature detectors. Default is `{'GFTTDetector'}`. + % `{detectorType, 'OptionName',optionValue, ...}`. See + % cv.FeatureDetector.FeatureDetector for a list of supported + % feature detectors. Default is `{'GFTTDetector'}`. % * __OpticalFlowEstimator__ sparse optical flow estimator - % specified as: `{optflowType, 'OptionName',optionValue, ...}`, - % where `optflowType` is one of: - % * __SparsePyrLkOptFlowEstimator__ (default) wrapper around - % cv.calcOpticalFlowPyrLK. - % * __SparsePyrLkOptFlowEstimatorGpu__ + % specified as: `{optflowType, 'OptionName',optionValue, ...}`, + % where `optflowType` is one of: + % * __SparsePyrLkOptFlowEstimator__ (default) wrapper around + % cv.calcOpticalFlowPyrLK. + % * __SparsePyrLkOptFlowEstimatorGpu__ % * __OutlierRejector__ outlier rejector specified as: - % `{rejectorType, 'OptionName',optionValue, ...}`, where - % `rejectorType` is one of: - % * __NullOutlierRejector__ (default) - % * __TranslationBasedLocalOutlierRejector__ + % `{rejectorType, 'OptionName',optionValue, ...}`, where + % `rejectorType` is one of: + % * __NullOutlierRejector__ (default) + % * __TranslationBasedLocalOutlierRejector__ % % ### `MotionEstimatorL1` and `MotionEstimatorRansacL2` % * __MotionModel__ Describes motion model between two point - % clouds. One of: - % * __Translation__ - % * __TranslationAndScale__ - % * __Rotation__ - % * __Rigid__ - % * __Similarity__ - % * __Affine__ (default) - % * __Homography__ - % * __Unknown__ + % clouds. One of: + % * __Translation__ + % * __TranslationAndScale__ + % * __Rotation__ + % * __Rigid__ + % * __Similarity__ + % * __Affine__ (default) + % * __Homography__ + % * __Unknown__ % % ### `MotionEstimatorRansacL2` % * __MinInlierRatio__ default 0.1 % * __RansacParams__ Describes RANSAC method parameters. A struct - % with the following fields: - % * __Size__ Subset size. - % * __Thresh__ Maximum re-projection error value to classify - % as inlier. - % * __Eps__ Maximum ratio of incorrect correspondences. - % * __Prob__ Required success probability. - % - % If a string is passed, it uses the default RANSAC - % parameters for the given motion model. Here are the - % defaults corresponding to each motion model: - % - % * __Translation__ `struct('Size',1, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __TranslationAndScale__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __Rotation__ `struct('Size',1, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __Rigid__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __Similarity__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __Affine__ `struct('Size',3, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __Homography__ `struct('Size',4, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % - % By default is it set to 'Affine'. + % with the following fields: + % * __Size__ Subset size. + % * __Thresh__ Maximum re-projection error value to classify as + % inlier. 
+ % * __Eps__ Maximum ratio of incorrect correspondences. + % * __Prob__ Required success probability. + % + % If a string is passed, it uses the default RANSAC parameters + % for the given motion model. Here are the defaults + % corresponding to each motion model: + % + % * __Translation__ `struct('Size',1, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __TranslationAndScale__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __Rotation__ `struct('Size',1, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __Rigid__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __Similarity__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __Affine__ `struct('Size',3, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __Homography__ `struct('Size',4, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % + % By default it is set to 'Affine'. % % ### `SparsePyrLkOptFlowEstimator` % * __WinSize__ Size of the search window at each pyramid level. - % default [21,21] + % default [21,21] % * __MaxLevel__ 0-based maximal pyramid level number. default 3 % % ### `TranslationBasedLocalOutlierRejector` % * __CellSize__ default [50,50] % * __RansacParams__ Describes RANSAC method parameters. A struct - % with the following fields: - % * __Size__ Subset size. - % * __Thresh__ Maximum re-projection error value to classify - % as inlier. - % * __Eps__ Maximum ratio of incorrect correspondences. - % * __Prob__ Required success probability. - % - % If a string is passed, it uses the default RANSAC - % parameters for the given motion model. Here are the - % defaults corresponding to each motion model: - % - % * __Translation__ `struct('Size',1, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __TranslationAndScale__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __Rotation__ `struct('Size',1, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __Rigid__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __Similarity__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __Affine__ `struct('Size',3, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % * __Homography__ `struct('Size',4, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` - % - % By default is it set to 'Translation'. + % with the following fields: + % * __Size__ Subset size. + % * __Thresh__ Maximum re-projection error value to classify as + % inlier. + % * __Eps__ Maximum ratio of incorrect correspondences. + % * __Prob__ Required success probability. + % + % If a string is passed, it uses the default RANSAC parameters + % for the given motion model. Here are the defaults + % corresponding to each motion model: + % + % * __Translation__ `struct('Size',1, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __TranslationAndScale__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __Rotation__ `struct('Size',1, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __Rigid__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __Similarity__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __Affine__ `struct('Size',3, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % * __Homography__ `struct('Size',4, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` + % + % By default it is set to 'Translation'. % % The class uses `KeypointBasedMotionEstimator` by default with % `MotionEstimatorRansacL2`.
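To make the options above concrete, here is a minimal configuration sketch built only from the signatures documented in this hunk (the option values shown are the documented defaults, chosen for illustration rather than recommendation):

    stab = cv.TwoPassStabilizer();
    stab.setMotionEstimator('KeypointBasedMotionEstimator', ...
        {'MotionEstimatorRansacL2', 'MotionModel','Affine', ...
         'RansacParams','Affine', 'MinInlierRatio',0.1}, ...
        'Detector',{'GFTTDetector'}, ...
        'OpticalFlowEstimator',{'SparsePyrLkOptFlowEstimator', ...
            'WinSize',[21,21], 'MaxLevel',3}, ...
        'OutlierRejector',{'NullOutlierRejector'});
    value = stab.getMotionEstimator();  % inspect the resulting configuration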
@@ -309,7 +309,7 @@ function setMotionEstimator(this, motionEstType, varargin) function value = getMotionEstimator(this) %GETMOTIONESTIMATOR Get the current motion estimating algorithm % - % value = stab.getMotionEstimator() + % value = stab.getMotionEstimator() % % ## Output % * __value__ output scalar struct @@ -322,15 +322,15 @@ function setMotionEstimator(this, motionEstType, varargin) function setDeblurer(this, deblurerType, varargin) %SETDEBLURER Set the deblurring algorithm for the video stabilizer % - % stab.setDeblurer(deblurerType, ...) + % stab.setDeblurer(deblurerType, ...) % - % stab.setDeblurer('NullDeblurer') - % stab.setDeblurer('WeightingDeblurer', 'OptionName',optionValue, ...) + % stab.setDeblurer('NullDeblurer') + % stab.setDeblurer('WeightingDeblurer', 'OptionName',optionValue, ...) % % ## Input % * __deblurerType__ Deblurring method. One of: - % * __NullDeblurer__ - % * __WeightingDeblurer__ + % * __NullDeblurer__ + % * __WeightingDeblurer__ % % ## Options % * __Radius__ default 0 @@ -345,7 +345,7 @@ function setDeblurer(this, deblurerType, varargin) function value = getDeblurer(this) %GETDEBLURER Gets the current deblurring algorithm % - % value = stab.getDeblurer() + % value = stab.getDeblurer() % % ## Output % * __value__ output scalar struct @@ -358,47 +358,46 @@ function setDeblurer(this, deblurerType, varargin) function setInpainter(this, inpainterType, varargin) %SETINPAINTER Set the inpainting algorithm for the video stabilizer % - % stab.setInpainter(inpainterType, ...) + % stab.setInpainter(inpainterType, ...) % - % stab.setInpainter('NullInpainter') - % stab.setInpainter('ColorInpainter', 'OptionName',optionValue, ...) - % stab.setInpainter('InpaintingPipeline', {{inpainterType, ...}, {inpainterType, ...}, ...}, 'OptionName',optionValue, ...) - % stab.setInpainter('ConsistentMosaicInpainter', 'OptionName',optionValue, ...) - % stab.setInpainter('MotionInpainter', 'OptionName',optionValue, ...) - % stab.setInpainter('ColorAverageInpainter', 'OptionName',optionValue, ...) - % stab.setInpainter('ColorInpainter', 'OptionName',optionValue, ...) + % stab.setInpainter('NullInpainter') + % stab.setInpainter('ColorInpainter', 'OptionName',optionValue, ...) + % stab.setInpainter('InpaintingPipeline', {{inpainterType, ...}, {inpainterType, ...}, ...}, 'OptionName',optionValue, ...) + % stab.setInpainter('ConsistentMosaicInpainter', 'OptionName',optionValue, ...) + % stab.setInpainter('MotionInpainter', 'OptionName',optionValue, ...) + % stab.setInpainter('ColorAverageInpainter', 'OptionName',optionValue, ...) % % ## Input % * __inpainterType__ inpainting method. One of: - % * __NullInpainter__ Null inpainter. - % * __InpaintingPipeline__ A pipeline composed of other - % inpainters, applied in sequence. - % * __ConsistentMosaicInpainter__ - % * __MotionInpainter__ (requires CUDA) - % * __ColorAverageInpainter__ - % * __ColorInpainter__ + % * __NullInpainter__ Null inpainter. + % * __InpaintingPipeline__ A pipeline composed of other + % inpainters, applied in sequence. + % * __ConsistentMosaicInpainter__ + % * __MotionInpainter__ (requires CUDA) + % * __ColorAverageInpainter__ + % * __ColorInpainter__ % % ## Options % The following are options accepted by all algorithms: % % * __MotionModel__ Describes motion model between two point - % clouds.
One of: - % * __Translation__ - % * __TranslationAndScale__ - % * __Rotation__ - % * __Rigid__ - % * __Similarity__ - % * __Affine__ - % * __Homography__ - % * __Unknown__ (default) + % clouds. One of: + % * __Translation__ + % * __TranslationAndScale__ + % * __Rotation__ + % * __Rigid__ + % * __Similarity__ + % * __Affine__ + % * __Homography__ + % * __Unknown__ (default) % * __Radius__ default 0 % % The following are options for the various algorithms: % % ### `ColorInpainter` % * __Method__ Inpainting algorithm. One of: - % * __NS__ Navier-Stokes based method - % * __Telea__ Method by Alexandru Telea (default) + % * __NS__ Navier-Stokes based method + % * __Telea__ Method by Alexandru Telea (default) % * __Radius2__ default 2.0 % % ### `ConsistentMosaicInpainter` @@ -406,9 +406,9 @@ function setInpainter(this, inpainterType, varargin) % % ### `MotionInpainter` % * __OptFlowEstimator__ dense optical flow estimator specified as - % `{optflowType, 'OptionName',optionValue, ...}`, where - % `optflowType` is one of: - % * __DensePyrLkOptFlowEstimatorGpu__ (default, requires CUDA) + % `{optflowType, 'OptionName',optionValue, ...}`, where + % `optflowType` is one of: + % * __DensePyrLkOptFlowEstimatorGpu__ (default, requires CUDA) % * __FlowErrorThreshold__ default 1e-4 % * __DistThreshold__ default 5.0 % * __BorderMode__ default 'Replicate' @@ -426,7 +426,7 @@ function setInpainter(this, inpainterType, varargin) function value = getInpainter(this) %GETINPAINTER Gets the current inpainting algorithm % - % value = stab.getInpainter() + % value = stab.getInpainter() % % ## Output % * __value__ output scalar struct @@ -442,20 +442,19 @@ function setInpainter(this, inpainterType, varargin) function setMotionStabilizer(this, motionStabType, varargin) %SETMOTIONSTABILIZER Set the motion stabilization algorithm for the video stabilizer % - % stab.setMotionStabilizer(motionStabType, ...) + % stab.setMotionStabilizer(motionStabType, ...) % - % stab.setMotionStabilizer('InpaintingPipeline', {{motionStabType, ...}, {motionStabType, ...}, ...}) - % stab.setMotionStabilizer('GaussianMotionFilter', 'OptionName',optionValue, ...) - % stab.setMotionStabilizer('LpMotionStabilizer', 'OptionName',optionValue, ...) + % stab.setMotionStabilizer('MotionStabilizationPipeline', {{motionStabType, ...}, {motionStabType, ...}, ...}) + % stab.setMotionStabilizer('GaussianMotionFilter', 'OptionName',optionValue, ...) + % stab.setMotionStabilizer('LpMotionStabilizer', 'OptionName',optionValue, ...) % % ## Input % * __motionStabType__ motion stabilization method. One of: - % * __MotionStabilizationPipeline__ A pipeline composed of - % other motion stabilizers, applied in sequence. - % * __GaussianMotionFilter__ - % * __LpMotionStabilizer__ Note: To be able to use this - % method you must build OpenCV with CLP library - % support. + % * __MotionStabilizationPipeline__ A pipeline composed of other + % motion stabilizers, applied in sequence. + % * __GaussianMotionFilter__ + % * __LpMotionStabilizer__ Note: To be able to use this method + % you must build OpenCV with CLP library support. % % ## Options % The following are options for the various algorithms: @@ -466,15 +465,15 @@ function setMotionStabilizer(this, motionStabType, varargin) % % ### `LpMotionStabilizer` % * __MotionModel__ Describes motion model between two point - % clouds.
One of: + % * __Translation__ + % * __TranslationAndScale__ + % * __Rotation__ + % * __Rigid__ + % * __Similarity__ (default) + % * __Affine__ + % * __Homography__ + % * __Unknown__ % * __FrameSize__ default [0,0] % * __TrimRatio__ default 0.1 % * __Weight1__ default 1 @@ -491,7 +490,7 @@ function setMotionStabilizer(this, motionStabType, varargin) function value = getMotionStabilizer(this) %GETMOTIONSTABILIZER Get the current motion stabilization algorithm % - % value = stab.getMotionStabilizer() + % value = stab.getMotionStabilizer() % % ## Output % * __value__ output scalar struct @@ -504,23 +503,23 @@ function setMotionStabilizer(this, motionStabType, varargin) function setWobbleSuppressor(this, wobbleSuppressType, varargin) %SETWOBBLESUPPRESSOR Set the wobble suppressing algorithm for the video stabilizer % - % stab.setWobbleSuppressor(wobbleSuppressType, ...) + % stab.setWobbleSuppressor(wobbleSuppressType, ...) % - % stab.setWobbleSuppressor('NullWobbleSuppressor') - % stab.setWobbleSuppressor('MoreAccurateMotionWobbleSuppressor', 'OptionName',optionValue, ...) + % stab.setWobbleSuppressor('NullWobbleSuppressor') + % stab.setWobbleSuppressor('MoreAccurateMotionWobbleSuppressor', 'OptionName',optionValue, ...) % % ## Input % * __wobbleSuppressType__ wobble suppressing method. One of: - % * __NullWobbleSuppressor__ - % * __MoreAccurateMotionWobbleSuppressor__ + % * __NullWobbleSuppressor__ + % * __MoreAccurateMotionWobbleSuppressor__ % % ## Options % * __MotionEstimator__ Global 2D motion estimation method which - % take frames as input, specified as: - % `{motionEstType, {estType, 'key',val, ...}, 'OptionName',optionVal, ...}`. - % See cv.TwoPassStabilizer.setMotionEstimator for details. - % Default is - % `{'KeypointBasedMotionEstimator', {'MotionEstimatorRansacL2', 'MotionModel','Homography'}}` + % take frames as input, specified as: + % `{motionEstType, {estType, 'key',val, ...}, 'OptionName',optionVal, ...}`. + % See cv.TwoPassStabilizer.setMotionEstimator for details. + % Default is + % `{'KeypointBasedMotionEstimator', {'MotionEstimatorRansacL2', 'MotionModel','Homography'}}` % * __Period__ default 30 % % The class uses `NullWobbleSuppressor` by default. @@ -532,7 +531,7 @@ function setWobbleSuppressor(this, wobbleSuppressType, varargin) function value = getWobbleSuppressor(this) %GETWOBBLESUPPRESSOR Get the current wobble suppressing algorithm % - % value = stab.getWobbleSuppressor() + % value = stab.getWobbleSuppressor() % % ## Output % * __value__ output scalar struct @@ -548,26 +547,26 @@ function setWobbleSuppressor(this, wobbleSuppressType, varargin) function ransac = RansacParamsDefault2dMotion(model) %RANSACPARAMSDEFAULT2DMOTION Default RANSAC method parameters for a given motion model % - % ransac = cv.TwoPassStabilizer.RansacParamsDefault2dMotion(model) + % ransac = cv.TwoPassStabilizer.RansacParamsDefault2dMotion(model) % % ## Input % * __model__ Motion model. One of: - % * __Translation__ - % * __TranslationAndScale__ - % * __Rotation__ - % * __Rigid__ - % * __Similarity__ - % * __Affine__ - % * __Homography__ + % * __Translation__ + % * __TranslationAndScale__ + % * __Rotation__ + % * __Rigid__ + % * __Similarity__ + % * __Affine__ + % * __Homography__ % % ## Output % * __ransac__ Default RANSAC method parameters for the given - % motion model. A struct with the following fields: - % * __Size__ Subset size. - % * __Thresh__ Maximum re-projection error value to classify - % as inlier. - % * __Eps__ Maximum ratio of incorrect correspondences. 
- % * __Prob__ Required success probability. + % motion model. A struct with the following fields: + % * __Size__ Subset size. + % * __Thresh__ Maximum re-projection error value to classify as + % inlier. + % * __Eps__ Maximum ratio of incorrect correspondences. + % * __Prob__ Required success probability. % % Here are the parameters corresponding to each motion model: % diff --git a/+cv/Utils.m b/+cv/Utils.m index c47586e5a..4f3afaca0 100644 --- a/+cv/Utils.m +++ b/+cv/Utils.m @@ -8,15 +8,15 @@ function s = checkHardwareSupport() %CHECKHARDWARESUPPORT Returns hardware CPU features % - % s = cv.Utils.checkHardwareSupport() + % s = cv.Utils.checkHardwareSupport() % % ## Output % * __s__ Returns a structure for each CPU feature indicating if - % the feature is supported by the host hardware. When - % `setUseOptimized=false` is called, the subsequent calls to - % `checkHardwareSupport` will return false until - % `setUseOptimized=true` is called. This way user can - % dynamically switch on and off the optimized code in OpenCV. + % the feature is supported by the host hardware. When + % `setUseOptimized=false` is called, the subsequent calls to + % `checkHardwareSupport` will return false until + % `setUseOptimized=true` is called. This way the user can + % dynamically switch the optimized code in OpenCV on and off. % s = Utils_('checkHardwareSupport'); end @@ -24,13 +24,15 @@ function info = getBuildInformation() %GETBUILDINFORMATION Returns full configuration time CMake output % - % info = cv.Utils.getBuildInformation() + % info = cv.Utils.getBuildInformation() % % ## Output % * __info__ Returned value is raw CMake output including version - % control system revision, compiler version, compiler flags, - % enabled modules and third party libraries, etc. Output - % format depends on target architecture. + % control system revision, compiler version, compiler flags, + % enabled modules and third party libraries, etc. Output format + % depends on target architecture. + % + % See also: cv.Utils.version % info = Utils_('getBuildInformation'); end @@ -38,11 +40,13 @@ function v = version() %VERSION Returns OpenCV version % - % v = cv.Utils.version() + % v = cv.Utils.version() % % ## Output % * __v__ current version of OpenCV, in the form - % 'major.minor.revision'. + % 'major.minor.revision'. + % + % See also: cv.Utils.getBuildInformation % v = Utils_('version'); end @@ -50,11 +54,11 @@ function n = getNumberOfCPUs() %GETNUMBEROFCPUS Return number of logical CPUs % - % n = cv.Utils.getNumberOfCPUs() + % n = cv.Utils.getNumberOfCPUs() % % ## Output % * __n__ Returns the number of logical CPUs available for the - % process. + % process. % n = Utils_('getNumberOfCPUs'); end @@ -62,29 +66,28 @@ function n = getNumThreads() %GETNUMTHREADS Returns number of threads used by OpenCV for parallel regions % - % n = cv.Utils.getNumThreads() + % n = cv.Utils.getNumThreads() % % ## Output % * __n__ number of threads. % - % The exact meaning of return value depends on the threading + % The exact meaning of the return value depends on the threading % framework used by OpenCV library: % % * __TBB__ The number of threads, that OpenCV will try to use for - % parallel regions. If there is any - % `tbb::thread_scheduler_init` in user code conflicting with - % OpenCV, then function returns default number of threads - % used by TBB library. + % parallel regions. If there is any `tbb::thread_scheduler_init` + % in user code conflicting with OpenCV, then the function returns + % the default number of threads used by the TBB library.
% * __OpenMP__ An upper bound on the number of threads that could - % be used to form a new team. + % be used to form a new team. % * __Concurrency__ The number of threads, that OpenCV will try to - % use for parallel regions. + % use for parallel regions. % * __GCD__ Unsupported; returns the GCD thread pool limit (512) - % for compatibility. + % for compatibility. % * __C=__ The number of threads, that OpenCV will try to use for - % parallel regions, if before called `setNumThreads` with - % `threads > 0`, otherwise returns the number of logical - % CPUs, available for the process. + % parallel regions, if `setNumThreads` was called before with + % `threads > 0`; otherwise returns the number of logical CPUs + % available for the process. % % See also: cv.Utils.setNumThreads % @@ -94,7 +97,7 @@ function setNumThreads(n) %SETNUMTHREADS Sets number of threads used by OpenCV for parallel regions % - % cv.Utils.setNumThreads(n) + % cv.Utils.setNumThreads(n) % % ## Input % * __n__ number of threads. % % OpenCV will try to set the number of threads for the next % parallel region. % - % OpenCV will try to run it's functions with specified threads + % OpenCV will try to run its functions with specified threads % number, but some behaviour differs from framework: % % * __TBB__ User-defined parallel constructions will run with the - % same threads number, if another does not specified. If - % later on user creates own scheduler, OpenCV will use it. + % same number of threads, if not specified otherwise. If the + % user later creates their own scheduler, OpenCV will use it. % * __OpenMP__ No special defined behaviour. % * __Concurrency__ If `threads == 1`, OpenCV will disable - % threading optimizations and run it's functions - % sequentially. + % threading optimizations and run its functions sequentially. % * __GCD__ Supports only values <= 0. % * __C=__ No special defined behaviour. % @@ -123,11 +125,11 @@ function setNumThreads(n) function tf = useOptimized() %USEOPTIMIZED Returns the status of optimized code usage % - % tf = cv.Utils.useOptimized() + % tf = cv.Utils.useOptimized() % % ## Output % * __tf__ The function returns true if the optimized code is - % enabled. Otherwise, it returns false. + % enabled. Otherwise, it returns false. % % See also: cv.Utils.setUseOptimized % @@ -137,7 +139,7 @@ function setNumThreads(n) function setUseOptimized(tf) %SETUSEOPTIMIZED Enables or disables the optimized code % - % cv.Utils.setUseOptimized(tf) + % cv.Utils.setUseOptimized(tf) % % ## Input % * __tf__ true or false value. % % The function can be used to dynamically turn on and off optimized % code (code that uses SSE2, AVX, and other instructions on the % platforms that support it). It sets a global flag that is % further checked by OpenCV functions. Since the flag is not % checked in the inner OpenCV loops, it is only safe to call the % function on the very top level in your application where you can - % be sure that no other OpenCV function is currently executed. - % By default, the optimized code is enabled unless you disable it - % in CMake. The current status can be retrieved using - % `useOptimized`. + % be sure that no other OpenCV function is currently executed. By + % default, the optimized code is enabled unless you disable it in + % CMake. The current status can be retrieved using `useOptimized`. % % See also: cv.Utils.useOptimized % @@ -159,4 +160,623 @@ function setUseOptimized(tf) end end + % IPP + methods (Static) + function str = getIppVersion() + %GETIPPVERSION Return IPP version string + % + % str = cv.Utils.getIppVersion() + % + % ## Output + % * __str__ version string.
Returns 'disabled' if OpenCV is + % compiled without IPP support. + % + % See also: cv.Utils.useIPP + % + str = Utils_('getIppVersion'); + end + + function tf = useIPP() + %USEIPP Check if use of IPP is enabled + % + % tf = cv.Utils.useIPP() + % + % ## Output + % * __tf__ status flag + % + % Intel Integrated Performance Primitives Library. + % + % See also: cv.Utils.setUseIPP + % + tf = Utils_('useIPP'); + end + + function setUseIPP(tf) + %SETUSEIPP Enable/disable use of IPP + % + % cv.Utils.setUseIPP(tf) + % + % ## Input + % * __tf__ flag + % + % See also: cv.Utils.useIPP + % + Utils_('setUseIPP', tf); + end + + function tf = useIPP_NE() + %USEIPP_NE Check if use of IPP_NE is enabled + % + % tf = cv.Utils.useIPP_NE() + % + % ## Output + % * __tf__ status flag + % + % Intel IPP Not-Exact mode. + % + % See also: cv.Utils.setUseIPP_NE + % + tf = Utils_('useIPP_NE'); + end + + function setUseIPP_NE(tf) + %SETUSEIPP_NE Enable/disable use of IPP_NE + % + % cv.Utils.setUseIPP_NE(tf) + % + % ## Input + % * __tf__ flag + % + % IPP Not-Exact mode. This function may force the use of IPP when + % both IPP and OpenCV provide proper results but have internal + % accuracy differences which have too much direct or indirect + % impact on accuracy tests. + % + % See also: cv.Utils.useIPP_NE + % + Utils_('setUseIPP_NE', tf); + end + end + + % OpenVX + methods (Static) + function tf = haveOpenVX() + %HAVEOPENVX Check if use of OpenVX is possible + % + % tf = cv.Utils.haveOpenVX() + % + % ## Output + % * __tf__ status flag + % + % See also: cv.Utils.useOpenVX + % + tf = Utils_('haveOpenVX'); + end + + function tf = useOpenVX() + %USEOPENVX Check if use of OpenVX is enabled + % + % tf = cv.Utils.useOpenVX() + % + % ## Output + % * __tf__ status flag + % + % See also: cv.Utils.setUseOpenVX + % + tf = Utils_('useOpenVX'); + end + + function setUseOpenVX(tf) + %SETUSEOPENVX Enable/disable use of OpenVX + % + % cv.Utils.setUseOpenVX(tf) + % + % ## Input + % * __tf__ flag + % + % See also: cv.Utils.useOpenVX + % + Utils_('setUseOpenVX', tf); + end + end + + % OpenCL + methods (Static) + function tf = haveOpenCL() + %HAVEOPENCL Check if use of OpenCL is possible + % + % tf = cv.Utils.haveOpenCL() + % + % ## Output + % * __tf__ status flag + % + % See also: cv.Utils.useOpenCL + % + tf = Utils_('haveOpenCL'); + end + + function tf = haveAmdBlas() + %HAVEAMDBLAS Check if have clAmdBlas + % + % tf = cv.Utils.haveAmdBlas() + % + % ## Output + % * __tf__ status flag + % + % AMD's OpenCL Basic Linear Algebra Subprograms Library. + % + % See also: cv.Utils.haveAmdFft + % + tf = Utils_('haveAmdBlas'); + end + + function tf = haveAmdFft() + %HAVEAMDFFT Check if have clAmdFft + % + % tf = cv.Utils.haveAmdFft() + % + % ## Output + % * __tf__ status flag + % + % AMD's OpenCL Fast Fourier Transform Library.
+ % + % See also: cv.Utils.haveAmdBlas + % + tf = Utils_('haveAmdFft'); + end + + function tf = haveSVM() + %HAVESVM Check if have OpenCL Shared Virtual Memory (SVM) + % + % tf = cv.Utils.haveSVM() + % + % ## Output + % * __tf__ status flag + % + % See also: cv.Utils.haveOpenCL + % + tf = Utils_('haveSVM'); + end + + function tf = useOpenCL() + %USEOPENCL Check if use of OpenCL is enabled + % + % tf = cv.Utils.useOpenCL() + % + % ## Output + % * __tf__ status flag + % + % See also: cv.Utils.setUseOpenCL + % + tf = Utils_('useOpenCL'); + end + + function setUseOpenCL(tf) + %SETUSEOPENCL Enable/disable use of OpenCL + % + % cv.Utils.setUseOpenCL(tf) + % + % ## Input + % * __tf__ flag + % + % See also: cv.Utils.useOpenCL + % + Utils_('setUseOpenCL', tf); + end + + function platforms = getPlatfomsInfo() + %GETPLATFOMSINFO Get information about OpenCL devices + % + % platforms = cv.Utils.getPlatfomsInfo() + % + % ## Output + % * __platforms__ struct-array of information about OpenCL + % platforms: + % * __name__ Platform name string. + % * __vendor__ Platform vendor string. + % * __version__ OpenCL version string supported by the + % implementation. + % * __device__ struct-array of information about OpenCL device: + % * __name__ Device name string. + % * __extensions__ Space separated list of extension names. + % * __version__ OpenCL version string supported by the device. + % * __vendorName__ Vendor name string. + % * __vendorID__ Vendor ID string. + % * **OpenCL_C_Version** The highest OpenCL C version + % supported by the compiler for this device. + % * __OpenCLVersion__ OpenCL version string supported by the + % device. + % * __deviceVersionMajor__ OpenCL major version supported by + % the device. + % * __deviceVersionMinor__ OpenCL minor version supported by + % the device. + % * __driverVersion__ OpenCL software driver version string. + % * __type__ The OpenCL device type (CPU, GPU, etc.). + % * __addressBits__ The default compute device address space + % size, 32 or 64 bits. + % * __available__ true if the device is available and false if + % the device is not available. + % * __compilerAvailable__ Is false if the implementation does + % not have a compiler available to compile the program + % source. Is true if the compiler is available. + % * __linkerAvailable__ Is false if the implementation does + % not have a linker available. Is true if the linker is + % available. + % * __doubleFPConfig__ Describes double precision + % floating-point capability of the device. + % * __singleFPConfig__ Describes single precision + % floating-point capability of the device: + % * __Denorm__ denorms are supported. + % * __InfNaN__ INF and NaNs are supported. + % * __RoundToNearest__ round to nearest even rounding mode + % supported. + % * __RoundToZero__ round to zero rounding mode supported. + % * __RoundToInf__ round to positive and negative infinity + % rounding modes supported. + % * __FMA__ IEEE754-2008 fused multiply-add is supported. + % * __SoftFloat__ Basic floating-point operations (such as + % addition, subtraction, multiplication) are implemented + % in software. + % * __halfFPConfig__ Describes the optional half precision + % floating-point capability of the device. + % * __endianLittle__ Is true if the OpenCL device is a little + % endian device and false otherwise. + % * __errorCorrectionSupport__ Is true if the device + % implements error correction for all accesses to compute + % device memory (global and constant). Is false if the + % device does not implement such error correction. 
+ % * __executionCapabilities__ Describes the execution
+ % capabilities of the device:
+ % * __Kernel__ The OpenCL device can execute OpenCL kernels.
+ % * __NativeKernel__ The OpenCL device can execute native
+ % kernels.
+ % * __globalMemCacheSize__ Size of global memory cache in
+ % bytes.
+ % * __globalMemCacheType__ Type of global memory cache
+ % supported, one of: 'NoCache', 'ReadOnlyCache',
+ % 'ReadWriteCache'.
+ % * __globalMemCacheLineSize__ Size of global memory cache
+ % line in bytes.
+ % * __globalMemSize__ Size of global device memory in bytes.
+ % * __localMemSize__ Size of local memory arena in bytes.
+ % * __localMemType__ Type of local memory supported (local or
+ % global).
+ % * __hostUnifiedMemory__ Is true if the device and the host
+ % have a unified memory subsystem and is false otherwise.
+ % * __imageSupport__ Is true if images are supported by the
+ % OpenCL device and false otherwise.
+ % * __imageFromBufferSupport__ Is true if
+ % "cl_khr_image2d_from_buffer" extension is supported.
+ % * __imagePitchAlignment__ The row pitch alignment size in
+ % pixels for 2D images created from a buffer.
+ % * __imageBaseAddressAlignment__
+ % * __image2DMaxWidth__ Max width of 2D image or 1D image not
+ % created from a buffer object in pixels.
+ % * __image2DMaxHeight__ Max height of 2D image in pixels.
+ % * __image3DMaxWidth__ Max width of 3D image in pixels.
+ % * __image3DMaxHeight__ Max height of 3D image in pixels.
+ % * __image3DMaxDepth__ Max depth of 3D image in pixels.
+ % * __imageMaxBufferSize__ Max number of pixels for a 1D image
+ % created from a buffer object.
+ % * __imageMaxArraySize__ Max number of images in a 1D or 2D
+ % image array.
+ % * __maxClockFrequency__ Maximum configured clock frequency
+ % of the device in MHz.
+ % * __maxComputeUnits__ The number of parallel compute units
+ % on the OpenCL device. A work-group executes on a single
+ % compute unit.
+ % * __maxConstantArgs__ Max number of arguments declared with
+ % the `__constant` qualifier in a kernel.
+ % * __maxConstantBufferSize__ Max size in bytes of a constant
+ % buffer allocation.
+ % * __maxMemAllocSize__ Max size of memory object allocation
+ % in bytes.
+ % * __maxParameterSize__ Max size in bytes of all arguments
+ % that can be passed to a kernel.
+ % * __maxReadImageArgs__ Max number of image objects arguments
+ % of a kernel declared with the `read_only` qualifier.
+ % * __maxWriteImageArgs__ Max number of image objects
+ % arguments of a kernel declared with the `write_only`
+ % qualifier.
+ % * __maxSamplers__ Maximum number of samplers that can be
+ % used in a kernel.
+ % * __maxWorkGroupSize__ Maximum number of work-items in a
+ % work-group executing a kernel on a single compute unit,
+ % using the data parallel execution model.
+ % * __maxWorkItemDims__ Maximum dimensions that specify the
+ % global and local work-item IDs used by the data parallel
+ % execution model.
+ % * __maxWorkItemSizes__ Maximum number of work-items that can
+ % be specified in each dimension of the work-group.
+ % * __memBaseAddrAlign__ The minimum value is the size
+ % (in bits) of the largest OpenCL built-in data type
+ % supported by the device.
+ % * __nativeVectorWidthChar__, __nativeVectorWidthShort__,
+ % __nativeVectorWidthInt__, __nativeVectorWidthLong__,
+ % __nativeVectorWidthFloat__, __nativeVectorWidthDouble__,
+ % __nativeVectorWidthHalf__ The native ISA vector width. The
+ % vector width is defined as the number of scalar elements
+ % that can be stored in the vector.
+ % * __preferredVectorWidthChar__, __preferredVectorWidthShort__,
+ % __preferredVectorWidthInt__, __preferredVectorWidthLong__,
+ % __preferredVectorWidthFloat__, __preferredVectorWidthDouble__,
+ % __preferredVectorWidthHalf__ Preferred native vector width
+ % size for built-in scalar types that can be put into
+ % vectors. The vector width is defined as the number of
+ % scalar elements that can be stored in the vector.
+ % * __printfBufferSize__ Maximum size in bytes of the internal
+ % buffer that holds the output of `printf` calls from a
+ % kernel.
+ % * __profilingTimerResolution__ Resolution of timer, i.e. the
+ % number of nanoseconds elapsed before the timer is
+ % incremented.
+ %
+ % See OpenCL
+ % [docs](https://www.khronos.org/registry/OpenCL/sdk/2.0/docs/man/xhtml/clGetDeviceInfo.html).
+ %
+ % If OpenCV is compiled without OpenCL support, the function
+ % returns an empty struct.
+ %
+ % See also: cv.Utils.haveOpenCL
+ %
+ platforms = Utils_('getPlatfomsInfo');
+ end
+ end
+
+ % CUDA
+ methods (Static)
+ function num = getCudaEnabledDeviceCount()
+ %GETCUDAENABLEDDEVICECOUNT Returns the number of installed CUDA-enabled devices
+ %
+ % num = cv.Utils.getCudaEnabledDeviceCount()
+ %
+ % ## Output
+ % * __num__ number of installed CUDA devices.
+ %
+ % Use this function before any other CUDA function calls.
+ % If OpenCV is compiled without CUDA support, this function
+ % returns 0. If the CUDA driver is not installed, or is
+ % incompatible, this function returns -1.
+ %
+ % Other CUDA functions will throw an error if there is no
+ % CUDA support.
+ %
+ % See also: cv.Utils.getDevice, cv.Utils.setDevice
+ %
+ num = Utils_('getCudaEnabledDeviceCount');
+ end
+
+ function device = getDevice()
+ %GETDEVICE Returns the current device index
+ %
+ % device = cv.Utils.getDevice()
+ %
+ % ## Output
+ % * __device__ System index of current CUDA device.
+ %
+ % Returns the current device index set by cv.Utils.setDevice or
+ % initialized by default.
+ %
+ % See also: cv.Utils.setDevice
+ %
+ device = Utils_('getDevice');
+ end
+
+ function setDevice(device)
+ %SETDEVICE Sets a device and initializes it for the current thread
+ %
+ % cv.Utils.setDevice(device)
+ %
+ % ## Input
+ % * __device__ System index of a CUDA device starting with 0.
+ %
+ % If the call of this function is omitted, a default device is
+ % initialized at the first CUDA usage.
+ %
+ % See also: cv.Utils.getDevice
+ %
+ Utils_('setDevice', device);
+ end
+
+ function resetDevice()
+ %RESETDEVICE Explicitly destroys and cleans up all resources associated with the current device in the current process
+ %
+ % cv.Utils.resetDevice()
+ %
+ % Any subsequent API call to this device will reinitialize the
+ % device.
+ %
+ % See also: cv.Utils.setDevice
+ %
+ Utils_('resetDevice');
+ end
+
+ function s = deviceSupports()
+ %DEVICESUPPORTS Checks feature support of the current device
+ %
+ % s = cv.Utils.deviceSupports()
+ %
+ % ## Output
+ % * __s__ Returns a structure with a field for each CUDA
+ % computing feature, indicating whether the feature is
+ % supported by the CUDA device.
+ %
+ % See also: cv.Utils.setDevice
+ %
+ s = Utils_('deviceSupports');
+ end
+
+ function printCudaDeviceInfo(device)
+ %PRINTCUDADEVICEINFO Print CUDA device info
+ %
+ % cv.Utils.printCudaDeviceInfo(device)
+ %
+ % ## Input
+ % * __device__ System index of a CUDA device starting with 0.
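+ %
+ % ## Example
+ % A minimal usage sketch (assumes OpenCV was built with CUDA and
+ % at least one CUDA device is installed):
+ %
+ %     if cv.Utils.getCudaEnabledDeviceCount() > 0
+ %         cv.Utils.printCudaDeviceInfo(0); % info about device 0
+ %     end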
+ %
+ % See also: cv.Utils.printShortCudaDeviceInfo
+ %
+ Utils_('printCudaDeviceInfo', device);
+ end
+
+ function printShortCudaDeviceInfo(device)
+ %PRINTSHORTCUDADEVICEINFO Print short CUDA device info
+ %
+ % cv.Utils.printShortCudaDeviceInfo(device)
+ %
+ % ## Input
+ % * __device__ System index of a CUDA device starting with 0.
+ %
+ % See also: cv.Utils.setDevice
+ %
+ Utils_('printShortCudaDeviceInfo', device);
+ end
+
+ function dinfo = deviceInfo(device)
+ %DEVICEINFO Return CUDA device info
+ %
+ % dinfo = cv.Utils.deviceInfo(device)
+ %
+ % ## Input
+ % * __device__ System index of a CUDA device starting with 0.
+ %
+ % ## Output
+ % * __dinfo__ scalar struct of information about CUDA device:
+ % * __deviceID__ system index of the CUDA device starting with 0
+ % * __name__ ASCII string identifying device
+ % * __totalGlobalMem__ global memory available on device in bytes
+ % * __sharedMemPerBlock__ shared memory available per block in
+ % bytes
+ % * __regsPerBlock__ 32-bit registers available per block
+ % * __warpSize__ warp size in threads
+ % * __memPitch__ maximum pitch in bytes allowed by memory copies
+ % * __maxThreadsPerBlock__ maximum number of threads per block
+ % * __maxThreadsDim__ maximum size of each dimension of a block
+ % * __maxGridSize__ maximum size of each dimension of a grid
+ % * __clockRate__ clock frequency in kilohertz
+ % * __totalConstMem__ constant memory available on device in
+ % bytes
+ % * __majorVersion__ major compute capability
+ % * __minorVersion__ minor compute capability
+ % * __textureAlignment__ alignment requirement for textures
+ % * __texturePitchAlignment__ pitch alignment requirement for
+ % texture references bound to pitched memory
+ % * __multiProcessorCount__ number of multiprocessors on device
+ % * __kernelExecTimeoutEnabled__ specifies whether there is a
+ % run time limit on kernels
+ % * __integrated__ device is integrated as opposed to discrete
+ % * __canMapHostMemory__ device can map host memory with
+ % `cudaHostAlloc`/`cudaHostGetDevicePointer`
+ % * __computeMode__ compute mode, one of:
+ % * __Default__ default compute mode (Multiple threads can use
+ % `cudaSetDevice` with this device)
+ % * __Exclusive__ compute-exclusive-thread mode (Only one
+ % thread in one process will be able to use `cudaSetDevice`
+ % with this device)
+ % * __Prohibited__ compute-prohibited mode (No threads can use
+ % `cudaSetDevice` with this device)
+ % * __ExclusiveProcess__ compute-exclusive-process mode (Many
+ % threads in one process will be able to use `cudaSetDevice`
+ % with this device)
+ % * __maxTexture1D__ maximum 1D texture size
+ % * __maxTexture1DMipmap__ maximum 1D mipmapped texture size
+ % * __maxTexture1DLinear__ maximum size for 1D textures bound to
+ % linear memory
+ % * __maxTexture2D__ maximum 2D texture dimensions
+ % * __maxTexture2DMipmap__ maximum 2D mipmapped texture
+ % dimensions
+ % * __maxTexture2DLinear__ maximum dimensions (width, height,
+ % pitch) for 2D textures bound to pitched memory
+ % * __maxTexture2DGather__ maximum 2D texture dimensions if
+ % texture gather operations have to be performed
+ % * __maxTexture3D__ maximum 3D texture dimensions
+ % * __maxTextureCubemap__ maximum Cubemap texture dimensions
+ % * __maxTexture1DLayered__ maximum 1D layered texture dimensions
+ % * __maxTexture2DLayered__ maximum 2D layered texture dimensions
+ % * __maxTextureCubemapLayered__ maximum Cubemap layered texture
+ % dimensions
+ % * __maxSurface1D__ maximum 1D surface size
+ % * __maxSurface2D__ maximum 2D surface dimensions
+ % * __maxSurface3D__ maximum 3D surface dimensions
+ % * __maxSurface1DLayered__ maximum 1D layered surface dimensions
+ % * __maxSurface2DLayered__ maximum 2D layered surface dimensions
+ % * __maxSurfaceCubemap__ maximum Cubemap surface dimensions
+ % * __maxSurfaceCubemapLayered__ maximum Cubemap layered surface
+ % dimensions
+ % * __surfaceAlignment__ alignment requirements for surfaces
+ % * __concurrentKernels__ device can possibly execute multiple
+ % kernels concurrently
+ % * __ECCEnabled__ device has ECC support enabled
+ % * __pciBusID__ PCI bus ID of the device
+ % * __pciDeviceID__ PCI device ID of the device
+ % * __pciDomainID__ PCI domain ID of the device
+ % * __tccDriver__ true if device is a Tesla device using TCC
+ % driver, false otherwise
+ % * __asyncEngineCount__ number of asynchronous engines
+ % * __unifiedAddressing__ device shares a unified address space
+ % with the host
+ % * __memoryClockRate__ peak memory clock frequency in kilohertz
+ % * __memoryBusWidth__ global memory bus width in bits
+ % * __l2CacheSize__ size of L2 cache in bytes
+ % * __maxThreadsPerMultiProcessor__ maximum resident threads per
+ % multiprocessor
+ % * __freeMemory__ free device memory in bytes
+ % * __totalMemory__ total device memory in bytes
+ % * __supports__ Struct which provides information on CUDA
+ % feature support. Each field is true if the device has the
+ % given CUDA feature. Otherwise, it is false:
+ % * __Compute10__ Compute Capability 1.0
+ % * __Compute11__ Compute Capability 1.1
+ % * __Compute12__ Compute Capability 1.2
+ % * __Compute13__ Compute Capability 1.3
+ % * __Compute20__ Compute Capability 2.0
+ % * __Compute21__ Compute Capability 2.1
+ % * __Compute30__ Compute Capability 3.0
+ % * __Compute32__ Compute Capability 3.2
+ % * __Compute35__ Compute Capability 3.5
+ % * __Compute50__ Compute Capability 5.0
+ % * __GlobalAtomics__ same as 'Compute11'
+ % * __SharedAtomics__ same as 'Compute12'
+ % * __NativeDouble__ same as 'Compute13'
+ % * __WarpShuffleFunctions__ same as 'Compute30'
+ % * __DynamicParallelism__ same as 'Compute35'
+ % * __isCompatible__ Checks the CUDA module and device
+ % compatibility. Is true if the CUDA module can be run on the
+ % specified device. Otherwise, it is false.
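+ %
+ % ## Example
+ % A small usage sketch (assumes a CUDA-enabled build and that
+ % device 0 exists):
+ %
+ %     dinfo = cv.Utils.deviceInfo(0);
+ %     fprintf('%s (compute capability %d.%d)\n', ...
+ %         dinfo.name, dinfo.majorVersion, dinfo.minorVersion);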
+ %
+ % See also: cv.Utils.printCudaDeviceInfo
+ %
+ dinfo = Utils_('deviceInfo', device);
+ end
+ end
+
+ % Tegra
+ methods (Static)
+ function tf = useTegra()
+ %USETEGRA Check if use of Tegra is enabled
+ %
+ % tf = cv.Utils.useTegra()
+ %
+ % ## Output
+ % * __tf__ status flag
+ %
+ % Tegra is Nvidia's system on a chip (SoC).
+ %
+ % See also: cv.Utils.setUseTegra
+ %
+ tf = Utils_('useTegra');
+ end
+
+ function setUseTegra(tf)
+ %SETUSETEGRA Enable/disable use of Tegra
+ %
+ % cv.Utils.setUseTegra(tf)
+ %
+ % ## Input
+ % * __tf__ flag
+ %
+ % See also: cv.Utils.useTegra
+ %
+ Utils_('setUseTegra', tf);
+ end
+ end
+ end
diff --git a/+cv/VideoCapture.m b/+cv/VideoCapture.m
index 5e7abae14..f46054788 100644
--- a/+cv/VideoCapture.m
+++ b/+cv/VideoCapture.m
@@ -7,19 +7,19 @@
% ## Example
% Here is how the class can be used:
%
- % cap = cv.VideoCapture(0); % open the default camera
- % pause(3); % see note below
- % if ~cap.isOpened() % check if we succeeded
- % error('camera failed to initialized');
- % end
- % for t=1:30
- % frame = cap.read(); % get a new frame from camera
- % imshow(frame);
- % pause(0.1);
- % end
- % % the camera will be deinitialized automatically in destructor
- % % when "cap" goes out of scope
- % % you can also explicitly call cap.release() to close the camera
+ % cap = cv.VideoCapture(0); % open the default camera
+ % pause(3); % see note below
+ % if ~cap.isOpened() % check if we succeeded
+ % error('camera failed to initialize');
+ % end
+ % for t=1:30
+ % frame = cap.read(); % get a new frame from camera
+ % imshow(frame);
+ % pause(0.1);
+ % end
+ % % the camera will be deinitialized automatically in destructor
+ % % when "cap" goes out of scope
+ % % you can also explicitly call cap.release() to close the camera
%
% Note: In some environments, there is a concurrency issue during camera
% initialization. To avoid unexpected crash, pause for a few seconds after
@@ -33,7 +33,7 @@
% Basically, the module provides the cv.VideoCapture and cv.VideoWriter
% classes as 2-layer interface to many video I/O APIs used as backend.
%
- % ![image](http://docs.opencv.org/3.2.0/videoio_overview.svg)
+ % ![image](https://docs.opencv.org/3.3.1/videoio_overview.svg)
%
% Some backends such as (`DirectShow`) Direct Show, Video For Windows
% (`VfW`), Microsoft Media Foundation (`MediaFoundation`), Video 4 Linux
@@ -59,19 +59,19 @@
%
% For example to grab from default camera using Direct Show as backend
%
- % % declare a capture object
- % cap = cv.VideoCapture(0, 'API','DirectShow')
+ % % declare a capture object
+ % cap = cv.VideoCapture(0, 'API','DirectShow')
%
- % % or specify the API preference with open
- % cap.open(0, 'API','DirectShow');
+ % % or specify the API preference with open
+ % cap.open(0, 'API','DirectShow');
%
% If you want to grab from a file using the Direct Show as backend:
%
- % % declare a capture object
- % cap = cv.VideoCapture(filename, 'API','DirectShow')
+ % % declare a capture object
+ % cap = cv.VideoCapture(filename, 'API','DirectShow')
%
- % % or specify the API preference with open
- % cap.open(filename, 'API','DirectShow');
+ % % or specify the API preference with open
+ % cap.open(filename, 'API','DirectShow');
%
% ### Enable backends
% Backends are available only if they have been built with your OpenCV
@@ -160,10 +160,10 @@
function this = VideoCapture(varargin)
%VIDEOCAPTURE Open video file or a capturing device or a IP video stream for video capturing
%
- % cap = cv.VideoCapture()
- % cap = cv.VideoCapture(index)
- % cap = cv.VideoCapture(filename)
- % cap = cv.VideoCapture(..., 'API',apiPreference)
+ % cap = cv.VideoCapture()
+ % cap = cv.VideoCapture(index)
+ % cap = cv.VideoCapture(filename)
+ % cap = cv.VideoCapture(..., 'API',apiPreference)
%
% ## Output
% * __cap__ New instance of the VideoCapture
@@ -190,7 +190,7 @@ function delete(this)
%DELETE Destructor
%
- % cap.delete()
+ % cap.delete()
%
% The method first calls cv.VideoCapture.release to close the
% already opened file or camera.
@@ -204,69 +204,69 @@ function delete(this)
function successFlag = open(this, index, varargin)
%OPEN Open video file or a capturing device or a IP video stream for video capturing
%
- % successFlag = cap.open(index)
- % successFlag = cap.open(filename)
- % successFlag = cap.open(..., 'API',apiPreference)
+ % successFlag = cap.open(index)
+ % successFlag = cap.open(filename)
+ % successFlag = cap.open(..., 'API',apiPreference)
%
% ## Input
% * __index__ id of the video capturing device to open. To open
- % default camera using default backend just pass 0.
+ % default camera using default backend just pass 0.
% * __filename__ it can be:
- % * name of video file (eg. `video.avi`)
- % * or image sequence (eg. `img_%02d.jpg`, which will read
- % samples like `img_00.jpg`, img_01.jpg`, img_02.jpg`, ...)
- % * or URL of video stream
- % (eg. `protocol://host:port/script_name?script_params|auth`).
- % Note that each video stream or IP camera feed has its
- % own URL scheme. Please refer to the documentation of
- % source stream to know the right URL.
- % * or device name (for backends like V4L or gPhoto2)
+ % * name of video file (eg. `video.avi`)
+ % * or image sequence (eg. `img_%02d.jpg`, which will read
+ % samples like `img_00.jpg`, `img_01.jpg`, `img_02.jpg`, ...)
+ % * or URL of video stream
+ % (eg. `protocol://host:port/script_name?script_params|auth`).
+ % Note that each video stream or IP camera feed has its own
+ % URL scheme. Please refer to the documentation of source
+ % stream to know the right URL.
+ % * or device name (for backends like V4L or gPhoto2)
%
% ## Output
% * __successFlag__ bool, true if the file/camera has been
- % successfully opened.
+ % successfully opened.
%
% ## Options
% * __API__ preferred capture API backend to use. Can be used to
- % enforce a specific reader implementation if multiple are
- % available: e.g. 'FFMPEG' or 'Images' or 'DirectShow'. The
- % list of supported API backends:
- % * __Any__ Auto detect. This is the default.
- % * __VfW__ Video For Windows (platform native).
- % * __V4L__ V4L/V4L2 capturing support via libv4l.
- % * __V4L2__ Same as V4L.
- % * __FireWire__ IEEE 1394 drivers.
- % * __FireWare__ Same as FireWire.
- % * __IEEE1394__ Same as FireWire.
- % * __DC1394__ Same as FireWire.
- % * __CMU1394__ Same as FireWire.
- % * __QuickTime__ QuickTime.
- % * __Unicap__ Unicap drivers.
- % * __DirectShow__ DirectShow (via videoInput).
- % * __PvAPI__ PvAPI, Prosilica GigE SDK.
- % * __OpenNI__ OpenNI (for Kinect).
- % * __OpenNIAsus__ OpenNI (for Asus Xtion).
- % * __Android__ Android - not used.
- % * __XIMEA__ XIMEA Camera API.
- % * __AVFoundation__ AVFoundation framework for iOS
- % (OS X Lion will have the same API).
- % * __Giganetix__ Smartek Giganetix GigEVisionSDK.
- % * __MediaFoundation__ Microsoft Media Foundation
- % (via videoInput).
- % * __WinRT__ Microsoft Windows Runtime using Media
- % Foundation.
- % * __IntelPerC__ Intel Perceptual Computing SDK.
- % * __OpenNI2__ OpenNI2 (for Kinect).
- % * __OpenNI2Asus__ OpenNI2 (for Asus Xtion and
- % Occipital Structure sensors).
- % * __gPhoto2__ gPhoto2 connection.
- % * __GStreamer__ GStreamer.
- % * __FFMPEG__ Open video file or stream using the FFMPEG
- % library.
- % * __Images__ OpenCV Image Sequence (e.g. `img_%02d.jpg`).
- % * __Aravis__ Aravis GigE SDK.
- % * __MotionJPEG__ Built-in OpenCV MotionJPEG codec.
- % * __MediaSDK__ Intel MediaSDK.
+ % enforce a specific reader implementation if multiple are + % available: e.g. 'FFMPEG' or 'Images' or 'DirectShow'. The list + % of supported API backends: + % * __Any__ Auto detect. This is the default. + % * __VfW__ Video For Windows (platform native). + % * __V4L__ V4L/V4L2 capturing support via libv4l. + % * __V4L2__ Same as V4L. + % * __FireWire__ IEEE 1394 drivers. + % * __FireWare__ Same as FireWire. + % * __IEEE1394__ Same as FireWire. + % * __DC1394__ Same as FireWire. + % * __CMU1394__ Same as FireWire. + % * __QuickTime__ QuickTime. + % * __Unicap__ Unicap drivers. + % * __DirectShow__ DirectShow (via videoInput). + % * __PvAPI__ PvAPI, Prosilica GigE SDK. + % * __OpenNI__ OpenNI (for Kinect). + % * __OpenNIAsus__ OpenNI (for Asus Xtion). + % * __Android__ Android - not used. + % * __XIMEA__ XIMEA Camera API. + % * __AVFoundation__ AVFoundation framework for iOS + % (OS X Lion will have the same API). + % * __Giganetix__ Smartek Giganetix GigEVisionSDK. + % * __MediaFoundation__ Microsoft Media Foundation + % (via videoInput). + % * __WinRT__ Microsoft Windows Runtime using Media + % Foundation. + % * __IntelPerC__ Intel Perceptual Computing SDK. + % * __OpenNI2__ OpenNI2 (for Kinect). + % * __OpenNI2Asus__ OpenNI2 (for Asus Xtion and Occipital + % Structure sensors). + % * __gPhoto2__ gPhoto2 connection. + % * __GStreamer__ GStreamer. + % * __FFMPEG__ Open video file or stream using the FFMPEG + % library. + % * __Images__ OpenCV Image Sequence (e.g. `img_%02d.jpg`). + % * __Aravis__ Aravis GigE SDK. + % * __MotionJPEG__ Built-in OpenCV MotionJPEG codec. + % * __MediaSDK__ Intel MediaSDK. % % The method first calls cv.VideoCapture.release to close the % already opened file or camera. @@ -277,9 +277,9 @@ function delete(this) % 'DirectShow'. For example, to open camera 1 using the % "MS Media Foundation" API: % - % cap.open(1, 'API','MediaFoundation') + % cap.open(1, 'API','MediaFoundation') % - % ## Note + % ### Note % Backends are available only if they have been built with your % OpenCV binaries. % Check your build to know which APIs are currently available. @@ -295,7 +295,7 @@ function delete(this) function retval = isOpened(this) %ISOPENED Returns true if video capturing has been initialized already % - % retval = cap.isOpened() + % retval = cap.isOpened() % % ## Output % * __retval__ bool, return value @@ -311,7 +311,7 @@ function delete(this) function release(this) %RELEASE Closes video file or capturing device % - % cap.release() + % cap.release() % % The method is automatically called by subsequent % cv.VideoCapture.open and by destructor. @@ -324,16 +324,16 @@ function release(this) function frame = read(this, varargin) %READ Grabs, decodes and returns the next video frame % - % frame = cap.read() - % frame = cap.read('OptionName',optionValue, ...) + % frame = cap.read() + % frame = cap.read('OptionName',optionValue, ...) % % ## Output % * __frame__ output image % % ## Options % * __FlipChannels__ in case the output is color image, flips the - % color order from OpenCV's BGR to MATLAB's RGB order. - % default true + % color order from OpenCV's BGR to MATLAB's RGB order. + % default true % % The method combines cv.VideoCapture.grab and % cv.VideoCapture.retrieve in one call. This is the most @@ -350,7 +350,7 @@ function release(this) function successFlag = grab(this) %GRAB Grabs the next frame from video file or capturing device % - % successFlag = cap.grab() + % successFlag = cap.grab() % % ## Output % * __successFlag__ bool, true (non-zero) in the case of success. 
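+ %
+ % ## Example
+ % A sketch of the grab/retrieve pattern (assumes a working default
+ % camera):
+ %
+ %     cap = cv.VideoCapture(0);
+ %     if cap.grab()
+ %         frame = cap.retrieve();
+ %     end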
@@ -381,19 +381,19 @@ function release(this) function frame = retrieve(this, varargin) %RETRIEVE Decodes and returns the grabbed video frame % - % frame = cap.retrieve() - % frame = cap.retrieve('OptionName',optionValue, ...) + % frame = cap.retrieve() + % frame = cap.retrieve('OptionName',optionValue, ...) % % ## Output % * __frame__ the video frame is returned here. If no frames has - % been grabbed the image will be empty. + % been grabbed the image will be empty. % % ## Options % * __StreamIdx__ 0-based index (for multi-head camera). It could - % be a frame index or a driver specific flag. default 0 + % be a frame index or a driver specific flag. default 0 % * __FlipChannels__ in case the output is color image, flips the - % color order from OpenCV's BGR to MATLAB's RGB order. - % default true + % color order from OpenCV's BGR to MATLAB's RGB order. + % default true % % The function decodes and returns the just grabbed frame. If no % frames has been grabbed (camera has been disconnected, or there @@ -408,19 +408,19 @@ function release(this) function value = get(this, prop) %GET Returns the specified VideoCapture property % - % value = cap.get(prop) + % value = cap.get(prop) % % ## Input % * __prop__ Property identifier. It can be specified as a string - % (one of the recognized properties), or directly as its - % corresponding integer code. + % (one of the recognized properties), or directly as its + % corresponding integer code. % % ## Output % * __value__ Value for the specified property (as a `double`). - % Value 0 is returned when querying a property that is not - % supported by the backend used by the VideoCapture instance. + % Value 0 is returned when querying a property that is not + % supported by the backend used by the VideoCapture instance. % - % ## Note + % ### Note % Reading/writing properties involves many layers. Some unexpected % result might happens along this chain. % `VideoCapture -> API Backend -> Operating System -> Device Driver -> Device Hardware` @@ -432,9 +432,9 @@ function release(this) % ## Example % All the following are equivalent: % - % b = cap.Brightness - % b = cap.get('Brightness') - % b = cap.get(10) % enum value defined in OpenCV source code + % b = cap.Brightness + % b = cap.get('Brightness') + % b = cap.get(10) % enum value defined in OpenCV source code % % See also: cv.VideoCapture.set % @@ -444,12 +444,12 @@ function release(this) function set(this, prop, value) %SET Sets a property in the VideoCapture % - % cap.set(prop, value) + % cap.set(prop, value) % % ## Input % * __prop__ Property identifier. It can be specified as a string - % (one of the recognized properties), or directly as its - % corresponding integer code. + % (one of the recognized properties), or directly as its + % corresponding integer code. % * __value__ Value of the property (as a `double`). 
% % On failure (unsupported property by backend), the function diff --git a/+cv/VideoWriter.m b/+cv/VideoWriter.m index 8ab04232b..8755c5b6d 100644 --- a/+cv/VideoWriter.m +++ b/+cv/VideoWriter.m @@ -9,9 +9,9 @@ % ## Example % Here is how to write to a video file: % - % vid = cv.VideoWriter('myvideo.mpg', [640,480]); - % vid.write(im); % add a frame - % clear vid; % finish + % vid = cv.VideoWriter('myvideo.mpg', [640,480]); + % vid.write(im); % add a frame + % clear vid; % finish % % See also: cv.VideoWriter.VideoWriter, cv.VideoWriter.write, % cv.VideoCapture, VideoWriter, avifile, movie2avi, @@ -42,9 +42,9 @@ function this = VideoWriter(varargin) %VIDEOWRITER VideoWriter constructor % - % vid = cv.VideoWriter() - % vid = cv.VideoWriter(filename, frameSize) - % vid = cv.VideoWriter(filename, frameSize, 'OptionName',optionValue, ...) + % vid = cv.VideoWriter() + % vid = cv.VideoWriter(filename, frameSize) + % vid = cv.VideoWriter(filename, frameSize, 'OptionName',optionValue, ...) % % ## Output % * __vid__ New instance of the VideoWriter @@ -67,7 +67,7 @@ function delete(this) %DELETE Destructor % - % vid.delete() + % vid.delete() % % The method first calls cv.VideoWriter.release to close the % already opened file. @@ -81,55 +81,54 @@ function delete(this) function successFlag = open(this, filename, frameSize, varargin) %OPEN Initializes or reinitializes video writer % - % successFlag = vid.open(filename, frameSize) - % successFlag = vid.open(filename, frameSize, 'OptionName',optionValue, ...) + % successFlag = vid.open(filename, frameSize) + % successFlag = vid.open(filename, frameSize, 'OptionName',optionValue, ...) % % ## Input % * __filename__ Name of the output video file % * __frameSize__ Size of the video frames `[width, height]`. % % ## Output - % * __successFlag__ bool, true if video writer has been successfully - % initialized. + % * __successFlag__ bool, true if video writer has been + % successfully initialized. % % ## Options % * __API__ The parameter allows to specify API backends to use. - % Can be used to enforce a specific writer implementation if - % multiple are available: e.g. 'FFMPEG' or 'GStreamer' - % * __Any__ (default) Auto detect - % * __VfW__ Video For Windows - % * __QuickTime__ QuickTime - % * __AVFoundation__ AVFoundation framework for iOS - % * __MediaFoundation__ Microsoft Media Foundation - % * __GStreamer__ GStreamer - % * __FFMPEG__ FFMPEG library - % * __Images__ OpenCV Image Sequence (e.g. `img_%02d.jpg`) - % * __MotionJPEG__ Built-in OpenCV MotionJPEG codec - % * __MediaSDK__ Intel Media SDK + % Can be used to enforce a specific writer implementation if + % multiple are available: e.g. 'FFMPEG' or 'GStreamer' + % * __Any__ (default) Auto detect + % * __VfW__ Video For Windows + % * __QuickTime__ QuickTime + % * __AVFoundation__ AVFoundation framework for iOS + % * __MediaFoundation__ Microsoft Media Foundation + % * __GStreamer__ GStreamer + % * __FFMPEG__ FFMPEG library + % * __Images__ OpenCV Image Sequence (e.g. `img_%02d.jpg`) + % * __MotionJPEG__ Built-in OpenCV MotionJPEG codec + % * __MediaSDK__ Intel Media SDK % * __FourCC__ 4-character code of codec used to compress the - % frames. List of codes can be obtained at [FOURCC]. FFMPEG - % backend with MP4 container natively uses other values as - % FourCC code: see [ObjectType], so you may receive a - % warning message from OpenCV about fourcc code conversion. 
- % Examples are: - % * __PIM1__ MPEG-1 codec - % * __MJPG__ Motion-JPEG codec (default) - % * __MP42__ MPEG-4 (Microsoft) - % * __DIV3__ DivX MPEG-4 Part 2 - % * __DIVX__ DivX codec - % * __XVID__ XVID MPEG-4 Part 2 - % * __U263__ H263 - % * __I263__ ITU H.263 - % * __FLV1__ Sorenson Spark (Flash Video) - % * __X264__ H.264 - % * __AVC1__ MPEG-4 Part 10/H.264 (Apple) - % * __WMV1__ Windows Media Video 7 (Microsoft) - % * __WMV2__ Windows Media Video 8 (Microsoft) - % * __-1__ prompts with codec selection dialog (Windows only) + % frames. List of codes can be obtained at [FOURCC]. FFMPEG + % backend with MP4 container natively uses other values as + % FourCC code: see [ObjectType], so you may receive a warning + % message from OpenCV about fourcc code conversion. Examples are: + % * __PIM1__ MPEG-1 codec + % * __MJPG__ Motion-JPEG codec (default) + % * __MP42__ MPEG-4 (Microsoft) + % * __DIV3__ DivX MPEG-4 Part 2 + % * __DIVX__ DivX codec + % * __XVID__ XVID MPEG-4 Part 2 + % * __U263__ H263 + % * __I263__ ITU H.263 + % * __FLV1__ Sorenson Spark (Flash Video) + % * __X264__ H.264 + % * __AVC1__ MPEG-4 Part 10/H.264 (Apple) + % * __WMV1__ Windows Media Video 7 (Microsoft) + % * __WMV2__ Windows Media Video 8 (Microsoft) + % * __-1__ prompts with codec selection dialog (Windows only) % * __FPS__ Framerate of the created video stream. default 25. % * __Color__ If true, the encoder will expect and encode color - % frames, otherwise it will work with grayscale frames (the - % flag is currently supported on Windows only). default true + % frames, otherwise it will work with grayscale frames (the flag + % is currently supported on Windows only). default true % % The method first calls cv.VideoWriter.release to close the % already opened file. @@ -140,13 +139,12 @@ function delete(this) % * On Windows FFMPEG or VFW is used; % * On MacOSX QTKit is used. % - % ## Tips + % ### Tips % * With some backends `FourCC=-1` pops up the codec selection % dialog from the system. - % * To save image sequence use a proper filename - % (eg. `img_%02d.jpg`) and `FourCC=0` OR `FPS=0`. Use - % uncompressed image format (eg. `img_%02d.BMP`) to save raw - % frames. + % * To save image sequence use a proper filename (eg. + % `img_%02d.jpg`) and `FourCC=0` OR `FPS=0`. Use uncompressed + % image format (eg. `img_%02d.BMP`) to save raw frames. % * Most codecs are lossy. If you want lossless video file you % need to use a lossless codecs (eg. FFMPEG FFV1, Huffman HFYU, % Lagarith LAGS, etc...) @@ -168,7 +166,7 @@ function delete(this) function retval = isOpened(this) %ISOPENED Returns true if video writer has been successfully initialized % - % retval = vid.isOpened() + % retval = vid.isOpened() % % ## Output % * __val__ bool, return value @@ -183,7 +181,7 @@ function delete(this) function release(this) %RELEASE Closes the video writer % - % vid.release() + % vid.release() % % The method is automatically called by subsequent % cv.VideoWriter.open and by the cv.VideoWriter destructor. @@ -196,16 +194,16 @@ function release(this) function write(this, frame, varargin) %WRITE Writes the next video frame % - % vid.write(frame) - % vid.write(frame, 'OptionName',optionValue, ...) + % vid.write(frame) + % vid.write(frame, 'OptionName',optionValue, ...) % % ## Input % * __frame__ The written frame % % ## Options % * __FlipChannels__ in case the input is color image, flips the - % color order from MATLAB's RGB to OpenCV's BGR order. - % default true + % color order from MATLAB's RGB to OpenCV's BGR order. 
+ % default true % % The method writes the specified image to video file. It must % have the same size as has been specified when opening the video @@ -219,17 +217,17 @@ function write(this, frame, varargin) function value = get(this, prop) %GET Returns the specified VideoWriter property % - % value = vid.get(prop) + % value = vid.get(prop) % % ## Input % * __prop__ Property identifier. It can be specified as a string - % (one of the recognized properties), or directly as its - % corresponding integer code. + % (one of the recognized properties), or directly as its + % corresponding integer code. % % ## Output % * __value__ Value for the specified property (as a `double`). - % Value 0 is returned when querying a property that is not - % supported by the backend used by the VideoWriter instance. + % Value 0 is returned when querying a property that is not + % supported by the backend used by the VideoWriter instance. % % See also: cv.VideoWriter.set % @@ -239,12 +237,12 @@ function write(this, frame, varargin) function set(this, prop, value) %SET Sets a property in the VideoWriter % - % vid.set(prop, value) + % vid.set(prop, value) % % ## Input % * __prop__ Property identifier. It can be specified as a string - % (one of the recognized properties), or directly as its - % corresponding integer code. + % (one of the recognized properties), or directly as its + % corresponding integer code. % * __value__ Value of the property (as a `double`). % % On failure (unsupported property by backend), the function diff --git a/+cv/absdiff.m b/+cv/absdiff.m index cb3236c03..962d7087c 100644 --- a/+cv/absdiff.m +++ b/+cv/absdiff.m @@ -1,6 +1,6 @@ %ABSDIFF Calculates the per-element absolute difference between two arrays or between an array and a scalar % -% dst = cv.absdiff(src1, src2) +% dst = cv.absdiff(src1, src2) % % ## Input % * __src1__ first input array or a scalar. @@ -14,19 +14,19 @@ % * Absolute difference between two arrays when they have the same size and % type: % -% dst(I) = saturate(|src1(I) - src2(I)|) +% dst(I) = saturate(|src1(I) - src2(I)|) % % * Absolute difference between an array and a scalar when the second array is % constructed from Scalar or has as many elements as the number of channels % in `src1`: % -% dst(I) = saturate(|src1(I) - src2|) +% dst(I) = saturate(|src1(I) - src2|) % % * Absolute difference between a scalar and an array when the first array is % constructed from Scalar or has as many elements as the number of channels % in `src2`: % -% dst(I) = saturate(|src1 - src2(I)|) +% dst(I) = saturate(|src1 - src2(I)|) % % where `I` is a multi-dimensional index of array elements. In case of % multi-channel arrays, each channel is processed independently. diff --git a/+cv/accumulate.m b/+cv/accumulate.m index 898911ef2..bb383aa2e 100644 --- a/+cv/accumulate.m +++ b/+cv/accumulate.m @@ -1,13 +1,13 @@ %ACCUMULATE Adds an image to the accumulator % -% dst = cv.accumulate(src, dst) -% dst = cv.accumulate(src, dst, 'OptionName',optionValue, ...) +% dst = cv.accumulate(src, dst) +% dst = cv.accumulate(src, dst, 'OptionName',optionValue, ...) % % ## Input -% * __src__ Input image of type `uint8`, `uint16`, `single`, or `double, with -% any number of channels +% * __src__ Input image of type `uint8`, `uint16`, `single`, or `double`, with +% any number of channels % * __dst__ Input accumulator image with the same number of channels as input -% image, and a depth of `single` or `double`. +% image, and a depth of `single` or `double`. % % ## Output % * __dst__ Output accumulator image. 
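+%
+% ## Example
+% A small sketch, accumulating a hypothetical cell array `frames` of
+% same-size `uint8` images into a running sum:
+%
+%     acc = zeros(size(frames{1}), 'double');
+%     for i=1:numel(frames)
+%         acc = cv.accumulate(frames{i}, acc);
+%     end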
@@ -17,7 +17,7 @@ % % The function adds `src` or some of its elements to `dst`: % -% dst(x,y) = dst(x,y) + src(x,y) if mask(x,y)~=0 +% dst(x,y) = dst(x,y) + src(x,y) if mask(x,y)~=0 % % The function supports multi-channel images. Each channel is processed % independently. diff --git a/+cv/accumulateProduct.m b/+cv/accumulateProduct.m index 0ed7cdbdf..492c99874 100644 --- a/+cv/accumulateProduct.m +++ b/+cv/accumulateProduct.m @@ -1,14 +1,14 @@ %ACCUMULATEPRODUCT Adds the per-element product of two input images to the accumulator % -% dst = cv.accumulateProduct(src1, src2, dst) -% dst = cv.accumulateProduct(src1, src2, dst, 'OptionName',optionValue, ...) +% dst = cv.accumulateProduct(src1, src2, dst) +% dst = cv.accumulateProduct(src1, src2, dst, 'OptionName',optionValue, ...) % % ## Input -% * __src1__ First input image of type `uint8`, `uint16`, `single`, or `double, -% with any number of channels +% * __src1__ First input image of type `uint8`, `uint16`, `single`, or +% `double`, with any number of channels % * __src2__ Second input image of the same type and the same size as `src1`. % * __dst__ Input accumulator image with the same number of channels as input -% images, and a depth of `single` or `double`. +% images, and a depth of `single` or `double`. % % ## Output % * __dst__ Output accumulator image. @@ -19,7 +19,7 @@ % The function adds the product of two images or their selected regions to the % accumulator `dst`: % -% dst(x,y) = dst(x,y) + src1(x,y)*src2(x,y) if mask(x,y)~=0 +% dst(x,y) = dst(x,y) + src1(x,y)*src2(x,y) if mask(x,y)~=0 % % The function supports multi-channel images. Each channel is processed % independently. diff --git a/+cv/accumulateSquare.m b/+cv/accumulateSquare.m index 4bd363356..6ded85633 100644 --- a/+cv/accumulateSquare.m +++ b/+cv/accumulateSquare.m @@ -1,13 +1,13 @@ %ACCUMULATESQUARE Adds the square of a source image to the accumulator % -% dst = cv.accumulateSquare(src, dst) -% dst = cv.accumulateSquare(src, dst, 'OptionName',optionValue, ...) +% dst = cv.accumulateSquare(src, dst) +% dst = cv.accumulateSquare(src, dst, 'OptionName',optionValue, ...) % % ## Input -% * __src__ Input image of type `uint8`, `uint16`, `single`, or `double, with -% any number of channels +% * __src__ Input image of type `uint8`, `uint16`, `single`, or `double`, with +% any number of channels % * __dst__ Input accumulator image with the same number of channels as input -% image, and a depth of `single` or `double`. +% image, and a depth of `single` or `double`. % % ## Output % * __dst__ Output accumulator image. @@ -18,7 +18,7 @@ % The function adds the input image `src` or its selected region, raised to a % power of 2, to the accumulator `dst`: % -% dst(x,y) = dst(x,y) + src(x,y)^2 if mask(x,y)~=0 +% dst(x,y) = dst(x,y) + src(x,y)^2 if mask(x,y)~=0 % % The function supports multi-channel images. Each channel is processed % independently. diff --git a/+cv/accumulateWeighted.m b/+cv/accumulateWeighted.m index 904193618..90431c042 100644 --- a/+cv/accumulateWeighted.m +++ b/+cv/accumulateWeighted.m @@ -1,13 +1,13 @@ %ACCUMULATEWEIGHTED Updates a running average % -% dst = cv.accumulateWeighted(src, dst, alpha) -% dst = cv.accumulateWeighted(..., 'OptionName',optionValue, ...) +% dst = cv.accumulateWeighted(src, dst, alpha) +% dst = cv.accumulateWeighted(..., 'OptionName',optionValue, ...) 
% % ## Input -% * __src__ Input image of type `uint8`, `uint16`, `single`, or `double, with -% any number of channels +% * __src__ Input image of type `uint8`, `uint16`, `single`, or `double`, with +% any number of channels % * __dst__ Input accumulator image with the same number of channels as input -% image, and a depth of `single` or `double`. +% image, and a depth of `single` or `double`. % * __alpha__ Weight of the input image. A scalar double. % % ## Output @@ -20,7 +20,7 @@ % accumulator `dst` so that `dst` becomes a running average of a frame % sequence: % -% dst(x,y) = (1-alpha)*dst(x,y) + alpha*src(x,y) if mask(x,y)~=0 +% dst(x,y) = (1-alpha)*dst(x,y) + alpha*src(x,y) if mask(x,y)~=0 % % That is, `alpha` regulates the update speed (how fast the accumulator % "forgets" about earlier images). The function supports multi-channel images. diff --git a/+cv/adaptiveThreshold.m b/+cv/adaptiveThreshold.m index cb8fd6abe..da3c329f9 100644 --- a/+cv/adaptiveThreshold.m +++ b/+cv/adaptiveThreshold.m @@ -1,7 +1,7 @@ %ADAPTIVETHRESHOLD Applies an adaptive threshold to an array % -% dst = cv.adaptiveThreshold(src) -% dst = cv.adaptiveThreshold(src, 'OptionName',optionValue, ...) +% dst = cv.adaptiveThreshold(src) +% dst = cv.adaptiveThreshold(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ Source 8-bit single-channel `uint8` image. @@ -11,38 +11,36 @@ % % ## Options % * __MaxValue__ Non-zero value assigned to the pixels for which the condition -% is satisfied. See the details below. default 255 -% * __Method__ Adaptive thresholding algorithm to use, default 'Mean'. -% One of: -% * __Mean__ the threshold value `T(x,y)` is a mean of the -% `BlockSize x BlockSize` neighborhood of `(x,y)` minus `C` -% * __Gaussian__ the threshold value `T(x,y)` is a weighted sum -% (cross-correlation with a Gaussian window) of the -% `BlockSize x BlockSize` neighborhood of `(x,y)` minus `C`. -% The default sigma (standard deviation) is used for the specified -% `BlockSize`. See cv.getGaussianKernel +% is satisfied. See the details below. default 255 +% * __Method__ Adaptive thresholding algorithm to use, default 'Mean'. One of: +% * __Mean__ the threshold value `T(x,y)` is a mean of the +% `BlockSize x BlockSize` neighborhood of `(x,y)` minus `C` +% * __Gaussian__ the threshold value `T(x,y)` is a weighted sum +% (cross-correlation with a Gaussian window) of the `BlockSize x BlockSize` +% neighborhood of `(x,y)` minus `C`. The default sigma (standard deviation) +% is used for the specified `BlockSize`. See cv.getGaussianKernel % * __Type__ Thresholding type, default 'Binary'. One of: -% * __Binary__ `dst(x,y) = (src(x,y) > thresh) ? maxValue : 0` -% * __BinaryInv__ `dst(x,y) = (src(x,y) > thresh) ? 0 : maxValue` +% * __Binary__ `dst(x,y) = (src(x,y) > thresh) ? maxValue : 0` +% * __BinaryInv__ `dst(x,y) = (src(x,y) > thresh) ? 0 : maxValue` % * __BlockSize__ Size of a pixel neighborhood that is used to calculate a -% threshold value for the pixel: 3, 5, 7, and so on. Default 3 -% * __C__ Constant subtracted from the mean or weighted mean. Normally, it -% is positive but may be zero or negative as well. Default 5 +% threshold value for the pixel: 3, 5, 7, and so on. Default 3 +% * __C__ Constant subtracted from the mean or weighted mean. Normally, it is +% positive but may be zero or negative as well. 
Default 5 % % The function transforms a grayscale image to a binary image according to % the formulae: % % * __Binary__ % -% | maxValue, if src(x,y) > T(x,y) -% dst(x,y) = | -% | 0, otherwise +% | maxValue, if src(x,y) > T(x,y) +% dst(x,y) = | +% | 0, otherwise % % * __BinaryInv__ % -% | 0, if src(x,y) > T(x,y) -% dst(x,y) = | -% | maxValue, otherwise +% | 0, if src(x,y) > T(x,y) +% dst(x,y) = | +% | maxValue, otherwise % % where `T(x,y)` is a threshold calculated individually for each pixel (see % `Method` parameter). diff --git a/+cv/add.m b/+cv/add.m index 1db04d1f4..08a49343f 100644 --- a/+cv/add.m +++ b/+cv/add.m @@ -1,7 +1,7 @@ %ADD Calculates the per-element sum of two arrays or an array and a scalar % -% dst = cv.add(src1, src2) -% dst = cv.add(src1, src2, 'OptionName',optionValue, ...) +% dst = cv.add(src1, src2) +% dst = cv.add(src1, src2, 'OptionName',optionValue, ...) % % ## Input % * __src1__ first input array or a scalar. @@ -9,41 +9,41 @@ % % ## Output % * __dst__ output array of the same size and number of channels as the input -% array(s). The depth is defined by `DType` or that of `src1`/`src2`. +% array(s). The depth is defined by `DType` or that of `src1`/`src2`. % % ## Options % * __Mask__ optional operation mask; this is an 8-bit single channel array -% that specifies elements of the output array to be changed. Not set by -% default. +% that specifies elements of the output array to be changed. Not set by +% default. % * __Dest__ Used to initialize the output `dst` when a mask is used. Not set -% by default. +% by default. % * __DType__ optional depth of the output array: `uint8`, `int16`, `double`, -% etc. (see the discussion below). Must be specified if input arrays are -% of different types. default -1 +% etc. (see the discussion below). Must be specified if input arrays are of +% different types. default -1 % % The function cv.add calculates: % % * Sum of two arrays when both input arrays have the same size and the same % number of channels: % -% dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0 +% dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0 % % * Sum of an array and a scalar when `src2` is constructed from Scalar or has % the same number of elements as `size(src1,3)`: % -% dst(I) = saturate(src1(I) + src2) if mask(I) != 0 +% dst(I) = saturate(src1(I) + src2) if mask(I) != 0 % % * Sum of a scalar and an array when `src1` is constructed from Scalar or has % the same number of elements as `size(src2,3)`: % -% dst(I) = saturate(src1 + src2(I)) if mask(I) != 0 +% dst(I) = saturate(src1 + src2(I)) if mask(I) != 0 % % where `I` is a multi-dimensional index of array elements. In case of % multi-channel arrays, each channel is processed independently. % % The first function in the list above can be replaced with matrix expressions: % -% dst = src1 + src2; +% dst = src1 + src2; % % The input arrays and the output array can all have the same or different % depths. For example, you can add a 16-bit unsigned array to a 8-bit signed diff --git a/+cv/addWeighted.m b/+cv/addWeighted.m index 7c4d9be94..406f5acf1 100644 --- a/+cv/addWeighted.m +++ b/+cv/addWeighted.m @@ -1,7 +1,7 @@ %ADDWEIGHTED Calculates the weighted sum of two arrays % -% dst = cv.addWeighted(src1, alpha, src2, beta, gamma) -% dst = cv.addWeighted(..., 'OptionName',optionValue, ...) +% dst = cv.addWeighted(src1, alpha, src2, beta, gamma) +% dst = cv.addWeighted(..., 'OptionName',optionValue, ...) % % ## Input % * __src1__ first input array. @@ -11,19 +11,19 @@ % * __gamma__ scalar added to each sum. 
%
% ## Output
-% * __dst__ output array that has the same size and number of channels as
-% the input arrays.
+% * __dst__ output array that has the same size and number of channels as the
+% input arrays.
%
% ## Options
% * __DType__ optional depth of the output array: `uint8`, `int16`, `double`,
-% etc. Must be specified if input arrays are of different types. When
-% both input arrays have the same depth, `DType` can be set to -1, which
-% will be equivalent to `class(src1)`. default -1
+% etc. Must be specified if input arrays are of different types. When both
+% input arrays have the same depth, `DType` can be set to -1, which will be
+% equivalent to `class(src1)`. default -1
%
% The function cv.addWeighted calculates the weighted sum of two arrays as
% follows:
%
-% dst(I) = saturate(src1(I)*alpha + src2(I)*beta + gamma)
+% dst(I) = saturate(src1(I)*alpha + src2(I)*beta + gamma)
%
% where `I` is a multi-dimensional index of array elements. In case of
% multi-channel arrays, each channel is processed independently.
diff --git a/+cv/applyColorMap.m b/+cv/applyColorMap.m
index 1f9ad5b71..fa9553519 100644
--- a/+cv/applyColorMap.m
+++ b/+cv/applyColorMap.m
@@ -1,35 +1,35 @@
%APPLYCOLORMAP Applies a GNU Octave/MATLAB equivalent colormap on a given image
%
-% dst = cv.applyColorMap(src, colormap)
-% dst = cv.applyColorMap(src, userColor)
-% dst = cv.applyColorMap(..., 'OptionName',optionValue, ...)
+% dst = cv.applyColorMap(src, colormap)
+% dst = cv.applyColorMap(src, userColor)
+% dst = cv.applyColorMap(..., 'OptionName',optionValue, ...)
%
% ## Input
% * __src__ The source image, grayscale or color of type `uint8`.
% * __colormap__ The colormap to apply. A string, one of:
-% * __Autumn__ Shades of red and yellow color map.
-% * __Bone__ Gray-scale with a tinge of blue color map.
-% * __Jet__ Variant of HSV.
-% * __Winter__ Shades of blue and green color map.
-% * __Rainbow__ Red-orange-yellow-green-blue-violet color map.
-% * __Ocean__ Black to white with shades of blue color map.
-% * __Summer__ Shades of green and yellow colormap.
-% * __Spring__ Shades of magenta and yellow color map.
-% * __Cool__ Shades of cyan and magenta color map.
-% * __HSV__ Hue-saturation-value color map.
-% * __Pink__ Pastel shades of pink color map.
-% * __Hot__ Black-red-yellow-white color map.
-% * __Parula__ Blue-green-orange-yellow color map.
+% * __Autumn__ Shades of red and yellow color map.
+% * __Bone__ Gray-scale with a tinge of blue color map.
+% * __Jet__ Variant of HSV.
+% * __Winter__ Shades of blue and green color map.
+% * __Rainbow__ Red-orange-yellow-green-blue-violet color map.
+% * __Ocean__ Black to white with shades of blue color map.
+% * __Summer__ Shades of green and yellow colormap.
+% * __Spring__ Shades of magenta and yellow color map.
+% * __Cool__ Shades of cyan and magenta color map.
+% * __HSV__ Hue-saturation-value color map.
+% * __Pink__ Pastel shades of pink color map.
+% * __Hot__ Black-red-yellow-white color map.
+% * __Parula__ Blue-green-orange-yellow color map.
% * __userColor__ The colormap to apply of type `uint8` (1 or 3 channels) and
-% length 256.
+% length 256.
%
% ## Output
% * __dst__ The result is the colormapped source image. Same row/column size
-% and same type as `src`.
+% and same type as `src`.
%
% ## Options
% * __FlipChannels__ whether to flip the order of color channels in output
-% `dst`, from OpenCV's BGR to between MATLAB's RGB. default true
+% `dst`, from OpenCV's BGR to MATLAB's RGB. 
default true % % The human perception isn't built for observing fine changes in grayscale % images. Human eyes are more sensitive to observing changes between colors, @@ -46,26 +46,26 @@ % image. The following sample code takes an image and applies a Jet colormap % on it and shows the result: % -% % We need an input image. (can be grayscale or color) -% img_in = im2uint8(mat2gray(peaks(500))); +% % We need an input image. (can be grayscale or color) +% img_in = im2uint8(mat2gray(peaks(500))); % -% % Apply the colormap -% %img_color2 = im2uint8(ind2rgb(img_in, jet(256))); -% img_color = cv.applyColorMap(img_in, 'Jet'); +% % Apply the colormap +% %img_color2 = im2uint8(ind2rgb(img_in, jet(256))); +% img_color = cv.applyColorMap(img_in, 'Jet'); % -% % Show the result -% imshow(img_color) +% % Show the result +% imshow(img_color) % % ## Example % -% cmaps = {'Autumn', 'Bone', 'Jet', 'Winter', 'Rainbow', 'Ocean', ... -% 'Summer', 'Spring', 'Cool', 'HSV', 'Pink', 'Hot', 'Parula'}; -% img = cell2mat(cellfun(@(cmap) ... -% cv.applyColorMap(repmat(uint8(0:255), 20, 1), cmap), cmaps(:), ... -% 'UniformOutput',false)); -% image(img) -% set(gca, 'YTick', 10:20:20*numel(cmaps), 'YTickLabel',cmaps) -% title('Colormaps') +% cmaps = {'Autumn', 'Bone', 'Jet', 'Winter', 'Rainbow', 'Ocean', ... +% 'Summer', 'Spring', 'Cool', 'HSV', 'Pink', 'Hot', 'Parula'}; +% img = cell2mat(cellfun(@(cmap) ... +% cv.applyColorMap(repmat(uint8(0:255), 20, 1), cmap), cmaps(:), ... +% 'UniformOutput',false)); +% image(img) +% set(gca, 'YTick', 10:20:20*numel(cmaps), 'YTickLabel',cmaps) +% title('Colormaps') % % See also: cv.LUT, ind2rgb, colormap, autumn, bone, jet, winter, summer, % spring, cool, hsv, pink, hot, parula diff --git a/+cv/approxPolyDP.m b/+cv/approxPolyDP.m index 24017ec75..164a9e8ed 100644 --- a/+cv/approxPolyDP.m +++ b/+cv/approxPolyDP.m @@ -1,29 +1,29 @@ %APPROXPOLYDP Approximates a polygonal curve(s) with the specified precision % -% approxCurve = cv.approxPolyDP(curve) -% approxCurve = cv.approxPolyDP(curve, 'OptionName', optionValue, ...) +% approxCurve = cv.approxPolyDP(curve) +% approxCurve = cv.approxPolyDP(curve, 'OptionName', optionValue, ...) % % ## Input % * __curve__ Input vector of 2D points stored in numeric array -% (Nx2/Nx1x2/1xNx2) or cell array of 2-element vectors (`{[x,y], ...}`). -% Supports integer (`int32`) and floating-point (`single`) classes. +% (Nx2/Nx1x2/1xNx2) or cell array of 2-element vectors (`{[x,y], ...}`). +% Supports integer (`int32`) and floating-point (`single`) classes. % % ## Output % * __approxCurve__ Result of the approximation. The type should match the -% type of the input curve. The shape should also match the input curve -% (numeric or cell array). +% type of the input curve. The shape should also match the input curve +% (numeric or cell array). % % ## Options % * __Epsilon__ Parameter specifying the approximation accuracy. This is the -% maximum distance between the original curve and its approximation. -% default 2.0 +% maximum distance between the original curve and its approximation. +% default 2.0 % * __Closed__ If true, the approximated curve is closed (its first and last -% vertices are connected). Otherwise, it is not closed. default true +% vertices are connected). Otherwise, it is not closed. default true % % The function cv.approxPolyDP approximates a curve or a polygon with another % curve/polygon with less vertices so that the distance between them is less % or equal to the specified precision. 
It uses the -% [Douglas-Peucker algorithm](http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm). +% [Douglas-Peucker algorithm](https://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm). % % See also: cv.arcLength % diff --git a/+cv/arcLength.m b/+cv/arcLength.m index fe1b9a4a8..45d44be23 100644 --- a/+cv/arcLength.m +++ b/+cv/arcLength.m @@ -1,18 +1,18 @@ %ARCLENGTH Calculates a contour perimeter or a curve length % -% len = cv.arcLength(curve) -% len = cv.arcLength(curve, 'OptionName', optionValue) +% len = cv.arcLength(curve) +% len = cv.arcLength(curve, 'OptionName', optionValue) % % ## Input % * __curve__ Input vector of 2D points stored in numeric array -% (Nx2/Nx1x2/1xNx2) or cell array of 2-element vectors (`{[x,y], ...}`). +% (Nx2/Nx1x2/1xNx2) or cell array of 2-element vectors (`{[x,y], ...}`). % % ## Output % * __len__ Output length/perimeter. % % ## Options % * __Closed__ Flag indicating whether the curve is closed or not. -% default false. +% default false. % % The function computes a curve length or a closed contour perimeter. % diff --git a/+cv/arrowedLine.m b/+cv/arrowedLine.m index 7f961c26f..aa42bcf8d 100644 --- a/+cv/arrowedLine.m +++ b/+cv/arrowedLine.m @@ -1,7 +1,7 @@ %ARROWEDLINE Draws an arrow segment pointing from the first point to the second one % -% img = cv.arrowedLine(img, pt1, pt2) -% [...] = cv.arrowedLine(..., 'OptionName', optionValue, ...) +% img = cv.arrowedLine(img, pt1, pt2) +% [...] = cv.arrowedLine(..., 'OptionName', optionValue, ...) % % ## Input % * __img__ Image where the arrow is drawn. @@ -15,12 +15,12 @@ % * __Color__ Line color. default is a black color % * __Thickness__ Line thickness. default 1. % * __LineType__ Type of the line. One of: -% * __4__ 4-connected line -% * __8__ 8-connected line (default) -% * __AA__ anti-aliased line +% * __4__ 4-connected line +% * __8__ 8-connected line (default) +% * __AA__ anti-aliased line % * __Shift__ Number of fractional bits in the point coordinates. default 0. % * __TipLength__ The length of the arrow tip in relation to the arrow length. -% default 0.1 +% default 0.1 % % The function cv.arrowedLine draws an arrow between `pt1` and `pt2` points % in the image. diff --git a/+cv/batchDistance.m b/+cv/batchDistance.m index 64c0c65a7..1d8f1b4f1 100644 --- a/+cv/batchDistance.m +++ b/+cv/batchDistance.m @@ -1,29 +1,29 @@ %BATCHDISTANCE Naive nearest neighbor finder % -% [dst,nidx] = cv.batchDistance(src1, src2) -% [...] = cv.batchDistance(..., 'OptionName', optionValue, ...) +% [dst,nidx] = cv.batchDistance(src1, src2) +% [...] = cv.batchDistance(..., 'OptionName', optionValue, ...) % % ## Input -% * __src1__ samples matrix of size N1-by-M, type either 'single' or 'uint8' -% * __src2__ samples matrix of size N2-by-M, type either 'single' or 'uint8' +% * __src1__ samples matrix of size N1-by-M, type either `single` or `uint8` +% * __src2__ samples matrix of size N2-by-M, type either `single` or `uint8` % % ## Output % * __dst__ distance matrix (see description below). % * __nidx__ zero-based indices of nearest neighbors (matrix of size N1-by-K). -% Only computed if `K>0`, otherwise an empty matrix is returned. +% Only computed if `K>0`, otherwise an empty matrix is returned. % % ## Options % * __DType__ default -1 % * __NormType__ Distance metric used. Default 'L2' -% * __K__ Number of nearest neighbors in to find. 
If `K=0` (the default), -% the full pairwaise distance matrix is computed (of size N1-by-N2), -% otherwise only distances to the K-nearest neighbors is returned -% (matrix of size N1-by-K). +% * __K__ Number of nearest neighbors to find. If `K=0` (the default), the +% full pairwise distance matrix is computed (of size N1-by-N2), otherwise +% only distances to the K-nearest neighbors are returned (matrix of size +% N1-by-K). % * __Mask__ Not set by default. % * __Update__ default 0. % * __CrossCheck__ default false. % -% See [Nearest neighbor search](http://en.wikipedia.org/wiki/Nearest_neighbor_search). +% See [Nearest neighbor search](https://en.wikipedia.org/wiki/Nearest_neighbor_search). % % See also: pdist2, knnsearch %
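As a sketch of how `K` changes the output shapes described above (random data, purely illustrative):

    % 2-nearest-neighbor search: the closest rows of B for each row of A
    A = single(randn(5, 3));    % N1-by-M query samples
    B = single(randn(10, 3));   % N2-by-M database samples
    [D, nidx] = cv.batchDistance(A, B, 'NormType','L2', 'K',2);
    % D is 5x2 (distances), nidx is 5x2 (zero-based row indices into B);
    % with 'K',0 instead, D would be the full 5x10 pairwise distance
    % matrix and nidx would be empty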
diff --git a/+cv/bilateralFilter.m b/+cv/bilateralFilter.m index ca5a75c05..4fb5c1559 100644 --- a/+cv/bilateralFilter.m +++ b/+cv/bilateralFilter.m @@ -1,7 +1,7 @@ %BILATERALFILTER Applies the bilateral filter to an image % -% result = cv.bilateralFilter(img) -% result = cv.bilateralFilter(img, 'OptionName', optionValue, ...) +% result = cv.bilateralFilter(img) +% result = cv.bilateralFilter(img, 'OptionName', optionValue, ...) % % ## Input % * __img__ Source 8-bit or floating-point, 1-channel or 3-channel image. @@ -11,19 +11,19 @@ % % ## Options % * __Diameter__ Diameter of each pixel neighborhood that is used during -% filtering. If it is non-positive, it is computed from `SigmaSpace`. -% Default: 7 +% filtering. If it is non-positive, it is computed from `SigmaSpace`. +% Default 7 % * __SigmaColor__ Filter sigma in the color space. A larger value of the -% parameter means that farther colors within the pixel neighborhood (see -% `SigmaSpace`) will be mixed together, resulting in larger areas of -% semi-equal color. Default: 50.0 +% parameter means that farther colors within the pixel neighborhood (see +% `SigmaSpace`) will be mixed together, resulting in larger areas of +% semi-equal color. Default 50.0 % * __SigmaSpace__ Filter sigma in the coordinate space. A larger value of the -% parameter means that farther pixels will influence each other as long -% as their colors are close enough (see `SigmaColor`). When `Diameter>0`, -% it specifies the neighborhood size regardless of `SigmaSpace`. -% Otherwise, `Diameter` is proportional to `SigmaSpace`. Default: 50.0 +% parameter means that farther pixels will influence each other as long as +% their colors are close enough (see `SigmaColor`). When `Diameter>0`, it +% specifies the neighborhood size regardless of `SigmaSpace`. Otherwise, +% `Diameter` is proportional to `SigmaSpace`. Default 50.0 % * __BorderType__ border mode used to extrapolate pixels outside of the -% image. See cv.copyMakeBorder. Default: 'Default' +% image. See cv.copyMakeBorder. Default 'Default' % % The function applies bilateral filtering to the input image, as described % in [CVonline]. cv.bilateralFilter can reduce unwanted noise very well while diff --git a/+cv/bitwise_and.m b/+cv/bitwise_and.m index 21efc03bb..e17954182 100644 --- a/+cv/bitwise_and.m +++ b/+cv/bitwise_and.m @@ -1,22 +1,21 @@ %BITWISE_AND Calculates the per-element bit-wise conjunction of two arrays or an array and a scalar % -% dst = cv.bitwise_and(src1, src2) -% dst = cv.bitwise_and(src1, src2, 'OptionName', optionValue, ...) +% dst = cv.bitwise_and(src1, src2) +% dst = cv.bitwise_and(src1, src2, 'OptionName', optionValue, ...) % % ## Input % * __src1__ first input array or a scalar. -% * __src2__ second input array or a scalar. In case both are array, -% they must have the same size and type. +% * __src2__ second input array or a scalar. In case both are arrays, they must +% have the same size and type. % % ## Output % * __dst__ output array that has the same size and type as the input arrays. % % ## Options % * __Mask__ optional operation mask, 8-bit single channel array, that -% specifies elements of the output array to be changed. Not set by -% default. +% specifies elements of the output array to be changed. Not set by default. % * __Dest__ Used to initialize the output `dst` when a mask is used. Not set -% by default. +% by default. % % Computes bitwise conjunction of the two arrays (`dst = src1 & src2`). % @@ -24,17 +23,17 @@ % % * Two arrays when `src1` and `src2` have the same size: % -% dst(I) = src1(I) AND src2(I) if mask(I) != 0 +% dst(I) = src1(I) AND src2(I) if mask(I) != 0 % % * An array and a scalar when `src2` is constructed from Scalar or has the % same number of elements as `size(src1,3)`: % -% dst(I) = src1(I) AND src2 if mask(I) != 0 +% dst(I) = src1(I) AND src2 if mask(I) != 0 % % * A scalar and an array when `src1` is constructed from Scalar or has the % same number of elements as `size(src2,3)`: % -% dst(I) = src1 AND src2(I) if mask(I) != 0 +% dst(I) = src1 AND src2(I) if mask(I) != 0 % % In case of floating-point arrays, their machine-specific bit representations % (usually IEEE754-compliant) are used for the operation. In case of diff --git a/+cv/bitwise_not.m b/+cv/bitwise_not.m index b74115ef1..0f721ff75 100644 --- a/+cv/bitwise_not.m +++ b/+cv/bitwise_not.m @@ -1,7 +1,7 @@ %BITWISE_NOT Inverts every bit of an array % -% dst = cv.bitwise_not(src) -% dst = cv.bitwise_not(src, 'OptionName', optionValue, ...) +% dst = cv.bitwise_not(src) +% dst = cv.bitwise_not(src, 'OptionName', optionValue, ...) % % ## Input % * __src__ Input array. @@ -11,14 +11,13 @@ % % ## Options % * __Mask__ optional operation mask, 8-bit single channel array, that -% specifies elements of the output array to be changed. Not set by -% default. +% specifies elements of the output array to be changed. Not set by default. % * __Dest__ Used to initialize the output `dst` when a mask is used. Not set -% by default. +% by default. % % The function calculates per-element bit-wise inversion of the input array. % -% dst(I) = NOT src(I) +% dst(I) = NOT src(I) % % In case of a floating-point input array, its machine-specific bit % representation (usually IEEE754-compliant) is used for the operation. In diff --git a/+cv/bitwise_or.m b/+cv/bitwise_or.m index e0c07e57a..832f1bd8b 100644 --- a/+cv/bitwise_or.m +++ b/+cv/bitwise_or.m @@ -1,38 +1,37 @@ %BITWISE_OR Calculates the per-element bit-wise disjunction of two arrays or an array and a scalar % -% dst = cv.bitwise_or(src1, src2) -% dst = cv.bitwise_or(src1, src2, 'OptionName', optionValue, ...) +% dst = cv.bitwise_or(src1, src2) +% dst = cv.bitwise_or(src1, src2, 'OptionName', optionValue, ...) % % ## Input % * __src1__ first input array or a scalar. -% * __src2__ second input array or a scalar. In case both are array, -% they must have the same size and type. +% * __src2__ second input array or a scalar. In case both are arrays, they must +% have the same size and type. % % ## Output % * __dst__ output array that has the same size and type as the input arrays. % % ## Options % * __Mask__ optional operation mask, 8-bit single channel array, that -% specifies elements of the output array to be changed. Not set by -% default.
+% specifies elements of the output array to be changed. Not set by default. % * __Dest__ Used to initialize the output `dst` when a mask is used. Not set -% by default. +% by default. % % The function calculates the per-element bit-wise logical disjunction for: % % * Two arrays when `src1` and `src2` have the same size: % -% dst(I) = src1(I) OR src2(I) if mask(I) != 0 +% dst(I) = src1(I) OR src2(I) if mask(I) != 0 % % * An array and a scalar when `src2` is constructed from Scalar or has the % same number of elements as `size(src1,3)`: % -% dst(I) = src1(I) OR src2 if mask(I) != 0 +% dst(I) = src1(I) OR src2 if mask(I) != 0 % % * A scalar and an array when `src1` is constructed from Scalar or has the % same number of elements as `size(src2,3)`: % -% dst(I) = src1 OR src2(I) if mask(I) != 0 +% dst(I) = src1 OR src2(I) if mask(I) != 0 % % In case of floating-point arrays, their machine-specific bit representations % (usually IEEE754-compliant) are used for the operation. In case of diff --git a/+cv/bitwise_xor.m b/+cv/bitwise_xor.m index 13f19f954..23f26aafa 100644 --- a/+cv/bitwise_xor.m +++ b/+cv/bitwise_xor.m @@ -1,39 +1,38 @@ %BITWISE_XOR Calculates the per-element bit-wise "exclusive or" operation on two arrays or an array and a scalar % -% dst = cv.bitwise_xor(src1, src2) -% dst = cv.bitwise_xor(src1, src2, 'OptionName', optionValue, ...) +% dst = cv.bitwise_xor(src1, src2) +% dst = cv.bitwise_xor(src1, src2, 'OptionName', optionValue, ...) % % ## Input % * __src1__ first input array or a scalar. -% * __src2__ second input array or a scalar. In case both are array, -% they must have the same size and type. +% * __src2__ second input array or a scalar. In case both are arrays, they must +% have the same size and type. % % ## Output % * __dst__ output array that has the same size and type as the input arrays. % % ## Options % * __Mask__ optional operation mask, 8-bit single channel array, that -% specifies elements of the output array to be changed. Not set by -% default. +% specifies elements of the output array to be changed. Not set by default. % * __Dest__ Used to initialize the output `dst` when a mask is used. Not set -% by default. +% by default. % % The function calculates the per-element bit-wise logical "exclusive-or" % operation for: % % * Two arrays when `src1` and `src2` have the same size: % -% dst(I) = src1(I) XOR src2(I) if mask(I) != 0 +% dst(I) = src1(I) XOR src2(I) if mask(I) != 0 % % * An array and a scalar when `src2` is constructed from Scalar or has the % same number of elements as `size(src1,3)`: % -% dst(I) = src1(I) XOR src2 if mask(I) != 0 +% dst(I) = src1(I) XOR src2 if mask(I) != 0 % % * A scalar and an array when `src1` is constructed from Scalar or has the % same number of elements as `size(src2,3)`: % -% dst(I) = src1 XOR src2(I) if mask(I) != 0 +% dst(I) = src1 XOR src2(I) if mask(I) != 0 % % In case of floating-point arrays, their machine-specific bit representations % (usually IEEE754-compliant) are used for the operation. In case of diff --git a/+cv/blendLinear.m b/+cv/blendLinear.m index e4157cf50..b46259a1d 100644 --- a/+cv/blendLinear.m +++ b/+cv/blendLinear.m @@ -1,10 +1,10 @@ %BLENDLINEAR Performs linear blending of two images % -% dst = cv.blendLinear(src1, src2, weights1, weights2) +% dst = cv.blendLinear(src1, src2, weights1, weights2) % % ## Input % * __src1__ First image, it has a depth of `uint8` or `single`, and any -% number of channels. +% number of channels. % * __src2__ second image of same size and type as `src1`.
% * __weights1__ floating-point matrix of size same as input images. % * __weights2__ floating-point matrix of size same as input images. @@ -14,7 +14,7 @@ % % Performs linear blending of two images: % -% dst(i,j) = weights1(i,j) * src1(i,j) + weights2(i,j) * src2(i,j) +% dst(i,j) = weights1(i,j) * src1(i,j) + weights2(i,j) * src2(i,j) % % See also: cv.addWeighted, cv.add, imlincomb % diff --git a/+cv/blur.m b/+cv/blur.m index 1b8ad6586..a306a34ff 100644 --- a/+cv/blur.m +++ b/+cv/blur.m @@ -1,12 +1,12 @@ %BLUR Smoothes an image using the normalized box filter % -% dst = cv.blur(src) -% dst = cv.blur(src, 'OptionName',optionValue, ...) +% dst = cv.blur(src) +% dst = cv.blur(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ input image; it can have any number of channels, which are -% processed independently, but the depth should be `uint8`, `uint16`, -% `int16`, `single`, or `double`. +% processed independently, but the depth should be `uint8`, `uint16`, +% `int16`, `single`, or `double`. % % ## Output % * __dst__ output image of the same size and type as `src`. @@ -14,13 +14,13 @@ % ## Options % * __KSize__ blurring kernel size. default [5,5] % * __Anchor__ Anchor point `[x,y]`. The default value `[-1,-1]` means that -% the anchor is at the kernel center. +% the anchor is at the kernel center. % * __BorderType__ Border mode used to extrapolate pixels outside of the -% image. See cv.copyMakeBorder. default 'Default' +% image. See cv.copyMakeBorder. default 'Default' % % The function smooths an image using the kernel: % -% K = ones(KSize) / prod(KSize) +% K = ones(KSize) / prod(KSize) % % See also: cv.boxFilter, cv.bilateralFilter, cv.GaussianBlur, cv.medianBlur, % imfilter, fspecial diff --git a/+cv/borderInterpolate.m b/+cv/borderInterpolate.m index dcadbc664..6e0c358d6 100644 --- a/+cv/borderInterpolate.m +++ b/+cv/borderInterpolate.m @@ -1,11 +1,11 @@ %BORDERINTERPOLATE Computes the source location of an extrapolated pixel % -% loc = cv.borderInterpolate(p, len) -% [...] = cv.borderInterpolate(..., 'OptionName', optionValue, ...) +% loc = cv.borderInterpolate(p, len) +% [...] = cv.borderInterpolate(..., 'OptionName', optionValue, ...) % % ## Input % * __p__ 0-based coordinate of the extrapolated pixel along one of the axes, -% likely `p < 0` or `p >= len`. +% likely `p < 0` or `p >= len`. % * __len__ Length of the array along the corresponding axis. % % ## Output @@ -13,14 +13,15 @@ % % ## Options % * __BorderType__ Border type, one of: -% * __Constant__ `iiiiii|abcdefgh|iiiiiii` with some specified `i` -% * __Replicate__ `aaaaaa|abcdefgh|hhhhhhh` -% * __Reflect__ `fedcba|abcdefgh|hgfedcb` -% * __Reflect101__ `gfedcb|abcdefgh|gfedcba` -% * __Wrap__ `cdefgh|abcdefgh|abcdefg` -% * __Default__ same as 'Reflect101' (default) -% When `BorderType=='Constant'`, the function always returns -1, -% regardless of `p` and `len`. +% * __Constant__ `iiiiii|abcdefgh|iiiiiii` with some specified `i` +% * __Replicate__ `aaaaaa|abcdefgh|hhhhhhh` +% * __Reflect__ `fedcba|abcdefgh|hgfedcb` +% * __Reflect101__ `gfedcb|abcdefgh|gfedcba` +% * __Wrap__ `cdefgh|abcdefgh|abcdefg` +% * __Default__ same as 'Reflect101' (default) +% +% When `BorderType=='Constant'`, the function always returns -1, regardless +% of `p` and `len`. 
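The border patterns listed above can be probed directly; the expected values in the comments below are worked out by hand from those patterns, as a sketch:

    % map out-of-range coordinates into a 10-element axis
    cv.borderInterpolate(-5, 10, 'BorderType','Reflect101')  % expect 5
    cv.borderInterpolate(-5, 10, 'BorderType','Replicate')   % expect 0
    cv.borderInterpolate(12, 10, 'BorderType','Wrap')        % expect 2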
% % The function computes and returns the coordinate of a donor pixel % corresponding to the specified extrapolated pixel when using the specified diff --git a/+cv/boundingRect.m b/+cv/boundingRect.m index d21249dc0..81c423e4c 100644 --- a/+cv/boundingRect.m +++ b/+cv/boundingRect.m @@ -1,12 +1,12 @@ %BOUNDINGRECT Calculates the up-right bounding rectangle of a point set % -% rct = cv.boundingRect(points) -% rct = cv.boundingRect(mask) +% rct = cv.boundingRect(points) +% rct = cv.boundingRect(mask) % % ## Input -% * __points__ Input 2D point set, stored in numeric array -% (Nx2/Nx1x2/1xNx2) or cell array of 2-element vectors (`{[x,y], ...}`). -% Supports integer (`int32`) and floating-point (`single`) classes. +% * __points__ Input 2D point set, stored in numeric array (Nx2/Nx1x2/1xNx2) +% or cell array of 2-element vectors (`{[x,y], ...}`). Supports integer +% (`int32`) and floating-point (`single`) classes. % * __mask__ Binary mask, a 1-channel NxM 8-bit or logical matrix. % % ## Output diff --git a/+cv/boxFilter.m b/+cv/boxFilter.m index 3316bae23..bd6ccc011 100644 --- a/+cv/boxFilter.m +++ b/+cv/boxFilter.m @@ -1,7 +1,7 @@ %BOXFILTER Blurs an image using the box filter % -% dst = cv.boxFilter(src) -% dst = cv.boxFilter(src, 'OptionName',optionValue, ...) +% dst = cv.boxFilter(src) +% dst = cv.boxFilter(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ input image. @@ -11,24 +11,24 @@ % % ## Options % * __DDepth__ the output image depth (-1 to use `class(src)`). Default -1. -% See cv.filter2D for details. +% See cv.filter2D for details. % * __KSize__ blurring kernel size. Default [5,5] % * __Anchor__ anchor point `[x,y]`; default value [-1,-1] means that the -% anchor is at the kernel center. +% anchor is at the kernel center. % * __Normalize__ flag, specifying whether the kernel is normalized by its -% area or not. default true +% area or not. default true % * __BorderType__ border mode used to extrapolate pixels outside of the -% image. See cv.copyMakeBorder. Default 'Default' +% image. See cv.copyMakeBorder. Default 'Default' % % The function smooths an image using the kernel: % -% K = alpha * ones(KSize) +% K = alpha * ones(KSize) % % where: % -% | 1/prod(KSize) when Normalize=true -% alpha = | -% | 1 otherwise +% | 1/prod(KSize) when Normalize=true +% alpha = | +% | 1 otherwise % % Unnormalized box filter is useful for computing various integral % characteristics over each pixel neighborhood, such as covariance matrices diff --git a/+cv/boxPoints.m b/+cv/boxPoints.m index 75299b655..f72ee023d 100644 --- a/+cv/boxPoints.m +++ b/+cv/boxPoints.m @@ -1,19 +1,17 @@ %BOXPOINTS Finds the four vertices of a rotated rectangle % -% points = cv.boxPoints(box) +% points = cv.boxPoints(box) % % ## Input -% * __box__ The input rotated rectangle. A structure with the following -% fields: -% * __center__ The rectangle mass center `[x,y]`. -% * __size__ Width and height of the rectangle `[w,h]`. -% * __angle__ The rotation angle in a clockwise direction. -% When the angle is 0, 90, 180, 270 etc., the -% rectangle becomes an up-right rectangle. +% * __box__ The input rotated rectangle. A structure with the following fields: +% * __center__ The rectangle mass center `[x,y]`. +% * __size__ Width and height of the rectangle `[w,h]`. +% * __angle__ The rotation angle in a clockwise direction. When the angle is +% 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle. % % ## Output % * __points__ The output array of four vertices of rectangles. 
A 4-by-2 -% numeric matrix, each row a point: `[x1 y1; x2 y2; x3 y3; x4 y4]`. +% numeric matrix, each row a point: `[x1 y1; x2 y2; x3 y3; x4 y4]`. % % The function finds the four vertices of a rotated rectangle. This function % is useful to draw the rectangle. You can also use cv.RotatedRect.points diff --git a/+cv/buildOpticalFlowPyramid.m b/+cv/buildOpticalFlowPyramid.m index 5a60a9da2..9f490160a 100644 --- a/+cv/buildOpticalFlowPyramid.m +++ b/+cv/buildOpticalFlowPyramid.m @@ -1,34 +1,34 @@ %BUILDOPTICALFLOWPYRAMID Constructs the image pyramid which can be passed to cv.calcOpticalFlowPyrLK % -% pyramid = cv.buildOpticalFlowPyramid(img) -% [pyramid,maxLvl] = cv.buildOpticalFlowPyramid(img) -% pyramid = cv.buildOpticalFlowPyramid(img, 'OptionName',optionValue, ...) +% pyramid = cv.buildOpticalFlowPyramid(img) +% [pyramid,maxLvl] = cv.buildOpticalFlowPyramid(img) +% pyramid = cv.buildOpticalFlowPyramid(img, 'OptionName',optionValue, ...) % % ## Input % * __img__ 8-bit input image. % % ## Output % * __pyramid__ output pyramid. A cell-array of matrices (pyramid levels), -% interleaved with corresponding gradients if `WithDerivatives` flag -% is true. Each gradient is a matrix of type `int16` of same size as -% pyramid at that level with 2-channels. +% interleaved with corresponding gradients if `WithDerivatives` flag is +% true. Each gradient is a 2-channel matrix of type `int16` of the same +% size as the pyramid at that level. % * __maxLvl__ number of levels in constructed pyramid. Can be less than -% `MaxLevel`. +% `MaxLevel`. % % ## Options % * __WinSize__ window size of optical flow algorithm. Must be not less than -% `WinSize` argument of cv.calcOpticalFlowPyrLK. It is needed to -% calculate required padding for pyramid levels. default [21,21] +% `WinSize` argument of cv.calcOpticalFlowPyrLK. It is needed to calculate +% required padding for pyramid levels. default [21,21] % * __MaxLevel__ 0-based maximal pyramid level number. default 3 % * __WithDerivatives__ set to precompute gradients for every pyramid -% level. If pyramid is constructed without the gradients then -% cv.calcOpticalFlowPyrLK will calculate them internally. default true +% level. If pyramid is constructed without the gradients then +% cv.calcOpticalFlowPyrLK will calculate them internally. default true % * __PyrBorder__ the border mode for pyramid layers. See cv.copyMakeBorder. -% default 'Reflect101' +% default 'Reflect101' % * __DerivBorder__ the border mode for gradients. See cv.copyMakeBorder. -% default 'Constant' +% default 'Constant' % * __TryReuseInputImage__ put ROI of input image into the pyramid if -% possible. You can pass false to force data copying. default true +% possible. You can pass false to force data copying. default true % % See also: cv.calcOpticalFlowPyrLK, cv.buildPyramid % diff --git a/+cv/buildPyramid.m b/+cv/buildPyramid.m index b849b6a01..a7b0dc334 100644 --- a/+cv/buildPyramid.m +++ b/+cv/buildPyramid.m @@ -1,22 +1,22 @@ %BUILDPYRAMID Constructs the Gaussian pyramid for an image % -% dst = cv.buildPyramid(src) -% dst = cv.buildPyramid(src, 'OptionName',optionValue, ...) +% dst = cv.buildPyramid(src) +% dst = cv.buildPyramid(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ Source image. Check cv.pyrDown for the list of supported types. % % ## Output % * __dst__ Destination vector of `MaxLevel+1` images of the same type as -% `src`. A cell array of images. `dst{1}` will be the same as `src`. -% `dst{2}` is the next pyramid layer, a smoothed and down-sized `src`, -% and so on.
+% `src`. A cell array of images. `dst{1}` will be the same as `src`. +% `dst{2}` is the next pyramid layer, a smoothed and down-sized `src`, and +% so on. % % ## Options % * __MaxLevel__ 0-based index of the last (the smallest) pyramid layer. It -% must be non-negative. default 5 +% must be non-negative. default 5 % * __BorderType__ Pixel extrapolation method, ('Constant' isn't supported). -% See cv.copyMakeBorder for details. Default 'Default' +% See cv.copyMakeBorder for details. Default 'Default' % % The function constructs a vector of images and builds the Gaussian pyramid % by recursively applying cv.pyrDown to the previously built pyramid layers, diff --git a/+cv/calcBackProject.m b/+cv/calcBackProject.m index c132513b6..eaa1b69f6 100644 --- a/+cv/calcBackProject.m +++ b/+cv/calcBackProject.m @@ -1,37 +1,36 @@ %CALCBACKPROJECT Calculates the back projection of a histogram % -% backProject = cv.calcBackProject(images, H, ranges) -% backProject = cv.calcBackProject(..., 'OptionName',optionValue, ...) +% backProject = cv.calcBackProject(images, H, ranges) +% backProject = cv.calcBackProject(..., 'OptionName',optionValue, ...) % % ## Input % * __images__ Source arrays. A numeric array, or cell array of numeric arrays -% are accepted. They all should have the same class (`uint8`, `uint16`, -% or `single`) and the same row/column size. Each of them can have an -% arbitrary number of channels. +% are accepted. They all should have the same class (`uint8`, `uint16`, or +% `single`) and the same row/column size. Each of them can have an arbitrary +% number of channels. % * __H__ Input histogram that can be dense or sparse. % * __ranges__ Cell-array of arrays of the histogram bin boundaries in each -% dimension. See cv.calcHist. +% dimension. See cv.calcHist. % % ## Output % * __backProject__ Destination back projection array that is a single-channel -% array of the same row/column size and depth as `images` (if numeric) -% or `images{1}` (if `images` is a cell array). Out of `uint8`, -% `uint16`, or `single` classes. Pay attention to the data type of the -% back projection array, where integer types are clamped to the maximum -% `intmax` if a value exceeds the largest possible integer of the type. +% array of the same row/column size and depth as `images` (if numeric) or +% `images{1}` (if `images` is a cell array). Out of `uint8`, `uint16`, or +% `single` classes. Pay attention to the data type of the back projection +% array, where integer types are clamped to the maximum `intmax` if a value +% exceeds the largest possible integer of the type. % % ## Options % * __Channels__ The list of channels used to compute the back projection (as -% 0-based indices). The number of channels must match the histogram -% dimensionality. The first array channels are numerated from `0` to -% `size(images{1},3)-1`, the second array channels are counted from -% `size(images{1},3)` to `size(images{1},3) + size(images{2},3)-1`, and -% so on. By default, all channels from all images are used, i.e default -% is `0:sum(cellfun(@(im)size(im,3), images))-1` when input `images` is -% a cell array, and `0:(size(images,3)-1)` when input `images` is a -% numeric array. -% * __Uniform__ Logical flag indicating whether the histogram is uniform -% or not (see above). default false. +% 0-based indices). The number of channels must match the histogram +% dimensionality. 
The first array channels are numerated from `0` to +% `size(images{1},3)-1`, the second array channels are counted from +% `size(images{1},3)` to `size(images{1},3) + size(images{2},3)-1`, and so +% on. By default, all channels from all images are used, i.e default is +% `0:sum(cellfun(@(im)size(im,3), images))-1` when input `images` is a cell +% array, and `0:(size(images,3)-1)` when input `images` is a numeric array. +% * __Uniform__ Logical flag indicating whether the histogram is uniform or +% not (see above). default false. % * __Scale__ Optional scale factor for the output back projection. default 1 % % The function cv.calcBackProject calculates the back project of the @@ -45,8 +44,8 @@ % find and track a bright-colored object in a scene: % % 1. Before tracking, show the object to the camera so that it covers almost -% the whole frame. Calculate a hue histogram. The histogram may have -% strong maximums, corresponding to the dominant colors in the object. +% the whole frame. Calculate a hue histogram. The histogram may have strong +% maximums, corresponding to the dominant colors in the object. % 2. When tracking, calculate a back projection of a hue plane of each input % video frame using that pre-computed histogram. Threshold the back % projection to suppress weak colors. It may also make sense to suppress diff --git a/+cv/calcBlurriness.m b/+cv/calcBlurriness.m index ef7f1ccb8..fa14a0a29 100644 --- a/+cv/calcBlurriness.m +++ b/+cv/calcBlurriness.m @@ -1,6 +1,6 @@ %CALCBLURRINESS Calculate image blurriness % -% blurriness = cv.calcBlurriness(img) +% blurriness = cv.calcBlurriness(img) % % ## Input % * __img__ input image. diff --git a/+cv/calcCovarMatrix.m b/+cv/calcCovarMatrix.m index 43a175d4a..3a4295470 100644 --- a/+cv/calcCovarMatrix.m +++ b/+cv/calcCovarMatrix.m @@ -1,7 +1,7 @@ %CALCCOVARMATRIX Calculates the covariance matrix of a set of vectors % -% [covar,mean] = cv.calcCovarMatrix(samples) -% [...] = cv.calcCovarMatrix(..., 'OptionName', optionValue, ...) +% [covar,mean] = cv.calcCovarMatrix(samples) +% [...] = cv.calcCovarMatrix(..., 'OptionName', optionValue, ...) % % ## Input % * __samples__ Samples stored as rows/columns of a single matrix. @@ -11,40 +11,37 @@ % * __mean__ Output array as the average value of the input vectors. % % ## Options -% * __Mean__ Input array as the average value of the input vectors. -% See 'UseAvg' below. It is not set by default +% * __Mean__ Input array as the average value of the input vectors. See +% `UseAvg` below. It is not set by default % * __Flags__ Operation flags. Default is equivalent to -% {'Normal',true, 'Rows',true} +% `{'Normal',true, 'Rows',true}` % * __Normal__ The covariance matrix will be a square matrix of the same -% size as the total number of elements in each input vector. One -% and only one of 'Scrambled' and 'Normal' must be specified. -% default true -% * __Scrambled__ The covariance matrix will be `nsamples x nsamples`. -% Such an unusual covariance matrix is used for fast cv.PCA of a -% set of very large vectors (see, for example, the EigenFaces -% technique for face recognition). Eigenvalues of this "scrambled" -% matrix match the eigenvalues of the true covariance matrix. The -% "true" eigenvectors can be easily calculated from the -% eigenvectors of the "scrambled" covariance matrix. default false -% * __Rows__ If the flag is specified, all the input vectors are stored -% as rows of the samples matrix. mean should be a single-row -% vector in this case. 
default true -% * __Cols__ If the flag is specified, all the input vectors are stored -% as columns of the samples matrix. mean should be a single-column -% vector in this case. default false -% * __Scale__ If the flag is specified, the covariance matrix is scaled. -% In the "normal" mode, scale is `1./nsamples`. In the "scrambled" -% mode, scale is the reciprocal of the total number of elements in -% each input vector. By default (if the flag is not specified), -% the covariance matrix is not scaled (`scale=1`). default false -% * __UseAvg__ If the flag is specified, the function does not calculate -% mean from the input vectors but, instead, uses the passed mean -% vector. This is useful if mean has been pre-calculated or known -% in advance, or if the covariance matrix is calculated by parts. -% In this case, mean is not a mean vector of the input sub-set of -% vectors but rather the mean vector of the whole set. -% default false -% * __CType__ type of the matrix. default is 'double' +% size as the total number of elements in each input vector. One and only +% one of 'Scrambled' and 'Normal' must be specified. default true +% * __Scrambled__ The covariance matrix will be `nsamples x nsamples`. Such an +% unusual covariance matrix is used for fast cv.PCA of a set of very large +% vectors (see, for example, the EigenFaces technique for face recognition). +% Eigenvalues of this "scrambled" matrix match the eigenvalues of the true +% covariance matrix. The "true" eigenvectors can be easily calculated from +% the eigenvectors of the "scrambled" covariance matrix. default false +% * __Rows__ If the flag is specified, all the input vectors are stored as +% rows of the samples matrix. mean should be a single-row vector in this +% case. default true +% * __Cols__ If the flag is specified, all the input vectors are stored as +% columns of the samples matrix. mean should be a single-column vector in +% this case. default false +% * __Scale__ If the flag is specified, the covariance matrix is scaled. In +% the "normal" mode, scale is `1./nsamples`. In the "scrambled" mode, scale +% is the reciprocal of the total number of elements in each input vector. By +% default (if the flag is not specified), the covariance matrix is not +% scaled (`scale=1`). default false +% * __UseAvg__ If the flag is specified, the function does not calculate mean +% from the input vectors but, instead, uses the passed mean vector. This is +% useful if mean has been pre-calculated or known in advance, or if the +% covariance matrix is calculated by parts. In this case, mean is not a mean +% vector of the input sub-set of vectors but rather the mean vector of the +% whole set. default false +% * __CType__ type of the matrix. default is `double` % % The function cv.calcCovarMatrix calculates the covariance matrix and, % optionally, the mean vector of the set of input vectors. diff --git a/+cv/calcHist.m b/+cv/calcHist.m index e31db783c..db7641275 100644 --- a/+cv/calcHist.m +++ b/+cv/calcHist.m @@ -1,75 +1,72 @@ %CALCHIST Calculates a histogram of a set of arrays % -% H = cv.calcHist(images, ranges) -% H = cv.calcHist(..., 'OptionName',optionValue, ...) +% H = cv.calcHist(images, ranges) +% H = cv.calcHist(..., 'OptionName',optionValue, ...) % % ## Input % * __images__ Source arrays. A numeric array, or cell array of numeric arrays -% are accepted. They all should have the same class (`uint8`, `uint16`, -% or `single`) and the same row/column size. Each of them can have an -% arbitrary number of channels. 
Note that passing `{img1, img2, ...}` as -% input is similar to using `cat(3, img1, img2, ...)`, i.e the function -% computes the histogram from channels of input arrays. +% are accepted. They all should have the same class (`uint8`, `uint16`, or +% `single`) and the same row/column size. Each of them can have an arbitrary +% number of channels. Note that passing `{img1, img2, ...}` as input is +% similar to using `cat(3, img1, img2, ...)`, i.e the function computes the +% histogram from channels of input arrays. % * __ranges__ Cell-array of length `N` (histogram dimensionality) of the -% histogram bin boundaries in each dimension. -% * When the histogram is uniform (`Uniform=true`), then for each -% dimension `i` it is enough to specify the lower (inclusive) boundary -% `L(1) = ranges{i}(1)` of the first histogram bin and the upper -% (exclusive) boundary `U(n) = ranges{i}(end)` for the last histogram -% bin `HistSize(i)`. That is, in case of a uniform histogram each of -% `ranges{i}` is an array of 2 elements forming an interval `[L,U)` -% which is automatically divided according to `HistSize(i)`. -% * When the histogram is not uniform (`Uniform=false`), then each of -% `ranges{i}` contains `HistSize(i)+1` elements, specifying the bin -% edges of dimension `i`: `L(1), L(2), ..., L(n), U(n)` forming the -% half-open intervals: -% `[L(1), U(1)), [L(2), U(2)), ..., [L(n-1), U(n-1)), [L(n), U(n))` -% where `U(1)==L(2), U(2)==L(3), ..., U(n-1)==L(n)`, and `n` is the -% histogram size of the current dimension (`n = HistSize(i)`). The -% array elements, that are not between `L(1)` and `U(n)`, are not -% counted in the histogram. +% histogram bin boundaries in each dimension. +% * When the histogram is uniform (`Uniform=true`), then for each dimension +% `i` it is enough to specify the lower (inclusive) boundary +% `L(1) = ranges{i}(1)` of the first histogram bin and the upper +% (exclusive) boundary `U(n) = ranges{i}(end)` for the last histogram bin +% `HistSize(i)`. That is, in case of a uniform histogram each of +% `ranges{i}` is an array of 2 elements forming an interval `[L,U)` which +% is automatically divided according to `HistSize(i)`. +% * When the histogram is not uniform (`Uniform=false`), then each of +% `ranges{i}` contains `HistSize(i)+1` elements, specifying the bin edges +% of dimension `i`: `L(1), L(2), ..., L(n), U(n)` forming the half-open +% intervals: `[L(1), U(1)), [L(2), U(2)), ..., [L(n-1), U(n-1)), [L(n), U(n))` +% where `U(1)==L(2), U(2)==L(3), ..., U(n-1)==L(n)`, and `n` is the +% histogram size of the current dimension (`n = HistSize(i)`). The array +% elements that are not between `L(1)` and `U(n)` are not counted in the +% histogram. % % ## Output % * __H__ Output histogram, which is a dense or sparse N-dimensional array of -% type `single` (`N` is the histogram dimensionality that must be -% positive and not greater than 32 in the current OpenCV version). The -% size of the output N-D array is -% `HistSize(1)-by-HistSize(2)-by-...-by-HistSize(N)`. +% type `single` (`N` is the histogram dimensionality that must be positive +% and not greater than 32 in the current OpenCV version). The size of the +% output N-D array is `HistSize(1)-by-HistSize(2)-by-...-by-HistSize(N)`. % % ## Options % * __Channels__ List of channels used to compute the histogram (as 0-based -% indices). The number of channels must match the histogram -% dimensionality `N`.
The first array channels are numerated from `0` to -% `size(images{1},3)-1`, the second array channels are counted from -% `size(images{1},3)` to `size(images{1},3) + size(images{2},3)-1`, and -% so on. By default, all channels from all images are used to compute -% the histogram, i.e default is `0:sum(cellfun(@(im)size(im,3), images))-1` -% when input `images` is a cell array, and `0:(size(images,3)-1)` when -% input `images` is a numeric array. +% indices). The number of channels must match the histogram dimensionality +% `N`. The first array channels are numerated from `0` to +% `size(images{1},3)-1`, the second array channels are counted from +% `size(images{1},3)` to `size(images{1},3) + size(images{2},3)-1`, and so +% on. By default, all channels from all images are used to compute the +% histogram, i.e default is `0:sum(cellfun(@(im)size(im,3), images))-1` when +% input `images` is a cell array, and `0:(size(images,3)-1)` when input +% `images` is a numeric array. % * __Mask__ Optional mask. If the matrix is not empty, it must be an 8-bit or -% logical array of the same row/column size as `images{i}`. The non-zero -% mask elements mark the array elements (pixels) counted in the -% histogram. Not set by default. +% logical array of the same row/column size as `images{i}`. The non-zero +% mask elements mark the array elements (pixels) counted in the histogram. +% Not set by default. % * __HistSize__ Array of histogram sizes in each dimension. Use together -% with the `Uniform` flag. Default is `cellfun(@numel,ranges)-1`. -% * When the histogram is uniform, the range specified in `ranges{i}` is -% divided into `HistSize(i)` uniform bins. The interval is divided into -% bins using equally-spaced boundaries defined as: -% `ranges{i} = linspace(ranges{i}(1), ranges{i}(end), HistSize(i)+1)`. -% * When the histogram is not uniform, `ranges{i}` is used as is for the -% bin boundaries without considering `HistSize`. -% * __Uniform__ Logical flag indicating whether the histogram is uniform -% or not (see above). default false. +% with the `Uniform` flag. Default is `cellfun(@numel,ranges)-1`. +% * When the histogram is uniform, the range specified in `ranges{i}` is +% divided into `HistSize(i)` uniform bins. The interval is divided into +% bins using equally-spaced boundaries defined as: +% `ranges{i} = linspace(ranges{i}(1), ranges{i}(end), HistSize(i)+1)`. +% * When the histogram is not uniform, `ranges{i}` is used as is for the +% bin boundaries without considering `HistSize`. +% * __Uniform__ Logical flag indicating whether the histogram is uniform or +% not (see above). default false. % * __Hist__ Input histogram, used in accumulation mode. Either a dense or -% sparse array, see `H`. If it is set, the output histogram is -% initialized with it instead of being cleared in the beginning when it -% is allocated. This feature enables you to compute a single histogram -% from several sets of arrays, or to update the histogram in time. -% Not set by default. +% sparse array, see `H`. If it is set, the output histogram is initialized +% with it instead of being cleared in the beginning when it is allocated. +% This feature enables you to compute a single histogram from several sets +% of arrays, or to update the histogram in time. Not set by default. % * __Sparse__ Logical flag indicating whether the output should be sparse. -% default false (i.e output histogram is a dense array). 
Keep in mind -% that MATLAB only supports 2D sparse matrices, so use you must use -% dense arrays if the histogram has more than two dimensions. +% default false (i.e output histogram is a dense array). Keep in mind that +% MATLAB only supports 2D sparse matrices, so you must use dense arrays +% if the histogram has more than two dimensions. % % The function cv.calcHist calculates the histogram of one or more arrays. The % elements of a tuple used to increment a histogram bin are taken from the @@ -79,36 +76,36 @@ % The sample below shows how to compute a 2D Hue-Saturation histogram for a % color image: % -% hsv = cv.cvtColor(img, 'RGB2HSV'); -% edges = {linspace(0,180,30+1), linspace(0,256,32+1)}; -% H = cv.calcHist(hsv(:,:,1:2), edges); +% hsv = cv.cvtColor(img, 'RGB2HSV'); +% edges = {linspace(0,180,30+1), linspace(0,256,32+1)}; +% H = cv.calcHist(hsv(:,:,1:2), edges); % % Here is another example showing the different options: % -% % read some image, and convert to HSV colorspace -% imgRGB = imread(fullfile(mexopencv.root(),'test','img001.jpg')); -% imgHSV = cv.cvtColor(imgRGB, 'RGB2HSV'); +% % read some image, and convert to HSV colorspace +% imgRGB = imread(fullfile(mexopencv.root(),'test','img001.jpg')); +% imgHSV = cv.cvtColor(imgRGB, 'RGB2HSV'); % -% % quantize the hue to 30 levels, and the saturation to 32 levels -% histSize = [30, 32]; -% hranges = linspace(0, 180, histSize(1)+1); % hue varies from 0 to 179 -% sranges = linspace(0, 256, histSize(2)+1); % sat varies from 0 to 255 -% ranges = {hranges, sranges}; +% % quantize the hue to 30 levels, and the saturation to 32 levels +% histSize = [30, 32]; +% hranges = linspace(0, 180, histSize(1)+1); % hue varies from 0 to 179 +% sranges = linspace(0, 256, histSize(2)+1); % sat varies from 0 to 255 +% ranges = {hranges, sranges}; % -% % one way -% H = cv.calcHist(imgHSV(:,:,[1 2]), ranges); +% % one way +% H = cv.calcHist(imgHSV(:,:,[1 2]), ranges); % -% % another way -% H = cv.calcHist(imgHSV, ranges, 'Channels',[1 2]-1, 'HistSize',histSize); +% % another way +% H = cv.calcHist(imgHSV, ranges, 'Channels',[1 2]-1, 'HistSize',histSize); % -% % or similarly -% H = cv.calcHist({imgHSV(:,:,1), imgHSV(:,:,2)}, {[0,180], [0,256]}, ... -% 'HistSize',histSize, 'Uniform',true); +% % or similarly +% H = cv.calcHist({imgHSV(:,:,1), imgHSV(:,:,2)}, {[0,180], [0,256]}, ... +% 'HistSize',histSize, 'Uniform',true); % -% % show H-S histogram -% imagesc(H, 'YData',[0 180], 'XData',[0 256]) -% axis image; colormap gray; colorbar -% ylabel('Hue'); xlabel('Saturation'); title('Histogram') +% % show H-S histogram +% imagesc(H, 'YData',[0 180], 'XData',[0 256]) +% axis image; colormap gray; colorbar +% ylabel('Hue'); xlabel('Saturation'); title('Histogram') % % See also: cv.calcBackProject, cv.compareHist, cv.EMD, hist, histc, % histogram, histcounts, histcounts2, discretize, imhist, hist3 diff --git a/+cv/calcOpticalFlowFarneback.m b/+cv/calcOpticalFlowFarneback.m index c0570b645..6a2bda127 100644 --- a/+cv/calcOpticalFlowFarneback.m +++ b/+cv/calcOpticalFlowFarneback.m @@ -1,50 +1,49 @@ %CALCOPTICALFLOWFARNEBACK Computes a dense optical flow using the Gunnar Farneback's algorithm % -% flow = cv.calcOpticalFlowFarneback(prevImg, nextImg) -% flow = cv.calcOpticalFlowFarneback(prevImg, nextImg, 'OptionName',optionValue, ...) +% flow = cv.calcOpticalFlowFarneback(prevImg, nextImg) +% flow = cv.calcOpticalFlowFarneback(prevImg, nextImg, 'OptionName',optionValue, ...) % % ## Input % * __prevImg__ First 8-bit single-channel input image.
% * __nextImg__ Second input image of the same size and the same type as -% `prevImg`. +% `prevImg`. % % ## Output % * __flow__ Computed flow image that has the same size as `prevImg` and -% `single` type (2-channels). Flow for `(x,y)` is stored in the third -% dimension. +% `single` type (2-channels). Flow for `(x,y)` is stored in the third +% dimension. % % ## Options % * __InitialFlow__ Initial flow approximation. Not set by default. % * __PyrScale__ Parameter specifying the image scale (`<1`) to build pyramids -% for each image. `PyrScale=0.5` means a classical pyramid, where each -% next layer is twice smaller than the previous one. default 0.5. +% for each image. `PyrScale=0.5` means a classical pyramid, where each next +% layer is twice smaller than the previous one. default 0.5. % * __Levels__ Number of pyramid layers including the initial image. -% `Levels=1` means that no extra layers are created and only the -% original images are used. default 5. +% `Levels=1` means that no extra layers are created and only the original +% images are used. default 5. % * __WinSize__ Averaging window size. Larger values increase the algorithm -% robustness to image noise and give more chances for fast motion -% detection, but yield more blurred motion field. default 13. +% robustness to image noise and give more chances for fast motion detection, +% but yield a more blurred motion field. default 13. % * __Iterations__ Number of iterations the algorithm does at each pyramid -% level. default 10. +% level. default 10. % * __PolyN__ Size of the pixel neighborhood used to find polynomial expansion -% in each pixel. Larger values mean that the image will be approximated -% with smoother surfaces, yielding more robust algorithm and more -% blurred motion field. Typically, `PolyN` is 5 or 7. default 5. +% in each pixel. Larger values mean that the image will be approximated with +% smoother surfaces, yielding a more robust algorithm and a more blurred +% motion field. Typically, `PolyN` is 5 or 7. default 5. % * __PolySigma__ Standard deviation of the Gaussian that is used to smooth -% derivatives used as a basis for the polynomial expansion. For -% `PolyN=5`, you can set `PolySigma = 1.1`. For `PolyN=7`, a good value -% would be `PolySigma = 1.5`. default 1.1. +% derivatives used as a basis for the polynomial expansion. For `PolyN=5`, +% you can set `PolySigma = 1.1`. For `PolyN=7`, a good value would be +% `PolySigma = 1.5`. default 1.1. % * __Gaussian__ Use the Gaussian `WinSize x WinSize` filter instead of a box -% filter of the same size for optical flow estimation. Usually, this -% option gives z more accurate flow than with a box filter, at the cost -% of lower speed. Normally, `WinSize` for a Gaussian window should be -% set to a larger value to achieve the same level of robustness. -% default false. +% filter of the same size for optical flow estimation. Usually, this option +% gives a more accurate flow than with a box filter, at the cost of lower +% speed. Normally, `WinSize` for a Gaussian window should be set to a larger +% value to achieve the same level of robustness. default false.
% % The function finds an optical flow for each `prevImg` pixel using the % [Farneback2003] algorithm so that: % -% prevImg(y,x) ~ next(y + flow(y,x,2), x + flow(y,x,1)) +% prevImg(y,x) ~ nextImg(y + flow(y,x,2), x + flow(y,x,1)) % % ## References % [Farneback2003]:
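A minimal dense-flow sketch using only the options documented above (`im1` and `im2` are assumed to be two consecutive grayscale `uint8` frames):

    flow = cv.calcOpticalFlowFarneback(im1, im2, 'PyrScale',0.5, ...
        'Levels',5, 'WinSize',13, 'PolyN',5, 'PolySigma',1.1);
    u = flow(:,:,1);  v = flow(:,:,2);   % x- and y-displacements
    imshow(hypot(u, v), [])              % visualize the flow magnitude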
diff --git a/+cv/calcOpticalFlowPyrLK.m b/+cv/calcOpticalFlowPyrLK.m index 78fc85051..5989679ab 100644 --- a/+cv/calcOpticalFlowPyrLK.m +++ b/+cv/calcOpticalFlowPyrLK.m @@ -1,61 +1,59 @@ %CALCOPTICALFLOWPYRLK Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with pyramids % -% nextPts = cv.calcOpticalFlowPyrLK(prevImg, nextImg, prevPts) -% [nextPts, status, err] = cv.calcOpticalFlowPyrLK(...) -% [...] = cv.calcOpticalFlowPyrLK(..., 'OptionName', optionValue, ...) +% nextPts = cv.calcOpticalFlowPyrLK(prevImg, nextImg, prevPts) +% [nextPts, status, err] = cv.calcOpticalFlowPyrLK(...) +% [...] = cv.calcOpticalFlowPyrLK(..., 'OptionName', optionValue, ...) % % ## Input % * __prevImg__ First 8-bit single-channel or 3-channel input image, or -% pyramid constructed by cv.buildOpticalFlowPyramid +% pyramid constructed by cv.buildOpticalFlowPyramid % * __nextImg__ Second input image or pyramid of the same size and the same -% type as `prevImg`. +% type as `prevImg`. % * __prevPts__ Vector of 2D points for which the flow needs to be found; -% point coordinates must be single-precision floating-point numbers. -% Cell-array of 2-element vectors is accepted: `{[x,y], ...}` +% point coordinates must be single-precision floating-point numbers. +% Cell-array of 2-element vectors is accepted: `{[x,y], ...}` % % ## Output % * __nextPts__ Output vector of 2D points (with single-precision -% floating-point coordinates) containing the calculated new positions of -% input features in the second image. A cell array of 2-elements vectors -% of the same size as `prevPts`. +% floating-point coordinates) containing the calculated new positions of +% input features in the second image. A cell array of 2-element vectors of +% the same size as `prevPts`. % * __status__ Output status vector. Each element of the vector is set to 1 if -% the flow for the corresponding features has been found. Otherwise, it -% is set to 0. +% the flow for the corresponding features has been found. Otherwise, it is +% set to 0. % * __err__ Output vector of errors; each element of the vector is set to an -% error for the corresponding feature, type of the error measure is -% determined by `GetMinEigenvals` option; if the flow wasn't found then -% the error is not defined (use `status` to find such cases). +% error for the corresponding feature, type of the error measure is +% determined by `GetMinEigenvals` option; if the flow wasn't found then the +% error is not defined (use `status` to find such cases). % % ## Options % * __InitialFlow__ Vector of 2D points to be used for the initial estimate of -% `nextPts`. If not specified, `prevPts` will be used as an initial -% estimate. The vector must have the same size as in the input. -% Not set by default. +% `nextPts`. If not specified, `prevPts` will be used as an initial +% estimate. The vector must have the same size as in the input. Not set by +% default. % * __WinSize__ Size of the search window at each pyramid level. Default to -% [21, 21]. +% [21, 21]. % * __MaxLevel__ 0-based maximal pyramid level number. If set to 0, pyramids -% are not used (single level). If set to 1, two levels are used, and so -% on; if pyramids are passed to input then algorithm will use as many -% levels as pyramids have but no more than `MaxLevel`. Default to 3. +% are not used (single level). If set to 1, two levels are used, and so on; +% if pyramids are passed to input then algorithm will use as many levels as +% pyramids have but no more than `MaxLevel`. Default to 3. % * __Criteria__ Parameter specifying the termination criteria of the -% iterative search algorithm (after the specified maximum number of -% iterations `Criteria.maxCount` or when the search window moves by less -% than `Criteria.epsilon`. Struct with `{'type','maxCount','epsilon'}` -% fields is accepted. The type field should have one of 'Count', 'EPS', -% or 'Count+EPS' to indicate which criteria to use. Default to -% `struct('type','Count+EPS', 'maxCount',30, 'epsilon',0.01)`. +% iterative search algorithm (after the specified maximum number of +% iterations `Criteria.maxCount` or when the search window moves by less +% than `Criteria.epsilon`). Struct with `{'type','maxCount','epsilon'}` +% fields is accepted. The type field should have one of 'Count', 'EPS', or +% 'Count+EPS' to indicate which criteria to use. Default to +% `struct('type','Count+EPS', 'maxCount',30, 'epsilon',0.01)`. % * __GetMinEigenvals__ Use minimum eigen values as an error measure (see -% `MinEigThreshold` description); if the flag is not set, then L1 -% distance between patches around the original and a moved point, -% divided by number of pixels in a window, is used as a error measure. -% Default to false. +% `MinEigThreshold` description); if the flag is not set, then L1 distance +% between patches around the original and a moved point, divided by number +% of pixels in a window, is used as an error measure. Default to false. % * __MinEigThreshold__ The algorithm calculates the minimum eigen value of a -% 2x2 normal matrix of optical flow equations (this matrix is called a -% spatial gradient matrix in [Bouguet00]), divided by number of pixels -% in a window; if this value is less than `MinEigThreshold`, then a -% corresponding feature is filtered out and its flow is not processed, -% so it allows to remove bad points and get a performance boost. -% Default to 1e-4. +% 2x2 normal matrix of optical flow equations (this matrix is called a +% spatial gradient matrix in [Bouguet00]), divided by number of pixels in a +% window; if this value is less than `MinEigThreshold`, then a corresponding +% feature is filtered out and its flow is not processed, so it allows +% removing bad points and getting a performance boost. Default to 1e-4. % % The function implements a sparse iterative version of the Lucas-Kanade % optical flow in pyramids. See [Bouguet00].
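Because `prevImg` and `nextImg` also accept pyramids built by cv.buildOpticalFlowPyramid (see the inputs above), a pyramid can be computed once per frame and reused across calls; a sketch, assuming grayscale frames `im1` and `im2`:

    pts = cv.goodFeaturesToTrack(im1);
    pyr1 = cv.buildOpticalFlowPyramid(im1, 'WinSize',[21 21], 'MaxLevel',3);
    pyr2 = cv.buildOpticalFlowPyramid(im2, 'WinSize',[21 21], 'MaxLevel',3);
    [nextPts, status] = cv.calcOpticalFlowPyrLK(pyr1, pyr2, pts, ...
        'WinSize',[21 21], 'MaxLevel',3);
    tracked = nextPts(status == 1);   % keep points whose flow was found

Note that the `WinSize` passed to cv.buildOpticalFlowPyramid must not be less than the one used for tracking.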
The function is parallelized with @@ -68,10 +66,10 @@ % % ## Example % -% prevIm = rgb2gray(imread('prev.jpg')); -% nextIm = rgb2gray(imread('next.jpg')); -% prevPts = cv.goodFeaturesToTrack(prevIm); -% nextPts = cv.calcOpticalFlowPyrLK(prevIm, nextIm, prevPts); +% prevIm = rgb2gray(imread('prev.jpg')); +% nextIm = rgb2gray(imread('next.jpg')); +% prevPts = cv.goodFeaturesToTrack(prevIm); +% nextPts = cv.calcOpticalFlowPyrLK(prevIm, nextIm, prevPts); % % See also: cv.SparsePyrLKOpticalFlow, cv.calcOpticalFlowFarneback, % cv.goodFeaturesToTrack, vision.PointTracker diff --git a/+cv/calibrateCamera.m b/+cv/calibrateCamera.m index 16c0b8577..2ad85f316 100644 --- a/+cv/calibrateCamera.m +++ b/+cv/calibrateCamera.m @@ -1,123 +1,120 @@ %CALIBRATECAMERA Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern % -% [cameraMatrix, distCoeffs, reprojErr] = cv.calibrateCamera(objectPoints, imagePoints, imageSize) -% [cameraMatrix, distCoeffs, reprojErr, rvecs, tvecs, stdDevsIntrinsics, stdDevsExtrinsics, perViewErrors] = cv.calibrateCamera(...) -% [...] = cv.calibrateCamera(..., 'OptionName', optionValue, ...) +% [cameraMatrix, distCoeffs, reprojErr] = cv.calibrateCamera(objectPoints, imagePoints, imageSize) +% [cameraMatrix, distCoeffs, reprojErr, rvecs, tvecs, stdDevsIntrinsics, stdDevsExtrinsics, perViewErrors] = cv.calibrateCamera(...) +% [...] = cv.calibrateCamera(..., 'OptionName', optionValue, ...) % % ## Input % * __objectPoints__ A cell array of cells of calibration pattern points in -% the calibration pattern coordinate space `{{[x,y,z], ..}, ...}`. The -% outer vector contains as many elements as the number of the pattern -% views. If the same calibration pattern is shown in each view and it is -% fully visible, all the vectors will be the same. Although, it is -% possible to use partially occluded patterns, or even different -% patterns in different views. Then, the vectors will be different. The -% points are 3D, but since they are in a pattern coordinate system, -% then, if the rig is planar, it may make sense to put the model to a XY -% coordinate plane so that Z-coordinate of each input object point is 0. -% Requires at least 4 points per view. +% the calibration pattern coordinate space `{{[x,y,z], ..}, ...}`. The outer +% vector contains as many elements as the number of the pattern views. If +% the same calibration pattern is shown in each view and it is fully +% visible, all the vectors will be the same. However, it is possible to use +% partially occluded patterns, or even different patterns in different +% views. Then, the vectors will be different. The points are 3D, but since +% they are in a pattern coordinate system, then, if the rig is planar, it +% may make sense to put the model to a XY coordinate plane so that +% Z-coordinate of each input object point is 0. Requires at least 4 points +% per view. % * __imagePoints__ A cell array of cells of the projections of calibration -% pattern points `{{[x,y], ..}, ...}`. `numel(imagePoints)` and -% `numel(objectPoints)` must be equal, and `numel(imagePoints{i})` must -% be equal to `numel(objectPoints{i})` for each `i`. +% pattern points `{{[x,y], ..}, ...}`. `numel(imagePoints)` and +% `numel(objectPoints)` must be equal, and `numel(imagePoints{i})` must be +% equal to `numel(objectPoints{i})` for each `i`. % * __imageSize__ Size of the image used only to initialize the intrinsic -% camera matrix `[w,h]`. +% camera matrix `[w,h]`.
% % ## Output % * __cameraMatrix__ Output 3x3 floating-point camera matrix -% `A = [fx 0 cx; 0 fy cy; 0 0 1]` +% `A = [fx 0 cx; 0 fy cy; 0 0 1]` % * __distCoeffs__ Output vector of distortion coefficients -% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy]` of 4, 5, 8, 12 or 14 -% elements. +% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy]` of 4, 5, 8, 12 or 14 +% elements. % * __reprojErr__ the overall RMS re-projection error. % * __rvecs__ Output cell array of rotation vectors (see cv.Rodrigues) -% estimated for each pattern view (cell array of 3-element vectors). -% That is, each k-th rotation vector together with the corresponding -% k-th translation vector (see the next output parameter description) -% brings the calibration pattern from the model coordinate space (in -% which object points are specified) to the world coordinate space, -% that is, a real position of the calibration pattern in the k-th -% pattern view (`k=1:M`) +% estimated for each pattern view (cell array of 3-element vectors). That +% is, each k-th rotation vector together with the corresponding k-th +% translation vector (see the next output parameter description) brings the +% calibration pattern from the model coordinate space (in which object +% points are specified) to the world coordinate space, that is, a real +% position of the calibration pattern in the k-th pattern view (`k=1:M`) % * __tvecs__ Output cell array of translation vectors estimated for each -% pattern view (cell array of 3-element vectors). +% pattern view (cell array of 3-element vectors). % * __stdDevsIntrinsics__ Output vector of standard deviations estimated for -% intrinsic parameters. Order of deviations values: -% `(fx,fy,cx,cy,k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy)`. If one -% of parameters is not estimated, its deviation is equals to zero. +% intrinsic parameters. Order of deviations values: +% `(fx,fy,cx,cy,k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy)`. If one of +% the parameters is not estimated, its deviation equals zero. % * __stdDevsExtrinsics__ Output vector of standard deviations estimated for -% extrinsic parameters. Order of deviations values: -% `(R1, T1, ..., RM, TM)` where `M` is number of pattern views, `Ri, Ti` -% are concatenated 1x3 vectors. +% extrinsic parameters. Order of deviations values: `(R1, T1, ..., RM, TM)` +% where `M` is number of pattern views, `Ri, Ti` are concatenated 1x3 +% vectors. % * __perViewErrors__ Output vector of the RMS re-projection error estimated -% for each pattern view. +% for each pattern view. % % ## Options % * __CameraMatrix__ Input 3x3 camera matrix used as initial value for -% `cameraMatrix`. If any of `UseIntrinsicGuess`, `FixAspectRatio`, or -% `FixFocalLength` are specified, some or all of `fx`, `fy`, `cx`, `cy` -% must be initialized before calling the function. Not set by default -% (uses `eye(3)`). +% `cameraMatrix`. If any of `UseIntrinsicGuess`, `FixAspectRatio`, or +% `FixFocalLength` are specified, some or all of `fx`, `fy`, `cx`, `cy` must +% be initialized before calling the function. Not set by default (uses +% `eye(3)`). % * __DistCoeffs__ Input 4, 5, 8, 12 or 14 elements vector used as the initial -% values of `distCoeffs`. Not set by default (uses `zeros(1,14)`). +% values of `distCoeffs`. Not set by default (uses `zeros(1,14)`). % * __UseIntrinsicGuess__ When true, `CameraMatrix` contains valid initial -% values of `fx`, `fy`, `cx`, `cy` that are optimized further.
-% Otherwise, `(cx,cy)` is initially set to the image center (`imageSize` -% is used), and focal distances are computed in a least-squares fashion. -% Note, that if intrinsic parameters are known, there is no need to use -% this function just to estimate extrinsic parameters. Use cv.solvePnP -% instead. default false. +% values of `fx`, `fy`, `cx`, `cy` that are optimized further. Otherwise, +% `(cx,cy)` is initially set to the image center (`imageSize` is used), and +% focal distances are computed in a least-squares fashion. Note that if +% intrinsic parameters are known, there is no need to use this function just +% to estimate extrinsic parameters. Use cv.solvePnP instead. default false. % * __FixPrincipalPoint__ The principal point is not changed during the global -% optimization. It stays at the image center or at a different location -% specified when `UseIntrinsicGuess` is set too. default false. +% optimization. It stays at the image center or at a different location +% specified when `UseIntrinsicGuess` is set as well. default false. % * __FixFocalLength__ Fix `fx` and `fy`, as specified in the input -% `CameraMatrix`. default false. +% `CameraMatrix`. default false. % * __FixAspectRatio__ The function considers only `fy` as a free parameter. -% The ratio `fx/fy` stays the same as in the input `CameraMatrix`. When -% `UseIntrinsicGuess` is not set, the actual input values of `fx` and -% `fy` are ignored, only their ratio is computed and used further. -% default false. +% The ratio `fx/fy` stays the same as in the input `CameraMatrix`. When +% `UseIntrinsicGuess` is not set, the actual input values of `fx` and `fy` +% are ignored, only their ratio is computed and used further. default false. % * __ZeroTangentDist__ Tangential distortion coefficients `p1` and `p2` are -% set to zeros and stay fixed. default false. -% * __FixTangentDist__ The tangential distortion coefficients are not -% changed during the optimization. If `UseIntrinsicGuess` is set, the -% coefficient from the supplied `DistCoeffs` matrix is used. Otherwise, -% it is set to 0. default false. +% set to zeros and stay fixed. default false. +% * __FixTangentDist__ The tangential distortion coefficients are not changed +% during the optimization. If `UseIntrinsicGuess` is set, the coefficient +% from the supplied `DistCoeffs` matrix is used. Otherwise, it is set to 0. +% default false. % * __FixK1__, ..., __FixK6__ The corresponding radial distortion coefficient -% is not changed during the optimization. If `UseIntrinsicGuess` is set, -% the coefficient from the supplied `DistCoeffs` matrix is used. -% Otherwise, it is set to 0. default false. +% is not changed during the optimization. If `UseIntrinsicGuess` is set, the +% coefficient from the supplied `DistCoeffs` matrix is used. Otherwise, it +% is set to 0. default false. % * __RationalModel__ Coefficients `k4`, `k5`, and `k6` are enabled. To -% provide the backward compatibility, this extra flag should be -% explicitly specified to make the calibration function use the rational -% model and return 8 coefficients. If the flag is not set, the function -% computes and returns only 5 distortion coefficients. default false. -% (`RationalModel` as false implies `FixK4`,`FixK5`,`FixK6` as true). +% provide the backward compatibility, this extra flag should be explicitly +% specified to make the calibration function use the rational model and +% return 8 coefficients. If the flag is not set, the function computes and +% returns only 5 distortion coefficients. default false.
(`RationalModel` as +% false implies `FixK4`,`FixK5`,`FixK6` as true). % * __ThinPrismModel__ Coefficients `s1`, `s2`, `s3` and `s4` are enabled. To -% provide the backward compatibility, this extra flag should be -% explicitly specified to make the calibration function use the thin -% prism model and return 12 coefficients. If the flag is not set, the -% function computes and returns only 5 distortion coefficients. default -% false. (`ThinPrismModel` as false implies `FixS1S2S3S4` as true). +% provide the backward compatibility, this extra flag should be explicitly +% specified to make the calibration function use the thin prism model and +% return 12 coefficients. If the flag is not set, the function computes and +% returns only 5 distortion coefficients. default false. (`ThinPrismModel` +% as false implies `FixS1S2S3S4` as true). % * __FixS1S2S3S4__ The thin prism distortion coefficients are not changed -% during the optimization. If `UseIntrinsicGuess` is set, the -% coefficient from the supplied `DistCoeffs` matrix is used. Otherwise, -% it is set to 0. default false. +% during the optimization. If `UseIntrinsicGuess` is set, the coefficient +% from the supplied `DistCoeffs` matrix is used. Otherwise, it is set to 0. +% default false. % * __TiltedModel__ Coefficients `tauX` and `tauY` are enabled. To provide the -% backward compatibility, this extra flag should be explicitly specified -% to make the calibration function use the tilted sensor model and -% return 14 coefficients. If the flag is not set, the function computes -% and returns only 5 distortion coefficients. default false. -% (`TiltedModel` as false implies `FixTauXTauY` as true). +% backward compatibility, this extra flag should be explicitly specified to +% make the calibration function use the tilted sensor model and return 14 +% coefficients. If the flag is not set, the function computes and returns +% only 5 distortion coefficients. default false. (`TiltedModel` as false +% implies `FixTauXTauY` as true). % * __FixTauXTauY__ The coefficients of the tilted sensor model are not -% changed during the optimization. If `UseIntrinsicGuess` is set, the -% coefficient from the supplied `DistCoeffs` matrix is used. Otherwise, -% it is set to 0. default false. +% changed during the optimization. If `UseIntrinsicGuess` is set, the +% coefficient from the supplied `DistCoeffs` matrix is used. Otherwise, it +% is set to 0. default false. % * __UseLU__ Use LU instead of SVD decomposition for solving. Much faster but -% potentially less precise. default false. +% potentially less precise. default false. % * __UseQR__ Use QR instead of SVD decomposition for solving. Faster but -% potentially less precise. default false. +% potentially less precise. default false. % * __Criteria__ Termination criteria for the iterative optimization algorithm. -% default `struct('type','Count+EPS', 'maxCount',30, 'epsilon',eps)` +% default `struct('type','Count+EPS', 'maxCount',30, 'epsilon',eps)` % % The function estimates the intrinsic camera parameters and extrinsic % parameters for each of the views. The algorithm is based on [Zhang2000] and @@ -146,7 +143,7 @@ % (using the current estimates for camera parameters and the poses) % object points `objectPoints`. See cv.projectPoints for details. 
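As a brief end-to-end sketch of the calibration workflow (a planar 9x6 chessboard and hypothetical image files `view01.jpg`..`view03.jpg` are assumed; cv.findChessboardCorners supplies the image points):

    patternSize = [9 6];    % inner corners per row/column (assumed pattern)
    [X, Y] = ndgrid(0:patternSize(1)-1, 0:patternSize(2)-1);
    objPts = num2cell([X(:), Y(:), zeros(numel(X),1)], 2);  % planar model, Z = 0
    objectPoints = {}; imagePoints = {};
    for i = 1:3
        im = rgb2gray(imread(sprintf('view%02d.jpg', i)));  % hypothetical files
        [corners, found] = cv.findChessboardCorners(im, patternSize);
        if ~found, continue; end
        objectPoints{end+1} = objPts;   % same planar model for every view
        imagePoints{end+1} = corners;   % detected 2D projections
    end
    imageSize = [size(im,2), size(im,1)];   % [w,h]
    [cameraMatrix, distCoeffs, reprojErr] = cv.calibrateCamera(...
        objectPoints, imagePoints, imageSize);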
% -% ## Note +% ### Note % If you use a non-square (=non-NxN) grid and cv.findChessboardCorners for % calibration, and cv.calibrateCamera returns bad values (zero distortion % coefficients, an image center very far from `(w/2-0.5,h/2-0.5)`, and/or diff --git a/+cv/calibrationMatrixValues.m b/+cv/calibrationMatrixValues.m index 6f5420e73..83cb60a76 100644 --- a/+cv/calibrationMatrixValues.m +++ b/+cv/calibrationMatrixValues.m @@ -1,28 +1,27 @@ %CALIBRATIONMATRIXVALUES Computes useful camera characteristics from the camera matrix % -% S = cv.calibrationMatrixValues(cameraMatrix, imageSize, apertureWidth, apertureHeight) +% S = cv.calibrationMatrixValues(cameraMatrix, imageSize, apertureWidth, apertureHeight) % % ## Input % * __cameraMatrix__ Input 3x3 camera matrix that can be estimated by -% cv.calibrateCamera or cv.stereoCalibrate. +% cv.calibrateCamera or cv.stereoCalibrate. % * __imageSize__ Input image size `[w,h]` in pixels. % * __apertureWidth__ Physical width in mm of the sensor. % * __apertureHeight__ Physical height in mm of the sensor. % % ## Output % * __S__ Struct with the following fields -% * __fovx__ Output field of view in degrees along the horizontal sensor -% axis. -% * __fovy__ Output field of view in degrees along the vertical sensor -% axis. -% * __focalLength__ Focal length of the lens in mm. -% * __principalPoint__ Principal point `[cx,cy]` in mm. -% * __aspectRatio__ Pixel aspect ratio `fy/fx`. +% * __fovx__ Output field of view in degrees along the horizontal sensor +% axis. +% * __fovy__ Output field of view in degrees along the vertical sensor axis. +% * __focalLength__ Focal length of the lens in mm. +% * __principalPoint__ Principal point `[cx,cy]` in mm. +% * __aspectRatio__ Pixel aspect ratio `fy/fx`. % % The function computes various useful camera characteristics from the % previously estimated camera matrix. % -% ## Note +% ### Note % Do keep in mind that the unity measure 'mm' stands for whatever unit of % measure one chooses for the chessboard pitch (it can thus be any value). % diff --git a/+cv/cartToPolar.m b/+cv/cartToPolar.m index 4d16fc074..6d8ebe7c3 100644 --- a/+cv/cartToPolar.m +++ b/+cv/cartToPolar.m @@ -1,29 +1,28 @@ %CARTTOPOLAR Calculates the magnitude and angle of 2D vectors % -% [mag, ang] = cv.cartToPolar(x, y) -% [...] = cv.cartToPolar(..., 'OptionName',optionValue, ...) +% [mag, ang] = cv.cartToPolar(x, y) +% [...] = cv.cartToPolar(..., 'OptionName',optionValue, ...) % % ## Input % * __x__ array of x-coordinates; this must be a single-precision or -% double-precision floating-point array. -% * __y__ array of y-coordinates, that must have the same size and type as -% `x`. +% double-precision floating-point array. +% * __y__ array of y-coordinates, that must have the same size and type as `x`. % % ## Output % * __mag__ output array of magnitudes of the same size and type as `x`. % * __ang__ output array of angles that has the same size and type as `x`; the -% angles are measured in radians (from 0 to 2*pi) or in degrees -% (0 to 360 degrees). +% angles are measured in radians (from 0 to 2*pi) or in degrees (0 to 360 +% degrees). % % ## Options % * __Degrees__ a flag, indicating whether the angles are measured in radians -% (which is by default), or in degrees. default false +% (which is by default), or in degrees. 
default false % % The function cv.cartToPolar calculates either the magnitude, angle, or both % for every 2D vector `(x(I),y(I))`: % -% magnitude(I) = sqrt(x(I)^2 + y(I)^2) -% angle(I) = atan2(y(I), x(I)) [* (180/pi)] +% magnitude(I) = sqrt(x(I)^2 + y(I)^2) +% angle(I) = atan2(y(I), x(I)) [* (180/pi)] % % The angles are calculated with accuracy about 0.3 degrees. For the point % `(0,0)`, the angle is set to 0. diff --git a/+cv/circle.m b/+cv/circle.m index 5e35c539b..badc64bd2 100644 --- a/+cv/circle.m +++ b/+cv/circle.m @@ -1,7 +1,7 @@ %CIRCLE Draws a circle % -% img = cv.circle(img, center, radius) -% [...] = cv.circle(..., 'OptionName', optionValue, ...) +% img = cv.circle(img, center, radius) +% [...] = cv.circle(..., 'OptionName', optionValue, ...) % % ## Input % * __img__ Image where the circle is drawn. @@ -13,16 +13,16 @@ % % ## Options % * __Color__ 3-element floating-point vector specifying circle color. -% default is a black color +% default is a black color % * __Thickness__ Thickness of the circle outline, if positive. Negative -% thickness means that a filled circle is to be drawn (-1 or the string -% 'Filled'). default 1. +% thickness means that a filled circle is to be drawn (-1 or the string +% 'Filled'). default 1. % * __LineType__ Type of the circle boundary. One of: -% * __4__ 4-connected line -% * __8__ 8-connected line (default) -% * __AA__ anti-aliased line -% * __Shift__ Number of fractional bits in the coordinates of the center -% and in the radius value. default 0. +% * __4__ 4-connected line +% * __8__ 8-connected line (default) +% * __AA__ anti-aliased line +% * __Shift__ Number of fractional bits in the coordinates of the center and +% in the radius value. default 0. % % The function cv.circle draws a simple or filled circle with a given center % and radius. diff --git a/+cv/clipLine.m b/+cv/clipLine.m index 6ab74dd4f..2d5b25634 100644 --- a/+cv/clipLine.m +++ b/+cv/clipLine.m @@ -1,7 +1,7 @@ %CLIPLINE Clips the line against the image rectangle % -% [B,pt1,pt2] = cv.clipLine(imgSize, pt1, pt2) -% [B,pt1,pt2] = cv.clipLine(imgRect, pt1, pt2) +% [B,pt1,pt2] = cv.clipLine(imgSize, pt1, pt2) +% [B,pt1,pt2] = cv.clipLine(imgRect, pt1, pt2) % % ## Input % * __imgSize__ Image size `[w,h]`. The image rectangle is `[0, 0, w, h]`. diff --git a/+cv/colorChange.m b/+cv/colorChange.m index 1e1b2e647..14aba81d1 100644 --- a/+cv/colorChange.m +++ b/+cv/colorChange.m @@ -1,7 +1,7 @@ %COLORCHANGE Color Change % -% dst = cv.colorChange(src, mask) -% dst = cv.colorChange(src, mask, 'OptionName',optionValue, ...) +% dst = cv.colorChange(src, mask) +% dst = cv.colorChange(src, mask, 'OptionName',optionValue, ...) % % ## Input % * __src__ Input 8-bit 3-channel image. @@ -15,8 +15,8 @@ % * __G__ G-channel multiply factor. default 1.0 % * __B__ B-channel multiply factor. default 1.0 % * __FlipChannels__ whether to flip the order of color channels in inputs -% `src` and `mask` and output `dst`, between MATLAB's RGB order and -% OpenCV's BGR (input: RGB->BGR, output: BGR->RGB). default false +% `src` and `mask` and output `dst`, between MATLAB's RGB order and OpenCV's +% BGR (input: RGB->BGR, output: BGR->RGB). default false % % Given an original color image, two differently colored versions of this % image can be mixed seamlessly. 
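A minimal sketch of the color-change call (the input file and the mask rectangle are hypothetical):

    src  = imread('fruits.jpg');                       % hypothetical 8-bit 3-channel image
    mask = zeros(size(src,1), size(src,2), 'uint8');
    mask(100:300, 150:400) = 255;                      % region whose color to re-mix
    dst  = cv.colorChange(src, mask, 'R',0.5, 'G',1.0, 'B',1.5, ...
        'FlipChannels',true);                          % keep MATLAB's RGB channel order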
diff --git a/+cv/compare.m b/+cv/compare.m index 2c560ac2b..05b80ba2f 100644 --- a/+cv/compare.m +++ b/+cv/compare.m @@ -1,48 +1,48 @@ %COMPARE Performs the per-element comparison of two arrays or an array and scalar value % -% dst = cv.compare(src1, src2, cmpop) +% dst = cv.compare(src1, src2, cmpop) % % ## Input % * __src1__ first input array or a scalar; when it is an array, it must have -% a single channel. +% a single channel. % * __src2__ second input array or a scalar; when it is an array, it must have -% a single channel. +% a single channel. % * __cmpop__ comparison type, specifies correspondence between the arrays. -% One of: -% * __eq__ `src1` is equal to `src2`. -% * __gt__ `src1` is greater than `src2`. -% * __ge__ `src1` is greater than or equal to `src2`. -% * __lt__ `src1` is less than `src2`. -% * __le__ `src1` is less than or equal to `src2`. -% * __ne__ `src1` is unequal to `src2`. +% One of: +% * __eq__ `src1` is equal to `src2`. +% * __gt__ `src1` is greater than `src2`. +% * __ge__ `src1` is greater than or equal to `src2`. +% * __lt__ `src1` is less than `src2`. +% * __le__ `src1` is less than or equal to `src2`. +% * __ne__ `src1` is unequal to `src2`. % % ## Output % * __dst__ output array of type `uint8` that has the same size and the same -% number of channels as the input arrays. +% number of channels as the input arrays. % % The function compares: % % * Elements of two arrays when `src1` and `src2` have the same size: % -% dst(I) = src1(I) cmpop src2(I) +% dst(I) = src1(I) cmpop src2(I) % % * Elements of `src1` with a scalar `src2` when `src2` is constructed from % Scalar or has a single element: % -% dst(I) = src1(I) cmpop src2 +% dst(I) = src1(I) cmpop src2 % % * `src1` with elements of `src2` when `src1` is constructed from Scalar or % has a single element: % -% dst(I) = src1 cmpop src2(I) +% dst(I) = src1 cmpop src2(I) % % When the comparison result is true, the corresponding element of output % array is set to 255. The comparison operations can be replaced with the % equivalent matrix expressions: % -% dst1 = src1 >= src2; -% dst2 = src1 < 8; -% ... +% dst1 = src1 >= src2; +% dst2 = src1 < 8; +% ... % % See also: cv.threshold, eq, gt, ge, lt, le, ne, min, max % diff --git a/+cv/compareHist.m b/+cv/compareHist.m index 0b2f7bea8..7c2d7d6c1 100644 --- a/+cv/compareHist.m +++ b/+cv/compareHist.m @@ -1,11 +1,11 @@ %COMPAREHIST Compares two histograms % -% d = cv.compareHist(H1, H2) -% d = cv.compareHist(H1, H2, 'OptionName',optionValue, ...) +% d = cv.compareHist(H1, H2) +% d = cv.compareHist(H1, H2, 'OptionName',optionValue, ...) % % ## Input % * __H1__ First compared histogram. Both dense or sparse arrays are supported -% (single-precision floating-point). +% (single-precision floating-point). % * __H2__ Second compared histogram of the same size and type as `H1`. % % ## Output @@ -13,15 +13,14 @@ % % ## Options % * __Method__ Comparison method, default 'Correlation'. One of: -% * __Correlation__ Correlation. -% * __ChiSquare__ Chi-Square. -% * __Intersection__ Intersection. -% * __Bhattacharyya__ Bhattacharyya distance (In fact, OpenCV computes -% Hellinger distance, which is related to Bhattacharyya -% coefficient). -% * __Hellinger__ Synonym for 'Bhattacharyya'. -% * __AltChiSquare__ Alternative Chi-Square. -% * __KullbackLeibler__ Kullback-Leibler divergence. +% * __Correlation__ Correlation. +% * __ChiSquare__ Chi-Square. +% * __Intersection__ Intersection. 
+% * __Bhattacharyya__ Bhattacharyya distance (In fact, OpenCV computes +% Hellinger distance, which is related to the Bhattacharyya coefficient). +% * __Hellinger__ Synonym for 'Bhattacharyya'. +% * __AltChiSquare__ Alternative Chi-Square. +% * __KullbackLeibler__ Kullback-Leibler divergence. % % The function cv.compareHist compares two dense or two sparse histograms % using the specified method. @@ -37,34 +36,34 @@ % % * __Correlation__ % -% d(H1,H2) = (sum_{I}((H1(I) - H1_hat(I))*(H2(I) - H2_hat(I)))) / -% (sum_{I}(H1(I) - H1_hat(I))^2 * sum_{I}(H2(I) - H2_hat(I))^2) +% d(H1,H2) = (sum_{I}((H1(I) - H1_hat(I))*(H2(I) - H2_hat(I)))) / +% (sum_{I}(H1(I) - H1_hat(I))^2 * sum_{I}(H2(I) - H2_hat(I))^2) % -% where `Hk_hat = 1/N * sum_{J} Hk(J)` and `N` is a total number of -% histogram bins. +% where `Hk_hat = 1/N * sum_{J} Hk(J)` and `N` is the total number of +% histogram bins. % % * __ChiSquare__ % -% d(H1,H2) = sum_{I} ((H1(I) - H2(I))^2 / H1(I)) +% d(H1,H2) = sum_{I} ((H1(I) - H2(I))^2 / H1(I)) % % * __Intersection__ % -% d(H1,H2) = sum_{I} min(H1(I), H2(I)) +% d(H1,H2) = sum_{I} min(H1(I), H2(I)) % % * __Bhattacharyya__ % -% d(H1,H2) = sqrt(1 - (1/sqrt(H1_hat(I)*H2_hat(I)*N^2)) * sum_{I}(sqrt(H1(I)*H2(I)))) +% d(H1,H2) = sqrt(1 - (1/sqrt(H1_hat(I)*H2_hat(I)*N^2)) * sum_{I}(sqrt(H1(I)*H2(I)))) % % * __AltChiSquare__ % -% d(H1,H2) = 2 * sum_{I} ((H1(I) - H2(I))^2 / (H1(I) + H2(I))) +% d(H1,H2) = 2 * sum_{I} ((H1(I) - H2(I))^2 / (H1(I) + H2(I))) % -% This alternative formula is regularly used for texture comparison. -% See e.g. [Puzicha1997] +% This alternative formula is regularly used for texture comparison. See +% [Puzicha1997]. % % * __KullbackLeibler__ % -% d(H1,H2) = sum_{I} (H1(I) * log(H1(I)/H2(I))) +% d(H1,H2) = sum_{I} (H1(I) * log(H1(I)/H2(I))) % % ## References % [Puzicha1997]: diff --git a/+cv/composeRT.m b/+cv/composeRT.m index eed669811..7d9b53f64 100644 --- a/+cv/composeRT.m +++ b/+cv/composeRT.m @@ -1,6 +1,6 @@ %COMPOSERT Combines two rotation-and-shift transformations % -% S = cv.composeRT(rvec1, tvec1, rvec2, tvec2) +% S = cv.composeRT(rvec1, tvec1, rvec2, tvec2) % % ## Input % * __rvec1__ First rotation vector, 3x1 float vector. @@ -10,18 +10,17 @@ % % ## Output % * __S__ A scalar struct with the following fields: -% * __rvec3__ Rotation vector of the superposition, 3x1 float vector. -% * __tvec3__ Translation vector of the superposition, 3x1 float vector. -% * __dr3dr1__, __dr3dt1__, __dr3dr2__, __dr3dt2__, -% __dt3dr1__, __dt3dt1__, __dt3dr2__, __dt3dt2__ -% Derivatives of `rvec3` or `tvec3` with regard to `rvec1`, -% `tvec1`, `rvec2`, and `tvec2`, respectively. Each derivative is -% a 3x3 float matrix. +% * __rvec3__ Rotation vector of the superposition, 3x1 float vector. +% * __tvec3__ Translation vector of the superposition, 3x1 float vector. +% * __dr3dr1__, __dr3dt1__, __dr3dr2__, __dr3dt2__, __dt3dr1__, __dt3dt1__, +% __dt3dr2__, __dt3dt2__ Derivatives of `rvec3` or `tvec3` with regard to +% `rvec1`, `tvec1`, `rvec2`, and `tvec2`, respectively. Each derivative is +% a 3x3 float matrix. % % The function computes: % -% rvec3 = rodrigues^-1( rodorigues(rvec2) * rodrigues(rvec1) ) -% tvec3 = rodrigues(rvec2) * tvec1 + tvec2 +% rvec3 = rodrigues^-1( rodrigues(rvec2) * rodrigues(rvec1) ) +% tvec3 = rodrigues(rvec2) * tvec1 + tvec2 % % where `rodrigues` denotes a rotation vector to a rotation matrix % transformation, and `rodrigues^-1` denotes the inverse transformation.
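A small worked example of the composition above, using only the documented interface (cv.Rodrigues converts between the two rotation representations):

    rvec1 = [0; 0; pi/6];  tvec1 = [1; 0; 0];  % rotate 30 deg about Z, then shift
    rvec2 = [0; 0; pi/3];  tvec2 = [0; 1; 0];  % rotate 60 deg about Z, then shift
    S  = cv.composeRT(rvec1, tvec1, rvec2, tvec2);
    R3 = cv.Rodrigues(S.rvec3);  % equals cv.Rodrigues(rvec2) * cv.Rodrigues(rvec1)
    t3 = S.tvec3;                % equals cv.Rodrigues(rvec2) * tvec1 + tvec2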
diff --git a/+cv/computeCorrespondEpilines.m b/+cv/computeCorrespondEpilines.m index 9a10b217f..4d9b20461 100644 --- a/+cv/computeCorrespondEpilines.m +++ b/+cv/computeCorrespondEpilines.m @@ -1,23 +1,23 @@ %COMPUTECORRESPONDEPILINES For points in an image of a stereo pair, computes the corresponding epilines in the other image % -% lines = cv.computeCorrespondEpilines(points, F) -% [...] = cv.computeCorrespondEpilines(..., 'OptionName', optionValue, ...) +% lines = cv.computeCorrespondEpilines(points, F) +% [...] = cv.computeCorrespondEpilines(..., 'OptionName', optionValue, ...) % % ## Input % * __points__ Input points. Nx2/Nx1x2/1xNx2 floating-point array, or cell -% array of length N of 2-element vectors `{[x,y], ...}`. +% array of length N of 2-element vectors `{[x,y], ...}`. % * __F__ 3x3 Fundamental matrix that can be estimated using -% cv.findFundamentalMat or cv.stereoRectify. +% cv.findFundamentalMat or cv.stereoRectify. % % ## Output % * __lines__ Output vector of the epipolar lines corresponding to the points -% in the other image. Each line `ax + by + c = 0` is encoded by 3 -% numbers `(a,b,c)`. Nx3/Nx1x3 numeric matrix or a cell-array of -% 3-element vectors `{[a,b,c], ...}` depending on `points` format. +% in the other image. Each line `ax + by + c = 0` is encoded by 3 numbers +% `(a,b,c)`. Nx3/Nx1x3 numeric matrix or a cell-array of 3-element vectors +% `{[a,b,c], ...}` depending on `points` format. % % ## Options % * __WhichImage__ Index of the image (1 or 2) that contains the points. -% default 1. +% default 1. % % For every point in one of the two images of a stereo pair, the function % finds the equation of the corresponding epipolar line in the other image. @@ -26,12 +26,12 @@ % `lines2{i}` in the second image for the point `points1{i}` in the first image % (when `WhichImage=1`) is computed as: % -% lines2{i} = F * points1{i} +% lines2{i} = F * points1{i} % -% And vice versa, when `WhichImage=2`, `lines1{i}` is computed from `points2{i}` -% as: +% And vice versa, when `WhichImage=2`, `lines1{i}` is computed from +% `points2{i}` as: % -% lines1{i} = F^T * points2{i} +% lines1{i} = F^T * points2{i} % % Line coefficients are defined up to a scale. They are normalized so that % `a_i^2 + b_i^2 = 1`. diff --git a/+cv/computeRecallPrecisionCurve.m b/+cv/computeRecallPrecisionCurve.m index 4387b5519..d8b050778 100644 --- a/+cv/computeRecallPrecisionCurve.m +++ b/+cv/computeRecallPrecisionCurve.m @@ -1,16 +1,16 @@ %COMPUTERECALLPRECISIONCURVE Evaluate a descriptor extractor by computing precision/recall curve % -% recallPrecisionCurve = cv.computeRecallPrecisionCurve(matches1to2, correctMatches1to2Mask) +% recallPrecisionCurve = cv.computeRecallPrecisionCurve(matches1to2, correctMatches1to2Mask) % % ## Input % * __matches1to2__ Cell array of matches, each match is a structure array -% with the following fields: -% * __queryIdx__ query descriptor index (zero-based index) -% * __trainIdx__ train descriptor index (zero-based index) -% * __imgIdx__ train image index (zero-based index) -% * __distance__ distance between descriptors (scalar) +% with the following fields: +% * __queryIdx__ query descriptor index (zero-based index) +% * __trainIdx__ train descriptor index (zero-based index) +% * __imgIdx__ train image index (zero-based index) +% * __distance__ distance between descriptors (scalar) % * __correctMatches1to2Mask__ Cell array of the same size as `matches1to2`. -% A mask indicating correct matches. +% A mask indicating correct matches. 
% % ## Output % * __recallPrecisionCurve__ Recall/precision curve, Nx2 matrix. diff --git a/+cv/connectedComponents.m b/+cv/connectedComponents.m index 9a47ea92d..0166f8393 100644 --- a/+cv/connectedComponents.m +++ b/+cv/connectedComponents.m @@ -1,48 +1,48 @@ %CONNECTEDCOMPONENTS Computes the connected components labeled image of boolean image % -% [labels,N] = cv.connectedComponents(image) -% [labels,N,stats,centroids] = cv.connectedComponents(image) -% [...] = cv.connectedComponents(..., 'OptionName', optionValue, ...) +% [labels,N] = cv.connectedComponents(image) +% [labels,N,stats,centroids] = cv.connectedComponents(image) +% [...] = cv.connectedComponents(..., 'OptionName', optionValue, ...) % % ## Input % * __image__ boolean image to be labeled, 1-channel 8-bit or logical matrix. % % ## Output % * __labels__ output labeled image of same size as input `image` and -% specified type in `LType`. Labels are in the range `[0, N-1]` where -% 0 represents the background label. +% specified type in `LType`. Labels are in the range `[0, N-1]` where 0 +% represents the background label. % * __N__ the total number of labels. % * __stats__ (optional) statistics output for each label, including the -% background label, see below for available statistics. A Nx5 `int32` -% matrix. Statistics are accessed via `stats(label, col)` where: -% * `col=1`: The leftmost (x) coordinate which is the inclusive start of -% the bounding box in the horizontal direction. -% * `col=2`: The topmost (y) coordinate which is the inclusive start of -% the bounding box in the vertical direction. -% * `col=3`: The horizontal size of the bounding box (width). -% * `col=4`: The vertical size of the bounding box (height). -% * `col=5`: The total area (in pixels) of the connected component. +% background label, see below for available statistics. A Nx5 `int32` +% matrix. Statistics are accessed via `stats(label, col)` where: +% * `col=1`: The leftmost (x) coordinate which is the inclusive start of the +% bounding box in the horizontal direction. +% * `col=2`: The topmost (y) coordinate which is the inclusive start of the +% bounding box in the vertical direction. +% * `col=3`: The horizontal size of the bounding box (width). +% * `col=4`: The vertical size of the bounding box (height). +% * `col=5`: The total area (in pixels) of the connected component. % * __centroids__ (optional) 64-bit floating-point centroid `(x,y)` output for -% each label, including the background label. A Nx2 numeric matrix. -% Centroids are accessed via `centroids(label,:)` for x and y. +% each label, including the background label. A Nx2 numeric matrix. +% Centroids are accessed via `centroids(label,:)` for x and y. % % ## Options % * __Connectivity__ 8 or 4 for 8-way or 4-way connectivity respectively. -% default 8 +% default 8 % * __LType__ specifies the output image label type, an important -% consideration based on the total number of labels or alternatively the -% total number of pixels in the source image. Currently `int32` and -% `uint16` are supported. default `int32` -% * __Method__ specifies the connected components labeling algorithm to -% use, currently Grana (BBDT) and Wu's (SAUF) algorithms are supported. -% Note that SAUF algorithm forces a row major ordering of labels while -% BBDT does not. 
One of: -% * __Wu__ SAUF algorithm for 8-way connectivity, SAUF algorithm for -% 4-way connectivity -% * __Default__ BBDT algorithm for 8-way connectivity, SAUF algorithm -% for 4-way connectivity -% * __Grana__ BBDT algorithm for 8-way connectivity, SAUF algorithm for -% 4-way connectivity +% consideration based on the total number of labels or alternatively the +% total number of pixels in the source image. Currently `int32` and `uint16` +% are supported. default `int32` +% * __Method__ specifies the connected components labeling algorithm to use, +% currently Grana (BBDT) and Wu's (SAUF) algorithms are supported. Note that +% SAUF algorithm forces a row major ordering of labels while BBDT does not. +% One of: +% * __Wu__ SAUF algorithm for 8-way connectivity, SAUF algorithm for 4-way +% connectivity +% * __Default__ BBDT algorithm for 8-way connectivity, SAUF algorithm for +% 4-way connectivity +% * __Grana__ BBDT algorithm for 8-way connectivity, SAUF algorithm for +% 4-way connectivity % % The last two optional output arguments are only computed if requested. % diff --git a/+cv/contourArea.m b/+cv/contourArea.m index 9eb51d708..910185a5f 100644 --- a/+cv/contourArea.m +++ b/+cv/contourArea.m @@ -1,22 +1,22 @@ %CONTOURAREA Calculates a contour area % -% a = cv.contourArea(curve) -% a = cv.contourArea(curve, 'OptionName', optionValue, ...) +% a = cv.contourArea(curve) +% a = cv.contourArea(curve, 'OptionName', optionValue, ...) % % ## Input % * __curve__ Input vector of 2D points (contour vertices) stored in numeric -% array (Nx2/Nx1x2/1xNx2) or cell array of 2-element vectors -% (`{[x,y], ...}`). +% array (Nx2/Nx1x2/1xNx2) or cell array of 2-element vectors +% (`{[x,y], ...}`). % % ## Output % * __a__ Output area. % % ## Options % * __Oriented__ Oriented area flag. If it is true, the function returns a -% signed area value, depending on the contour orientation (clockwise or -% counter-clockwise). Using this feature you can determine orientation -% of a contour by taking the sign of an area. By default, the parameter -% is false, which means that the absolute value is returned. +% signed area value, depending on the contour orientation (clockwise or +% counter-clockwise). Using this feature you can determine orientation of a +% contour by taking the sign of an area. By default, the parameter is false, +% which means that the absolute value is returned. % % The function computes a contour area. Similarly to cv.moments, the area is % computed using the Green formula. Thus, the returned area and the number of @@ -26,12 +26,12 @@ % % ## Example % -% contour = {[0 0], [10 0], [10 10], [5 4]}; -% area0 = cv.contourArea(contour); -% approx = cv.approxPolyDP(contour, 'Epsilon',5, 'Closed',true); -% area1 = cv.contourArea(approx); -% fprintf('area0 = %.2f\narea1 = %.2f\napprox poly vertices = %d\n', ... -% area0, area1, numel(approx)); +% contour = {[0 0], [10 0], [10 10], [5 4]}; +% area0 = cv.contourArea(contour); +% approx = cv.approxPolyDP(contour, 'Epsilon',5, 'Closed',true); +% area1 = cv.contourArea(approx); +% fprintf('area0 = %.2f\narea1 = %.2f\napprox poly vertices = %d\n', ... +% area0, area1, numel(approx)); % % See also: cv.arcLength, polyarea % diff --git a/+cv/convertFp16.m b/+cv/convertFp16.m index 343bdde49..5e7c0eb23 100644 --- a/+cv/convertFp16.m +++ b/+cv/convertFp16.m @@ -1,6 +1,6 @@ %CONVERTFP16 Converts an array to half precision floating number % -% dst = cv.convertFp16(src) +% dst = cv.convertFp16(src) % % ## Input % * __src__ input array. 
@@ -10,7 +10,9 @@ % % This function converts FP32 (single precision floating point) from/to FP16 % (half precision floating point). The input array has to have type of -% 'single' or 'int16' to represent the bit depth. If the input array is +% `single` or `int16` to represent the bit depth. If the input array is % neither of them, the function will raise an error. The format of half % precision floating point is defined in IEEE 754-2008. % +% See also: cv.convertTo +% diff --git a/+cv/convertMaps.m b/+cv/convertMaps.m index 16d60d696..648e27979 100644 --- a/+cv/convertMaps.m +++ b/+cv/convertMaps.m @@ -1,35 +1,35 @@ %CONVERTMAPS Converts image transformation maps from one representation to another % -% [dstmap1, dstmap2] = cv.convertMaps(map1, map2) -% [dstmap1, dstmap2] = cv.convertMaps(map1) -% [...] = cv.convertMaps(..., 'OptionName',optionValue, ...) +% [dstmap1, dstmap2] = cv.convertMaps(map1, map2) +% [dstmap1, dstmap2] = cv.convertMaps(map1) +% [...] = cv.convertMaps(..., 'OptionName',optionValue, ...) % % ## Input % * __map1__ The first input map of either (x,y) points or just x values of -% the transformation having the type `int16` (MxNx2), `single` (MxN), or -% `single` (MxNx2). +% the transformation having the type `int16` (MxNx2), `single` (MxN), or +% `single` (MxNx2). % * __map2__ The second input map of y values of the transformation having the -% type `uint16` (MxN) or `single` (MxN), or none (empty map if `map1` is -% (x,y) points), respectively. +% type `uint16` (MxN) or `single` (MxN), or none (empty map if `map1` is +% (x,y) points), respectively. % % ## Output % * __dstmap1__ The first output map that has the type specified by -% `DstMap1Type` and the same row/col size as `src`. See details below. +% `DstMap1Type` and the same row/col size as `src`. See details below. % * __dstmap2__ The second output map. See details below. % % ## Options % * __DstMap1Type__ Type of the first output map. The default value of -1 -% chooses an appropriate type depending on the direction of conversion. -% So if `map1` is in fixed-point representation, the output type is -% 'single2' (for combined floating-point representation), otherwise if -% `map1` is in floating-point representation, the output type is `int16' -% (for fixed-point representation). Accepted types are: -% * __int16__ first output map is a MxNx2 `int16` array -% * __single1__ first output map is a MxNx1 `single` matrix -% * __single2__ first output map is a MxNx2 `single` matrix +% chooses an appropriate type depending on the direction of conversion. So +% if `map1` is in fixed-point representation, the output type is `single2` +% (for combined floating-point representation), otherwise if `map1` is in +% floating-point representation, the output type is `int16` (for fixed-point +% representation). Accepted types are: +% * __int16__ first output map is a MxNx2 `int16` array +% * __single1__ first output map is a MxNx1 `single` matrix +% * __single2__ first output map is a MxNx2 `single` matrix % * __NNInterpolation__ Flag indicating whether the fixed-point maps are used -% for the nearest-neighbor or for a more complex interpolation. If this -% flag is true, the second output will be empty. default false +% for the nearest-neighbor or for a more complex interpolation. If this +% flag is true, the second output will be empty. default false % % The function converts a pair of maps for cv.remap from one representation to % another. 
The following options `(map1, map2) -> (dstmap1, dstmap2)` are diff --git a/+cv/convertPointsFromHomogeneous.m b/+cv/convertPointsFromHomogeneous.m index 551f5317a..caf7b522a 100644 --- a/+cv/convertPointsFromHomogeneous.m +++ b/+cv/convertPointsFromHomogeneous.m @@ -1,17 +1,17 @@ %CONVERTPOINTSFROMHOMOGENEOUS Converts points from homogeneous to Euclidean space % -% dst = cv.convertPointsFromHomogeneous(src) +% dst = cv.convertPointsFromHomogeneous(src) % % ## Input % * __src__ Input vector of N-dimensional points (3D/4D points). -% Mx3/Mx1x3/1xMx3 or Mx4/Mx1x4/1xMx4 numeric array, or cell-array of -% 3/4-element vectors in the form: `{[x,y,z], [x,y,z], ...}` or -% `{[x,y,z,w], [x,y,z,w], ...}`. Supports floating-point types. +% Mx3/Mx1x3/1xMx3 or Mx4/Mx1x4/1xMx4 numeric array, or cell-array of +% 3/4-element vectors in the form: `{[x,y,z], [x,y,z], ...}` or +% `{[x,y,z,w], [x,y,z,w], ...}`. Supports floating-point types. % % ## Output % * __dst__ Output vector of (N-1)-dimensional points (2D/3D points). -% Mx2/Mx1x2 or Mx3/Mx1x3 numeric array, or cell-array of 2/3-elements -% vectors, respectively matching the input shape. +% Mx2/Mx1x2 or Mx3/Mx1x3 numeric array, or cell-array of 2/3-elements +% vectors, respectively matching the input shape. % % The function converts points homogeneous to Euclidean space using % perspective projection. That is, each point `(x1, x2, ..., x(n-1), xn)` is diff --git a/+cv/convertPointsToHomogeneous.m b/+cv/convertPointsToHomogeneous.m index 9b5522b39..c64332428 100644 --- a/+cv/convertPointsToHomogeneous.m +++ b/+cv/convertPointsToHomogeneous.m @@ -1,17 +1,17 @@ %CONVERTPOINTSTOHOMOGENEOUS Converts points from Euclidean to homogeneous space % -% dst = cv.convertPointsToHomogeneous(src) +% dst = cv.convertPointsToHomogeneous(src) % % ## Input % * __src__ Input vector of N-dimensional points (2D/3D points). -% Mx2/Mx1x2/1xMx2 or Mx3/Mx1x3/1xMx3 numeric array, or cell-array of -% 2/3-element vectors in the form: `{[x,y], [x,y], ...}` or -% `{[x,y,z], [x,y,z], ...}`. Supports floating-point types. +% Mx2/Mx1x2/1xMx2 or Mx3/Mx1x3/1xMx3 numeric array, or cell-array of +% 2/3-element vectors in the form: `{[x,y], [x,y], ...}` or +% `{[x,y,z], [x,y,z], ...}`. Supports floating-point types. % % ## Output % * __dst__ Output vector of (N+1)-dimensional points (3D/4D points). -% Mx3/Mx1x3 or Mx4/Mx1x4 numeric array, or cell-array of 3/4-elements -% vectors, respectively matching the input shape. +% Mx3/Mx1x3 or Mx4/Mx1x4 numeric array, or cell-array of 3/4-elements +% vectors, respectively matching the input shape. % % The function converts points from Euclidean to homogeneous space by % appending 1's to the tuple of point coordinates. That is, each point diff --git a/+cv/convertScaleAbs.m b/+cv/convertScaleAbs.m index 75043c1f7..187704c89 100644 --- a/+cv/convertScaleAbs.m +++ b/+cv/convertScaleAbs.m @@ -1,7 +1,7 @@ %CONVERTSCALEABS Scales, calculates absolute values, and converts the result to 8-bit % -% dst = cv.convertScaleAbs(src) -% dst = cv.convertScaleAbs(src, 'OptionName',optionValue, ...) +% dst = cv.convertScaleAbs(src) +% dst = cv.convertScaleAbs(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ input array. @@ -17,7 +17,7 @@ % three operations sequentially: scaling, taking an absolute value, conversion % to an unsigned 8-bit type: % -% dst(I) = saturate_cast(|src(I) * alpha + beta|) +% dst(I) = saturate_cast(|src(I) * alpha + beta|) % % In case of multi-channel arrays, the function processes each channel % independently. 
When the output is not 8-bit, the operation can be emulated diff --git a/+cv/convertTo.m b/+cv/convertTo.m index 4a6f63f10..3cf328bca 100644 --- a/+cv/convertTo.m +++ b/+cv/convertTo.m @@ -1,7 +1,7 @@ %CONVERTTO Converts an array to another data type with optional scaling % -% dst = cv.convertTo(src) -% dst = cv.convertTo(src, 'OptionName', optionValue, ...) +% dst = cv.convertTo(src) +% dst = cv.convertTo(src, 'OptionName', optionValue, ...) % % ## Input % * __src__ Input matrix. @@ -10,17 +10,17 @@ % * __dst__ Output image of the same size as `src`, and the specified type. % % ## Options -% * __RType__ desired output matrix type ('uint8', 'int32', 'double', etc.), -% or rather the depth since the number of channels are the same as the -% input has; if it is negative, the output matrix will have the same -% type as the input. Default -1 +% * __RType__ desired output matrix type (`uint8`, `int32`, `double`, etc.), +% or rather the depth since the number of channels is the same as the input +% has; if it is negative, the output matrix will have the same type as the +% input. Default -1 % * __Alpha__ optional scale factor. default 1.0 % * __Beta__ optional delta added to the scaled values. default 0.0 % % The method converts source pixel values to the target data type. % Saturation is applied at the end to avoid possible overflows: % -% dst = cast(src*alpha + beta, RType); +% dst = cast(src*alpha + beta, RType); % % See also: cv.copyTo, cv.normalize, cast, typecast, im2double, im2uint8, % mat2gray diff --git a/+cv/convexHull.m b/+cv/convexHull.m index 260815e3f..0f76eabf0 100644 --- a/+cv/convexHull.m +++ b/+cv/convexHull.m @@ -1,32 +1,32 @@ %CONVEXHULL Finds the convex hull of a point set % -% hull = cv.convexHull(points) -% hull = cv.convexHull(points, 'OptionName', optionValue, ...) +% hull = cv.convexHull(points) +% hull = cv.convexHull(points, 'OptionName', optionValue, ...) % % ## Input % * __points__ Input 2D point set, stored in numeric array -% (Nx2/Nx1x2/1xNx2) or cell array of 2-element vectors (`{[x,y], ...}`). +% (Nx2/Nx1x2/1xNx2) or cell array of 2-element vectors (`{[x,y], ...}`). % % ## Output % * __hull__ Output convex hull. It is either an integer vector of indices or -% vector of points. In the first case, the hull elements are 0-based -% indices of the convex hull points in the original array (since the set -% of convex hull points is a subset of the original point set). In the -% second case, hull elements are the convex hull points themselves. -% In case output is the hull points, it has the same type as the input. +% vector of points. In the first case, the hull elements are 0-based indices +% of the convex hull points in the original array (since the set of convex +% hull points is a subset of the original point set). In the second case, +% hull elements are the convex hull points themselves. If the output is the +% hull points, it has the same type as the input. % % ## Options % * __ReturnPoints__ Operation flag. In case of a matrix, when the flag is -% true, the function returns convex hull points (Mx2 matrix). Otherwise, -% it returns indices of the convex hull points (vector of length M). -% In case the input is a cell-array, when the flag is true, the function -% return convex hull points (as a cell-array of points). Otherwise it -% returns indices of points (vector of length M). default true +% true, the function returns convex hull points (Mx2 matrix). Otherwise, it +% returns indices of the convex hull points (vector of length M).
In case +% the input is a cell-array, when the flag is true, the function returns +% convex hull points (as a cell-array of points). Otherwise it returns +% indices of points (vector of length M). default true % * __Clockwise__ Orientation flag. If it is true, the output convex hull is -% oriented clockwise. Otherwise, it is oriented counter-clockwise. The -% usual screen coordinate system is assumed so that the origin is at the -% top-left corner, x axis is oriented to the right, and y axis is -% oriented downwards. default false +% oriented clockwise. Otherwise, it is oriented counter-clockwise. The usual +% screen coordinate system is assumed so that the origin is at the top-left +% corner, x axis is oriented to the right, and y axis is oriented downwards. +% default false % % The function cv.convexHull finds the convex hull of a 2D point set using % Sklansky's algorithm [Sklansky82] that has `O(N logN)` complexity in the diff --git a/+cv/convexityDefects.m b/+cv/convexityDefects.m index 0952cb35b..2cbb08c1f 100644 --- a/+cv/convexityDefects.m +++ b/+cv/convexityDefects.m @@ -1,23 +1,23 @@ %CONVEXITYDEFECTS Finds the convexity defects of a contour % -% defects = cv.convexityDefects(contour, convexhull) +% defects = cv.convexityDefects(contour, convexhull) % % ## Input % * __contour__ Input contour. 2D point set, stored in numeric array -% (Nx2/Nx1x2/1xNx2) or cell array of 2-element vectors (`{[x,y], ...}`). +% (Nx2/Nx1x2/1xNx2) or cell array of 2-element vectors (`{[x,y], ...}`). % * __convexhull__ Corresponding convex hull obtained using cv.convexHull that -% should contain indices of the contour points that make the hull. -% A numeric vector of 0-based indices. +% should contain indices of the contour points that make the hull. A numeric +% vector of 0-based indices. % % ## Output % * __defects__ The output vector of convexity defects `{[v0,v1,v2,v3], ...}`. -% Each convexity defect is represented as 4-element integer vector -% `[start_index, end_index, farthest_pt_index, fixpt_depth]`, where -% indices are 0-based indices in the original contour of the convexity -% defect (beginning, end and the farthest point), and `fixpt_depth` is -% fixed-point approximation (with 8 fractional bits) of the distance -% between the farthest contour point and the hull. That is, to get the -% floating-point value of the depth will be `fixpt_depth/256.0`. +% Each convexity defect is represented as 4-element integer vector +% `[start_index, end_index, farthest_pt_index, fixpt_depth]`, where indices +% are 0-based indices in the original contour of the convexity defect +% (beginning, end and the farthest point), and `fixpt_depth` is fixed-point +% approximation (with 8 fractional bits) of the distance between the +% farthest contour point and the hull. That is, the floating-point value of +% the depth is `fixpt_depth/256.0`. % % See also: cv.convexHull % diff --git a/+cv/copyMakeBorder.m b/+cv/copyMakeBorder.m index c3ea669f5..43b578658 100644 --- a/+cv/copyMakeBorder.m +++ b/+cv/copyMakeBorder.m @@ -1,35 +1,36 @@ %COPYMAKEBORDER Forms a border around an image % -% dst = cv.copyMakeBorder(src, top, bottom, left, right) -% dst = cv.copyMakeBorder(src, [top, bottom, left, right]) -% [...] = cv.copyMakeBorder(..., 'OptionName', optionValue, ...) +% dst = cv.copyMakeBorder(src, top, bottom, left, right) +% dst = cv.copyMakeBorder(src, [top, bottom, left, right]) +% [...] = cv.copyMakeBorder(..., 'OptionName', optionValue, ...) % % ## Input % * __src__ Source image.
% * __top__, __bottom__, __left__, __right__ Parameter specifying how many -% pixels in each direction from the source image rectangle to extrapolate. -% For example, `top=1`, `bottom=1`, `left=1`, `right=1` mean that -% 1 pixel-wide border needs to be built. +% pixels in each direction from the source image rectangle to extrapolate. +% For example, `top=1`, `bottom=1`, `left=1`, `right=1` mean that 1 +% pixel-wide border needs to be built. % % ## Output % * __dst__ Destination image of the same type as `src` and the size -% `[size(src,1)+top+bottom, size(src,2)+left+right, size(src,3)]`. +% `[size(src,1)+top+bottom, size(src,2)+left+right, size(src,3)]`. % % ## Options % * __BorderType__ Border type, one of: -% * __Constant__ `iiiiii|abcdefgh|iiiiiii` with some specified `i` -% * __Replicate__ `aaaaaa|abcdefgh|hhhhhhh` -% * __Reflect__ `fedcba|abcdefgh|hgfedcb` -% * __Reflect101__ `gfedcb|abcdefgh|gfedcba` -% * __Wrap__ `cdefgh|abcdefgh|abcdefg` -% * __Default__ same as 'Reflect101' (default) +% * __Constant__ `iiiiii|abcdefgh|iiiiiii` with some specified `i` +% * __Replicate__ `aaaaaa|abcdefgh|hhhhhhh` +% * __Reflect__ `fedcba|abcdefgh|hgfedcb` +% * __Reflect101__ `gfedcb|abcdefgh|gfedcba` +% * __Wrap__ `cdefgh|abcdefgh|abcdefg` +% * __Default__ same as 'Reflect101' (default) % * __Value__ Border value when `BorderType` is 'Constant'. default zeros % -% The function copies the source image into the middle of the destination image. -% The areas to the left, to the right, above and below the copied source image -% will be filled with extrapolated pixels. This is not what filtering functions -% based on it do (they extrapolate pixels on-fly), but what other more complex -% functions, including your own, may do to simplify image boundary handling. +% The function copies the source image into the middle of the destination +% image. The areas to the left, to the right, above and below the copied +% source image will be filled with extrapolated pixels. This is not what +% filtering functions based on it do (they extrapolate pixels on the fly), but +% what other more complex functions, including your own, may do to simplify +% image boundary handling. % % See also: cv.borderInterpolate, padarray % diff --git a/+cv/copyTo.m b/+cv/copyTo.m index 19931780d..40f94b5fe 100644 --- a/+cv/copyTo.m +++ b/+cv/copyTo.m @@ -1,7 +1,7 @@ %COPYTO Copies the matrix to another one % -% dst = cv.copyTo(src) -% dst = cv.copyTo(src, 'OptionName',optionValue, ...) +% dst = cv.copyTo(src) +% dst = cv.copyTo(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ Input matrix. @@ -11,9 +11,10 @@ % % ## Options % * __Dest__ Used to initialize destination matrix. Not set by default. -% * __Mask__ Optional operation mask. Its non-zero elements indicate which -% matrix elements need to be copied. The mask has to be of type `uint8` -% or `logical` and can have 1 or multiple channels. Not set by default. +% * __Mask__ Optional operation mask of the same size as `src`. Its non-zero +% elements indicate which matrix elements need to be copied. The mask has to +% be of type `uint8` or `logical` and can have 1 or multiple channels. +% Not set by default. % % The method copies the matrix data to another matrix. Before copying the data, % the destination matrix is allocated if needed, initialized with all zeros.
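A minimal sketch of the masked copy just described (the image file is hypothetical):

    src  = imread('peppers.png');                     % any 8-bit image
    mask = zeros(size(src,1), size(src,2), 'uint8');
    mask(:, 1:floor(end/2)) = 255;                    % select the left half
    dst  = cv.copyTo(src, 'Mask',mask);               % unselected pixels stay zero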
diff --git a/+cv/cornerEigenValsAndVecs.m b/+cv/cornerEigenValsAndVecs.m index b95fc69ae..864fde264 100644 --- a/+cv/cornerEigenValsAndVecs.m +++ b/+cv/cornerEigenValsAndVecs.m @@ -1,30 +1,30 @@ %CORNEREIGENVALSANDVECS Calculates eigenvalues and eigenvectors of image blocks for corner detection % -% dst = cv.cornerEigenValsAndVecs(src) -% dst = cv.cornerEigenValsAndVecs(src, 'OptionName', optionValue, ...) +% dst = cv.cornerEigenValsAndVecs(src) +% dst = cv.cornerEigenValsAndVecs(src, 'OptionName', optionValue, ...) % % ## Input % * __src__ Input single-channel 8-bit or floating-point image. % % ## Output % * __dst__ Image to store the results. It has the same row/column size as -% `src` and the `single` type (where `size(dst,3)==6`). -% Eigenvalues and eigenvectors are stored along the channels, see below. +% `src` and the `single` type (where `size(dst,3)==6`). Eigenvalues and +% eigenvectors are stored along the channels, see below. % % ## Options % * __BlockSize__ Neighborhood size (see details below). default 5. % * __KSize__ Aperture parameter for the cv.Sobel operator. default 3. % * __BorderType__ Pixel extrapolation method. See cv.copyMakeBorder. -% default 'Default' +% default 'Default' % % For every pixel `p`, the function cv.cornerEigenValsAndVecs considers a % `blockSize x blockSize` neighborhood `S(p)`. It calculates the covariation % matrix of derivatives over the neighborhood as: % -% M = [ -% sum_{S(p)}(dI/dx)^2 sum_{S(p)}(dI/dx * dI/dy) -% sum_{S(p)}(dI/dx * dI/dy) sum_{S(p)}(dI/dy)^2 -% ] +% M = [ +% sum_{S(p)}(dI/dx)^2 sum_{S(p)}(dI/dx * dI/dy) +% sum_{S(p)}(dI/dx * dI/dy) sum_{S(p)}(dI/dy)^2 +% ] % % where the derivatives are computed using the Sobel operator. % diff --git a/+cv/cornerHarris.m b/+cv/cornerHarris.m index edfbc6ac4..c848007eb 100644 --- a/+cv/cornerHarris.m +++ b/+cv/cornerHarris.m @@ -1,22 +1,22 @@ %CORNERHARRIS Harris corner detector % -% dst = cv.cornerHarris(src) -% dst = cv.cornerHarris(src, 'OptionName', optionValue, ...) +% dst = cv.cornerHarris(src) +% dst = cv.cornerHarris(src, 'OptionName', optionValue, ...) % % ## Input % * __src__ Input single-channel 8-bit or floating-point image. % % ## Output % * __dst__ Image to store the Harris detector responses. It has the type -% `single` and the same size as `src` (single-channel). +% `single` and the same size as `src` (single-channel). % % ## Options % * __BlockSize__ Neighborhood size (see the details on -% cv.cornerEigenValsAndVecs). default 5. +% cv.cornerEigenValsAndVecs). default 5. % * __KSize__ Aperture parameter for the cv.Sobel operator. default 3. % * __K__ Harris detector free parameter. See the formula below. default 0.04 % * __BorderType__ Pixel extrapolation method. See cv.copyMakeBorder. -% default 'Default' +% default 'Default' % % The function runs the Harris corner detector on the image. Similarly to % cv.cornerMinEigenVal and cv.cornerEigenValsAndVecs, for each pixel `(x,y)` % it calculates a `2x2` gradient covariance matrix `M(x,y)` over a @@ -24,7 +24,7 @@ % `BlockSize x BlockSize` neighborhood. Then, it computes the following % characteristic: % -% dst(x,y) = det(M(x,y)) - k * (trace(M(x,y)))^2 +% dst(x,y) = det(M(x,y)) - k * (trace(M(x,y)))^2 % % Corners in the image can be found as the local maxima of this response map.
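To make the response map concrete, a minimal sketch (the input file and the 1% threshold are assumptions to tune per image):

    img = rgb2gray(imread('building.jpg'));           % hypothetical input image
    R   = cv.cornerHarris(img, 'BlockSize',5, 'KSize',3, 'K',0.04);
    corners = R > 0.01 * max(R(:));                   % logical map of strong corners
    [y, x] = find(corners);                           % corner coordinates (row, col)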
% diff --git a/+cv/cornerMinEigenVal.m b/+cv/cornerMinEigenVal.m index abb1052d2..a019b11fb 100644 --- a/+cv/cornerMinEigenVal.m +++ b/+cv/cornerMinEigenVal.m @@ -1,21 +1,21 @@ %CORNERMINEIGENVAL Calculates the minimal eigenvalue of gradient matrices for corner detection % -% dst = cv.cornerMinEigenVal(src) -% dst = cv.cornerMinEigenVal(src, 'OptionName', optionValue, ...) +% dst = cv.cornerMinEigenVal(src) +% dst = cv.cornerMinEigenVal(src, 'OptionName', optionValue, ...) % % ## Input % * __src__ Input single-channel 8-bit or floating-point image. % % ## Output % * __dst__ Image to store the minimal eigenvalues. It has the same size as -% `src` and the `single` type (single-channel). +% `src` and the `single` type (single-channel). % % ## Options % * __BlockSize__ Neighborhood size (see the details on -% cv.cornerEigenValsAndVecs). default 5. +% cv.cornerEigenValsAndVecs). default 5. % * __KSize__ Aperture parameter for the cv.Sobel operator. default 3. % * __BorderType__ Pixel extrapolation method. See cv.copyMakeBorder. -% default 'Default' +% default 'Default' % % The function is similar to cv.cornerEigenValsAndVecs but it calculates and % stores only the minimal eigenvalue of the covariance matrix of derivatives, diff --git a/+cv/cornerSubPix.m b/+cv/cornerSubPix.m index d3b4bb43c..2f53dd083 100644 --- a/+cv/cornerSubPix.m +++ b/+cv/cornerSubPix.m @@ -1,37 +1,37 @@ %CORNERSUBPIX Refines the corner locations % -% corners = cv.cornerSubPix(im, corners) -% corners = cv.cornerSubPix(im, corners, 'OptionName', optionValue, ...) +% corners = cv.cornerSubPix(im, corners) +% corners = cv.cornerSubPix(im, corners, 'OptionName', optionValue, ...) % % ## Input % * __im__ Input single-channel image. % * __corners__ Initial coordinates of the input corners, stored in numeric -% array (Nx2/Nx1x2/1xNx2) or cell array of 2-element vectors -% (`{[x,y], ...}`). Supports single floating-point class. +% array (Nx2/Nx1x2/1xNx2) or cell array of 2-element vectors +% (`{[x,y], ...}`). Supports single floating-point class. % % ## Output % * __corners__ Output refined coordinates, of the same size and type as the -% input `corners` (numeric or cell matching the input format). +% input `corners` (numeric or cell matching the input format). % % ## Options % * __WinSize__ Half of the side length of the search window. For example, if -% `WinSize=[5,5]`, then a `(5 * 2 + 1) x (5 * 2 + 1) = 11 x 11` search -% window is used. default [3, 3]. +% `WinSize=[5,5]`, then a `(5 * 2 + 1) x (5 * 2 + 1) = 11 x 11` search +% window is used. default [3, 3]. % * __ZeroZone__ Half of the size of the dead region in the middle of the -% search zone over which the summation in the formula below is not done. -% It is used sometimes to avoid possible singularities of the -% autocorrelation matrix. The value of `[-1,-1]` indicates that there -% is no such a size. default [-1,-1]. +% search zone over which the summation in the formula below is not done. It +% is used sometimes to avoid possible singularities of the autocorrelation +% matrix. The value of `[-1,-1]` indicates that there is no such size. +% default [-1,-1]. % * __Criteria__ Criteria for termination of the iterative process of corner -% refinement. That is, the process of corner position refinement stops -% either after `criteria.maxCount` iterations or when the corner position -% moves by less than `criteria.epsilon` on some iteration. Default to -% `struct('type','Count+EPS', 'maxCount',50, 'epsilon',0.001)`.
-% Struct with the following fields is accepted: -% * __type__ one of 'Count', 'EPS', or 'Count+EPS' to indicate which -% criteria to use. -% * __maxCount__ maximum number of iterations -% * __epsilon__ minimum difference in corner position +% refinement. That is, the process of corner position refinement stops +% either after `criteria.maxCount` iterations or when the corner position +% moves by less than `criteria.epsilon` on some iteration. Defaults to +% `struct('type','Count+EPS', 'maxCount',50, 'epsilon',0.001)`. A struct with +% the following fields is accepted: +% * __type__ one of 'Count', 'EPS', or 'Count+EPS' to indicate which +% criteria to use. +% * __maxCount__ maximum number of iterations +% * __epsilon__ minimum difference in corner position % % The function iterates to find the sub-pixel accurate location of corners or % radial saddle points. @@ -41,19 +41,19 @@ % `q` is orthogonal to the image gradient at `p` subject to image and % measurement noise. Consider the expression: % -% epsilon_i = DI_p_i' * (q - p_i) +% epsilon_i = DI_p_i' * (q - p_i) % % where `DI_p_i` is an image gradient at one of the points `p_i` in a % neighborhood of `q`. The value of `q` is to be found so that `epsilon_i` is % minimized. A system of equations may be set up with `epsilon_i` set to zero: % -% Sigma_i(DI_p_i * DI_p_i') - Sigma_i(DI_p_i * DI_p_i' * p_i) +% Sigma_i(DI_p_i * DI_p_i') * q - Sigma_i(DI_p_i * DI_p_i' * p_i) = 0 % % where the gradients are summed within a neighborhood ("search window") of % `q`. Calling the first gradient term `G` and the second gradient term `b` % gives: % -% q = inv(G) * b +% q = inv(G) * b % % The algorithm sets the center of the neighborhood window at this new center % `q` and then iterates until the center stays within a set threshold. diff --git a/+cv/correctMatches.m b/+cv/correctMatches.m index 6ce463fe8..688f416ea 100644 --- a/+cv/correctMatches.m +++ b/+cv/correctMatches.m @@ -1,17 +1,16 @@ %CORRECTMATCHES Refines coordinates of corresponding points % -% [newPoints1, newPoints2] = cv.correctMatches(F, points1, points2) +% [newPoints1, newPoints2] = cv.correctMatches(F, points1, points2) % % ## Input % * __F__ 3x3 fundamental matrix. % * __points1__ first set of 2D points. A numeric Nx2/Nx1x2/1xNx2 array or a -% cell array of 2-element vectors `{[x,y], ...}` (floating-point -% precision). +% cell array of 2-element vectors `{[x,y], ...}` (floating-point precision). % * __points2__ second set of 2D points. Same size and type as `points1`. % % ## Output % * __newPoints1__ The optimized `points1`. Similar in shape to `points1` -% (either Nx2/1xNx2 numeric array or cell array of 2D points). +% (either Nx2/1xNx2 numeric array or cell array of 2D points). % * __newPoints2__ The optimized `points2`.
% % The function implements the Optimal Triangulation Method (see [Hartley2004] @@ -19,12 +18,12 @@ % and a fundamental matrix `F`, it computes the corrected correspondences % `newPoints1[i] <-> newPoints2[i]` that minimize the geometric error: % -% d(points1[i], newPoints1[i])^2 + d(points2[i], newPoints2[i])^2 +% d(points1[i], newPoints1[i])^2 + d(points2[i], newPoints2[i])^2 % % (where `d(a,b)` is the geometric distance between points `a` and `b`) % subject to the epipolar constraint % -% newPoints2' * F * newPoints1 = 0 +% newPoints2' * F * newPoints1 = 0 % % ## References % [Hartley2004]: diff --git a/+cv/createConcentricSpheresTestSet.m b/+cv/createConcentricSpheresTestSet.m index f1672ccef..a3fa23a57 100644 --- a/+cv/createConcentricSpheresTestSet.m +++ b/+cv/createConcentricSpheresTestSet.m @@ -1,6 +1,6 @@ %CREATECONCENTRICSPHERESTESTSET Creates test set % -% [samples, responses] = cv.createConcentricSpheresTestSet(nsamples, nfeatures, nclasses) +% [samples, responses] = cv.createConcentricSpheresTestSet(nsamples, nfeatures, nclasses) % % ## Input % * __nsamples__ returned samples count. diff --git a/+cv/createHanningWindow.m b/+cv/createHanningWindow.m index 8411229be..c4937ac54 100644 --- a/+cv/createHanningWindow.m +++ b/+cv/createHanningWindow.m @@ -1,21 +1,22 @@ %CREATEHANNINGWINDOW Computes Hanning window coefficients in two dimensions % -% dst = cv.createHanningWindow(winSize) -% dst = cv.createHanningWindow(winSize, 'Type',type) +% dst = cv.createHanningWindow(winSize) +% dst = cv.createHanningWindow(winSize, 'Type',type) % % ## Input -% * __winSize__ The window size specifications `[w,h]`. +% * __winSize__ The window size specification `[w,h]` (both width and height +% must be > 1). % % ## Output % * __dst__ Destination array containing Hann coefficients. % % ## Options -% * __Type__ Created array type. Either 'single' or 'double' (default). +% * __Type__ Created array type. Either `single` or `double` (default). % % This function computes Hanning window coefficients in two dimensions. % -% See [Hann function](http://en.wikipedia.org/wiki/Hann_function) and -% [Window function](http://en.wikipedia.org/wiki/Window_function) for more +% See [Hann function](https://en.wikipedia.org/wiki/Hann_function) and +% [Window function](https://en.wikipedia.org/wiki/Window_function) for more % information. % % See also: hann, hanning diff --git a/+cv/cvtColor.m b/+cv/cvtColor.m index 8f4133f34..a9dbf8ed3 100644 --- a/+cv/cvtColor.m +++ b/+cv/cvtColor.m @@ -1,102 +1,97 @@ %CVTCOLOR Converts an image from one color space to another % -% dst = cv.cvtColor(src, code) -% dst = cv.cvtColor(src, code, 'OptionName',optionValue, ...) +% dst = cv.cvtColor(src, code) +% dst = cv.cvtColor(src, code, 'OptionName',optionValue, ...) % % ## Input % * __src__ Input image: 8-bit unsigned, 16-bit unsigned, or single-precision -% floating-point. -% * __code__ Color space conversion code string, e.g., 'RGB2GRAY'.
-% The following codes are supported: -% * __BGR2BGRA__, __RGB2RGBA__: add alpha channel to RGB and BGR image -% * __BGRA2BGR__, __RGBA2RGB__: remove alpha channel from RGB and BGR -% image -% * __BGR2RGBA__, __RGB2BGRA__, __RGBA2BGR__, __BGRA2RGB__, __BGR2RGB__, -% __RGB2BGR__, __BGRA2RGBA__, __RGBA2BGRA__: convert between RGB and -% BGR color spaces (with or without alpha channel) -% * __BGR2GRAY__, __RGB2GRAY__, __GRAY2BGR__, __GRAY2RGB__, -% __GRAY2BGRA__, __GRAY2RGBA__, __BGRA2GRAY__, __RGBA2GRAY__: convert -% between RGB/BGR and grayscale -% * __BGR2BGR565__, __RGB2BGR565__, __BGR5652BGR__, __BGR5652RGB__, -% __BGRA2BGR565__, __RGBA2BGR565__, __BGR5652BGRA__, __BGR5652RGBA__: -% convert between RGB/BGR and BGR565 (16-bit images) -% * __GRAY2BGR565__, __BGR5652GRAY__: convert between grayscale and -% BGR565 (16-bit images) -% * __BGR2BGR555__, __RGB2BGR555__, __BGR5552BGR__, __BGR5552RGB__, -% __BGRA2BGR555__, __RGBA2BGR555__, __BGR5552BGRA__, __BGR5552RGBA__: -% convert between RGB/BGR and BGR555 (16-bit images) -% * __GRAY2BGR555__, __BGR5552GRAY__: convert between grayscale and -% BGR555 (16-bit images) -% * __BGR2XYZ__, __RGB2XYZ__, __XYZ2BGR__, __XYZ2RGB__: convert between -% RGB/BGR and CIE XYZ -% * __BGR2YCrCb__, __RGB2YCrCb__, __YCrCb2BGR__, __YCrCb2RGB__: convert -% between RGB/BGR and luma-chroma (aka YCC) -% * __BGR2YUV__, __RGB2YUV__, __YUV2BGR__, __YUV2RGB__: convert between -% RGB/BGR and YUV -% * __BGR2HSV__, __RGB2HSV__, __HSV2BGR__, __HSV2RGB__, **BGR2HSV_FULL**, -% **RGB2HSV_FULL**, **HSV2BGR_FULL**, **HSV2RGB_FULL**: convert between -% RGB/BGR and HSV (hue saturation value) -% * __BGR2HLS__, __RGB2HLS__, __HLS2BGR__, __HLS2RGB__, **BGR2HLS_FULL**, -% **RGB2HLS_FULL**, **HLS2BGR_FULL**, **HLS2RGB_FULL**: convert between -% RGB/BGR and HLS (hue lightness saturation) -% * __BGR2Lab__, __RGB2Lab__, __Lab2BGR__, __Lab2RGB__, __LBGR2Lab__, -% __LRGB2Lab__, __Lab2LBGR__, __Lab2LRGB__: convert between RGB/BGR -% and CIE Lab -% * __BGR2Luv__, __RGB2Luv__, __Luv2BGR__, __Luv2RGB__, __LBGR2Luv__, -% __LRGB2Luv__, __Luv2LBGR__, __Luv2LRGB__: convert between RGB/BGR -% and CIE Luv -% * **YUV2RGB_NV12**, **YUV2BGR_NV12**, **YUV2RGB_NV21**, -% **YUV2BGR_NV21**, __YUV420sp2RGB__, __YUV420sp2BGR__, -% **YUV2RGBA_NV12**, **YUV2BGRA_NV12**, **YUV2RGBA_NV21**, -% **YUV2BGRA_NV21**, __YUV420sp2RGBA__, __YUV420sp2BGRA__, -% **YUV2RGB_YV12**, **YUV2BGR_YV12**, **YUV2RGB_IYUV**, -% **YUV2BGR_IYUV**, **YUV2RGB_I420**, **YUV2BGR_I420**, -% __YUV420p2RGB__, __YUV420p2BGR__, **YUV2RGBA_YV12**, -% **YUV2BGRA_YV12**, **YUV2RGBA_IYUV**, **YUV2BGRA_IYUV**, -% **YUV2RGBA_I420**, **YUV2BGRA_I420**, __YUV420p2RGBA__, -% __YUV420p2BGRA__, **YUV2GRAY_420**, **YUV2GRAY_NV21**, -% **YUV2GRAY_NV12**, **YUV2GRAY_YV12**, **YUV2GRAY_IYUV**, -% **YUV2GRAY_I420**, __YUV420sp2GRAY__, __YUV420p2GRAY__: -% YUV 4:2:0 family to RGB -% * **YUV2RGB_UYVY**, **YUV2BGR_UYVY**, **YUV2RGB_Y422**, -% **YUV2BGR_Y422**, **YUV2RGB_UYNV**, **YUV2BGR_UYNV**, -% **YUV2RGBA_UYVY**, **YUV2BGRA_UYVY**, **YUV2RGBA_Y422**, -% **YUV2BGRA_Y422**, **YUV2RGBA_UYNV**, **YUV2BGRA_UYNV**, -% **YUV2RGB_YUY2**, **YUV2BGR_YUY2**, **YUV2RGB_YVYU**, -% **YUV2BGR_YVYU**, **YUV2RGB_YUYV**, **YUV2BGR_YUYV**, -% **YUV2RGB_YUNV**, **YUV2BGR_YUNV**, **YUV2RGBA_YUY2**, -% **YUV2BGRA_YUY2**, **YUV2RGBA_YVYU**, **YUV2BGRA_YVYU**, -% **YUV2RGBA_YUYV**, **YUV2BGRA_YUYV**, **YUV2RGBA_YUNV**, -% **YUV2BGRA_YUNV**, **YUV2GRAY_UYVY**, **YUV2GRAY_YUY2**, -% **YUV2GRAY_Y422**, **YUV2GRAY_UYNV**, **YUV2GRAY_YVYU**, -% **YUV2GRAY_YUYV**, **YUV2GRAY_YUNV**: YUV 
4:2:2 family to RGB -% * __RGBA2mRGBA__, __mRGBA2RGBA__: alpha premultiplication -% * **RGB2YUV_I420**, **BGR2YUV_I420**, **RGB2YUV_IYUV**, -% **BGR2YUV_IYUV**, **RGBA2YUV_I420**, **BGRA2YUV_I420**, -% **RGBA2YUV_IYUV**, **BGRA2YUV_IYUV**, **RGB2YUV_YV12**, -% **BGR2YUV_YV12**, **RGBA2YUV_YV12**, **BGRA2YUV_YV12**: -% RGB to YUV 4:2:0 family -% * __BayerBG2BGR__, __BayerGB2BGR__, __BayerRG2BGR__, __BayerGR2BGR__, -% __BayerBG2RGB__, __BayerGB2RGB__, __BayerRG2RGB__, __BayerGR2RGB__, -% __BayerBG2GRAY__, __BayerGB2GRAY__, __BayerRG2GRAY__, -% __BayerGR2GRAY__, __BayerBG2BGRA__, __BayerGB2BGRA__, -% __BayerRG2BGRA__, __BayerGR2BGRA__, __BayerBG2RGBA__, -% __BayerGB2RGBA__, __BayerRG2RGBA__, __BayerGR2RGBA__: Demosaicing -% * **BayerBG2BGR_VNG**, **BayerGB2BGR_VNG**, **BayerRG2BGR_VNG**, -% **BayerGR2BGR_VNG**, **BayerBG2RGB_VNG**, **BayerGB2RGB_VNG**, -% **BayerRG2RGB_VNG**, **BayerGR2RGB_VNG**: Demosaicing using -% Variable Number of Gradients -% * **BayerBG2BGR_EA**, **BayerGB2BGR_EA**, **BayerRG2BGR_EA**, -% **BayerGR2BGR_EA**, **BayerBG2RGB_EA**, **BayerGB2RGB_EA**, -% **BayerRG2RGB_EA**, **BayerGR2RGB_EA**: Edge-Aware Demosaicing +% floating-point. +% * __code__ Color space conversion code string, e.g., 'RGB2GRAY'. The +% following codes are supported: +% * __BGR2BGRA__, __RGB2RGBA__: add alpha channel to RGB and BGR image +% * __BGRA2BGR__, __RGBA2RGB__: remove alpha channel from RGB and BGR image +% * __BGR2RGBA__, __RGB2BGRA__, __RGBA2BGR__, __BGRA2RGB__, __BGR2RGB__, +% __RGB2BGR__, __BGRA2RGBA__, __RGBA2BGRA__: convert between RGB and BGR +% color spaces (with or without alpha channel) +% * __BGR2GRAY__, __RGB2GRAY__, __GRAY2BGR__, __GRAY2RGB__, __GRAY2BGRA__, +% __GRAY2RGBA__, __BGRA2GRAY__, __RGBA2GRAY__: convert between RGB/BGR and +% grayscale +% * __BGR2BGR565__, __RGB2BGR565__, __BGR5652BGR__, __BGR5652RGB__, +% __BGRA2BGR565__, __RGBA2BGR565__, __BGR5652BGRA__, __BGR5652RGBA__: +% convert between RGB/BGR and BGR565 (16-bit images) +% * __GRAY2BGR565__, __BGR5652GRAY__: convert between grayscale and BGR565 +% (16-bit images) +% * __BGR2BGR555__, __RGB2BGR555__, __BGR5552BGR__, __BGR5552RGB__, +% __BGRA2BGR555__, __RGBA2BGR555__, __BGR5552BGRA__, __BGR5552RGBA__: +% convert between RGB/BGR and BGR555 (16-bit images) +% * __GRAY2BGR555__, __BGR5552GRAY__: convert between grayscale and BGR555 +% (16-bit images) +% * __BGR2XYZ__, __RGB2XYZ__, __XYZ2BGR__, __XYZ2RGB__: convert between +% RGB/BGR and CIE XYZ +% * __BGR2YCrCb__, __RGB2YCrCb__, __YCrCb2BGR__, __YCrCb2RGB__: convert +% between RGB/BGR and luma-chroma (aka YCC) +% * __BGR2YUV__, __RGB2YUV__, __YUV2BGR__, __YUV2RGB__: convert between +% RGB/BGR and YUV +% * __BGR2HSV__, __RGB2HSV__, __HSV2BGR__, __HSV2RGB__, **BGR2HSV_FULL**, +% **RGB2HSV_FULL**, **HSV2BGR_FULL**, **HSV2RGB_FULL**: convert between +% RGB/BGR and HSV (hue saturation value) +% * __BGR2HLS__, __RGB2HLS__, __HLS2BGR__, __HLS2RGB__, **BGR2HLS_FULL**, +% **RGB2HLS_FULL**, **HLS2BGR_FULL**, **HLS2RGB_FULL**: convert between +% RGB/BGR and HLS (hue lightness saturation) +% * __BGR2Lab__, __RGB2Lab__, __Lab2BGR__, __Lab2RGB__, __LBGR2Lab__, +% __LRGB2Lab__, __Lab2LBGR__, __Lab2LRGB__: convert between RGB/BGR and +% CIE Lab +% * __BGR2Luv__, __RGB2Luv__, __Luv2BGR__, __Luv2RGB__, __LBGR2Luv__, +% __LRGB2Luv__, __Luv2LBGR__, __Luv2LRGB__: convert between RGB/BGR and +% CIE Luv +% * **YUV2RGB_NV12**, **YUV2BGR_NV12**, **YUV2RGB_NV21**, **YUV2BGR_NV21**, +% __YUV420sp2RGB__, __YUV420sp2BGR__, **YUV2RGBA_NV12**, **YUV2BGRA_NV12**, +% **YUV2RGBA_NV21**, 
**YUV2BGRA_NV21**, __YUV420sp2RGBA__, +% __YUV420sp2BGRA__, **YUV2RGB_YV12**, **YUV2BGR_YV12**, **YUV2RGB_IYUV**, +% **YUV2BGR_IYUV**, **YUV2RGB_I420**, **YUV2BGR_I420**, __YUV420p2RGB__, +% __YUV420p2BGR__, **YUV2RGBA_YV12**, **YUV2BGRA_YV12**, **YUV2RGBA_IYUV**, +% **YUV2BGRA_IYUV**, **YUV2RGBA_I420**, **YUV2BGRA_I420**, +% __YUV420p2RGBA__, __YUV420p2BGRA__, **YUV2GRAY_420**, **YUV2GRAY_NV21**, +% **YUV2GRAY_NV12**, **YUV2GRAY_YV12**, **YUV2GRAY_IYUV**, +% **YUV2GRAY_I420**, __YUV420sp2GRAY__, __YUV420p2GRAY__: YUV 4:2:0 family +% to RGB +% * **YUV2RGB_UYVY**, **YUV2BGR_UYVY**, **YUV2RGB_Y422**, **YUV2BGR_Y422**, +% **YUV2RGB_UYNV**, **YUV2BGR_UYNV**, **YUV2RGBA_UYVY**, +% **YUV2BGRA_UYVY**, **YUV2RGBA_Y422**, **YUV2BGRA_Y422**, +% **YUV2RGBA_UYNV**, **YUV2BGRA_UYNV**, **YUV2RGB_YUY2**, **YUV2BGR_YUY2**, +% **YUV2RGB_YVYU**, **YUV2BGR_YVYU**, **YUV2RGB_YUYV**, **YUV2BGR_YUYV**, +% **YUV2RGB_YUNV**, **YUV2BGR_YUNV**, **YUV2RGBA_YUY2**, **YUV2BGRA_YUY2**, +% **YUV2RGBA_YVYU**, **YUV2BGRA_YVYU**, **YUV2RGBA_YUYV**, +% **YUV2BGRA_YUYV**, **YUV2RGBA_YUNV**, **YUV2BGRA_YUNV**, +% **YUV2GRAY_UYVY**, **YUV2GRAY_YUY2**, **YUV2GRAY_Y422**, +% **YUV2GRAY_UYNV**, **YUV2GRAY_YVYU**, **YUV2GRAY_YUYV**, +% **YUV2GRAY_YUNV**: YUV 4:2:2 family to RGB +% * __RGBA2mRGBA__, __mRGBA2RGBA__: alpha premultiplication +% * **RGB2YUV_I420**, **BGR2YUV_I420**, **RGB2YUV_IYUV**, **BGR2YUV_IYUV**, +% **RGBA2YUV_I420**, **BGRA2YUV_I420**, **RGBA2YUV_IYUV**, +% **BGRA2YUV_IYUV**, **RGB2YUV_YV12**, **BGR2YUV_YV12**, **RGBA2YUV_YV12**, +% **BGRA2YUV_YV12**: RGB to YUV 4:2:0 family +% * __BayerBG2BGR__, __BayerGB2BGR__, __BayerRG2BGR__, __BayerGR2BGR__, +% __BayerBG2RGB__, __BayerGB2RGB__, __BayerRG2RGB__, __BayerGR2RGB__, +% __BayerBG2GRAY__, __BayerGB2GRAY__, __BayerRG2GRAY__, __BayerGR2GRAY__, +% __BayerBG2BGRA__, __BayerGB2BGRA__, __BayerRG2BGRA__, __BayerGR2BGRA__, +% __BayerBG2RGBA__, __BayerGB2RGBA__, __BayerRG2RGBA__, __BayerGR2RGBA__: +% Demosaicing +% * **BayerBG2BGR_VNG**, **BayerGB2BGR_VNG**, **BayerRG2BGR_VNG**, +% **BayerGR2BGR_VNG**, **BayerBG2RGB_VNG**, **BayerGB2RGB_VNG**, +% **BayerRG2RGB_VNG**, **BayerGR2RGB_VNG**: Demosaicing using Variable +% Number of Gradients +% * **BayerBG2BGR_EA**, **BayerGB2BGR_EA**, **BayerRG2BGR_EA**, +% **BayerGR2BGR_EA**, **BayerBG2RGB_EA**, **BayerGB2RGB_EA**, +% **BayerRG2RGB_EA**, **BayerGR2RGB_EA**: Edge-Aware Demosaicing % % ## Output % * __dst__ Output image of the same row/column size and depth as `src`. % % ## Options -% * __DstCn__ Number of channels in the destination image. If the parameter -% is 0, the number of the channels is derived automatically from `src` -% and `code`. +% * __DstCn__ Number of channels in the destination image. If the parameter is +% 0, the number of channels is derived automatically from `src` and +% `code`. % % The function converts an input image from one color space to another. In case % of a transformation to-from RGB color space, the order of the channels should @@ -121,7 +116,7 @@ % will have the 0..255 value range instead of 0..1 assumed by the function. % So, before calling cv.cvtColor, you need first to scale the image down: % -% img = cvtColor(img./255, 'BGR2Luv'); +% img = cv.cvtColor(img./255, 'BGR2Luv'); % % If you use cv.cvtColor with 8-bit images, the conversion will have some % information lost. For many applications, this will not be noticeable but it @@ -133,8 +128,6 @@ % corresponding channel range: 255 for `uint8`, 65535 for `uint16`, and 1 for % `single`.
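+% +% For illustration, a minimal sketch of the range handling described above (an added example, not from the original docs; assumes an 8-bit RGB image such as MATLAB's bundled peppers.png): +% +%     % 8-bit input keeps the 0..255 range (some precision is lost) +%     img = imread('peppers.png'); +%     luv8 = cv.cvtColor(img, 'RGB2Luv'); +%     % floating-point input must be scaled into 0..1 first +%     luvF = cv.cvtColor(single(img)./255, 'RGB2Luv'); +%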
% -% --- -% % # Color Conversions % % ## RGB ⇔ GRAY % % Transformations within RGB space like adding/removing the alpha channel, % reversing the channel order, conversion to/from 16-bit RGB color (`R5:G6:B5` % or `R5:G5:B5`), as well as conversion to/from grayscale using: % -% RGB[A] to Gray: Y = 0.299*R + 0.587*G + 0.114*B +% RGB[A] to Gray: Y = 0.299*R + 0.587*G + 0.114*B % % and % -% Gray to RGB[A]: R = Y, G = Y, B = Y, A = max(ChannelRange) +% Gray to RGB[A]: R = Y, G = Y, B = Y, A = max(ChannelRange) % % The conversion from a RGB image to gray is done with: % -% bwsrc = cv.cvtColor(src, 'RGB2GRAY'); +% bwsrc = cv.cvtColor(src, 'RGB2GRAY'); % % More advanced channel reordering can also be done with cv.mixChannels. % @@ -159,13 +152,13 @@ % % ## RGB ⇔ CIE XYZ.Rec 709 with D65 white point % -% [X;Y;Z] = [0.412453, 0.357580, 0.180423; -% 0.212671, 0.715160, 0.072169; -% 0.019334, 0.119193, 0.950227] * [R;G;B] +% [X;Y;Z] = [0.412453, 0.357580, 0.180423; +% 0.212671, 0.715160, 0.072169; +% 0.019334, 0.119193, 0.950227] * [R;G;B] % -% [R;G;B] = [3.240479, -1.53715, -0.498535; -% -0.969256, 1.875991, 0.041556; -% 0.055648, -0.204043, 1.057311] * [X;Y;Z] +% [R;G;B] = [3.240479, -1.53715, -0.498535; +% -0.969256, 1.875991, 0.041556; +% 0.055648, -0.204043, 1.057311] * [X;Y;Z] % % `X`, `Y`, and `Z` cover the whole value range (in case of floating-point % images, `Z` may exceed 1). @@ -174,18 +167,18 @@ % % ## RGB ⇔ YCrCb JPEG (or YCC) % -% Y = 0.299*R + 0.587G + 0.114B -% Cb = (R-Y)*0.713 + delta -% Cr = (B-Y)*0.564 + delta -% R = Y + 1.403*(Cr-delta) -% G = Y - 0.714*(Cr-delta) - 0.344*(Cb-delta) -% B = Y + 1.773*(Cb-delta) +% Y = 0.299*R + 0.587*G + 0.114*B +% Cr = (R-Y)*0.713 + delta +% Cb = (B-Y)*0.564 + delta +% R = Y + 1.403*(Cr-delta) +% G = Y - 0.714*(Cr-delta) - 0.344*(Cb-delta) +% B = Y + 1.773*(Cb-delta) % % where % -% / 128 for 8-bit images -% delta = | 32768 for 16-bit images -% \ 0.5 for floating-point images +% / 128 for 8-bit images +% delta = | 32768 for 16-bit images +% \ 0.5 for floating-point images % % `Y`, `Cr`, and `Cb` cover the whole value range. % @@ -196,12 +189,12 @@ % In case of 8-bit and 16-bit images, `R`, `G`, and `B` are converted to the % floating-point format and scaled to fit the 0 to 1 range. % -% V = max(R,G,B) -% S = / (V - min(R,G,B)) / V if V != 0 -% \ 0 otherwise -% / 60*(G-B) / (V - min(R,G,B)) if V=R -% H = | 120 + 60*(B-R) / (V - min(R,G,B)) if V=G -% \ 240 + 60*(R-G) / (V - min(R,G,B)) if V=B +% V = max(R,G,B) +% S = / (V - min(R,G,B)) / V if V != 0 +% \ 0 otherwise +% / 60*(G-B) / (V - min(R,G,B)) if V=R +% H = | 120 + 60*(B-R) / (V - min(R,G,B)) if V=G +% \ 240 + 60*(R-G) / (V - min(R,G,B)) if V=B % % If `H<0` then `H=H+360`. On output `0<=V<=1`, `0<=S<=1`, `0<=H<=360`. % @@ -218,14 +211,14 @@ % In case of 8-bit and 16-bit images, `R`, `G`, and `B` are converted to the % floating-point format and scaled to fit the 0 to 1 range. % -% Vmax = max(R,G,B) -% Vmin = min(R,G,B) -% L = (Vmax + Vmin)/2 -% S = / (Vmax - Vmin)/(Vmax + Vmin) if L < 0.5 -% \ (Vmax - Vmin)/(2-(Vmax + Vmin)) if L >= 0.5 -% / 60*(G-B) / (Vmax - Vmin) if Vmax=R -% H = | 120 + 60*(B-R) / (Vmax - Vmin) if Vmax=G -% \ 240 + 60*(R-G) / (Vmax - Vmin) if Vmax=B +% Vmax = max(R,G,B) +% Vmin = min(R,G,B) +% L = (Vmax + Vmin)/2 +% S = / (Vmax - Vmin)/(Vmax + Vmin) if L < 0.5 +% \ (Vmax - Vmin)/(2-(Vmax + Vmin)) if L >= 0.5 +% / 60*(G-B) / (Vmax - Vmin) if Vmax=R +% H = | 120 + 60*(B-R) / (Vmax - Vmin) if Vmax=G +% \ 240 + 60*(R-G) / (Vmax - Vmin) if Vmax=B % % If `H<0` then `H=H+360`. On output `0<=L<=1`, `0<=S<=1`, `0<=H<=360`.
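+% +% A quick numeric check of the HLS formulas above (an added sketch, not from the original docs; one floating-point pixel): +% +%     px = reshape(single([0.5 0.25 0.125]), 1, 1, 3); +%     hls = cv.cvtColor(px, 'RGB2HLS'); +%     % channels are ordered H,L,S; by hand: Vmax=0.5, Vmin=0.125, +%     % L = 0.3125, S = 0.375/0.625 = 0.6 (L < 0.5), and since Vmax=R, +%     % H = 60*(0.25-0.125)/0.375 = 20 +%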
% @@ -242,28 +235,28 @@ % In case of 8-bit and 16-bit images, `R`, `G`, and `B` are converted to the % floating-point format and scaled to fit the 0 to 1 range. % -% [X;Y;Z] = [0.412453, 0.357580, 0.180423; -% 0.212671, 0.715160, 0.072169; -% 0.019334, 0.119193, 0.950227] * [R;G;B] +% [X;Y;Z] = [0.412453, 0.357580, 0.180423; +% 0.212671, 0.715160, 0.072169; +% 0.019334, 0.119193, 0.950227] * [R;G;B] % -% X = X/Xn, where Xn = 0.950456 -% Z = Z/Zn, where Zn = 1.088754 +% X = X/Xn, where Xn = 0.950456 +% Z = Z/Zn, where Zn = 1.088754 % -% L = / 116 * Y^(1/3) - 16 for Y > 0.008856 -% \ 903.3 * Y for Y <= 0.008856 +% L = / 116 * Y^(1/3) - 16 for Y > 0.008856 +% \ 903.3 * Y for Y <= 0.008856 % -% a = 500*(f(X) - f(Y)) + delta -% b = 200*(f(Y) - f(Z)) + delta +% a = 500*(f(X) - f(Y)) + delta +% b = 200*(f(Y) - f(Z)) + delta % % where % -% f(t) = / t^(1/3) for t < 0.008856 -% \ 7.787*t + 16/116 for t >= 0.008856 +% f(t) = / t^(1/3) for t > 0.008856 +% \ 7.787*t + 16/116 for t <= 0.008856 % % and % -% delta = / 128 for 8-bit images -% \ 0 for floating-point images +% delta = / 128 for 8-bit images +% \ 0 for floating-point images % % This outputs `0<=L<=100`, `-127<=a<=127`, `-127<=b<=127`. The values are % then converted to the destination data type: @@ -279,18 +272,18 @@ % In case of 8-bit and 16-bit images, `R`, `G`, and `B` are converted to the % floating-point format and scaled to fit 0 to 1 range. % -% [X;Y;Z] = [0.412453, 0.357580, 0.180423; -% 0.212671, 0.715160, 0.072169; -% 0.019334, 0.119193, 0.950227] * [R;G;B] +% [X;Y;Z] = [0.412453, 0.357580, 0.180423; +% 0.212671, 0.715160, 0.072169; +% 0.019334, 0.119193, 0.950227] * [R;G;B] % -% L = / 116 * Y^(1/3) - 16 for Y > 0.008856 -% \ 903.3 * Y for Y <= 0.008856 +% L = / 116 * Y^(1/3) - 16 for Y > 0.008856 +% \ 903.3 * Y for Y <= 0.008856 % -% u' = 4*X/(X + 15*Y + 32*Z) -% v' = 9*Y/(X + 15*Y + 32*Z) +% u' = 4*X/(X + 15*Y + 3*Z) +% v' = 9*Y/(X + 15*Y + 3*Z) % -% u = 13*L*(u' - un), where un = 0.19793943 -% v = 13*L*(v' - vn), where vn = 0.46831096 +% u = 13*L*(u' - un), where un = 0.19793943 +% v = 13*L*(v' - vn), where vn = 0.46831096 % % This outputs `0<=L<=100`, `-134<=u<=220`, `-140<=v<=122`. % @@ -300,6 +293,11 @@ % * 16-bit images: (currently not supported) % * 32-bit images: `L`, `u`, and `v` are left as is % +% Note that when converting integer Luv images to RGB the intermediate +% `X`, `Y` and `Z` values are truncated to the [0,2] range to fit white point +% limitations. It may lead to incorrect representation of colors with odd XYZ +% values. +% % The above formulae for converting RGB to/from various color spaces have been % taken from multiple sources on the web, primarily from the % [Charles Poynton site](http://www.poynton.com/ColorFAQ.html). @@ -312,11 +310,11 @@ % get color pictures from a single plane where `R`, `G`, and `B` pixels % (sensors of a particular component) are interleaved as follows: % -% R G R G R -% G (B) (G) B G -% R G R G R -% G B G B G -% R G R G R +% R G R G R +% G (B) (G) B G +% R G R G R +% G B G B G +% R G R G R % % The output RGB components of a pixel are interpolated from 1, 2, or 4 % neighbors of the pixel having the same color. There are several diff --git a/+cv/dct.m b/+cv/dct.m index 8d7f9eaff..42c9e07dd 100644 --- a/+cv/dct.m +++ b/+cv/dct.m @@ -1,7 +1,7 @@ %DCT Performs a forward or inverse discrete Cosine transform of 1D or 2D array % -% dst = cv.dct(src) -% dst = cv.dct(src, 'OptionName',optionValue, ...) +% dst = cv.dct(src) +% dst = cv.dct(src, 'OptionName',optionValue, ...)
% % ## Input % * __src__ input floating-point single-channel array. @@ -11,40 +11,39 @@ % % ## Options % * __Inverse__ performs an inverse 1D or 2D transform instead of the default -% forward transform. default false +% forward transform. default false % * __Rows__ performs a forward or inverse transform of every individual row -% of the input matrix. This flag enables you to transform multiple -% vectors simultaneously and can be used to decrease the overhead (which -% is sometimes several times larger than the processing itself) to -% perform 3D and higher-dimensional transforms and so forth. -% default false +% of the input matrix. This flag enables you to transform multiple vectors +% simultaneously and can be used to decrease the overhead (which is sometimes +% several times larger than the processing itself) to perform 3D and +% higher-dimensional transforms and so forth. default false % % The function cv.dct performs a forward or inverse discrete Cosine % transform (DCT) of a 1D or 2D floating-point array: % % - Forward Cosine transform of a 1D vector of N elements: % -% Y = CN * X +% Y = CN * X % % where % -% CN(j,k) = sqrt(alpha_j/N) * cos((pi*(2k+1)*j)/2N) +% CN(j,k) = sqrt(alpha_j/N) * cos((pi*(2k+1)*j)/2N) % % and `alpha_0=1`, `alpha_j=2` for `j>0`. % % - Inverse Cosine transform of a 1D vector of N elements: % -% X = inv(CN) * Y = transpose(CN) * Y +% X = inv(CN) * Y = transpose(CN) * Y % -% (since `CN` is an orthogonal matrix, `CN * transpose(CN) = I`) +% (since `CN` is an orthogonal matrix, `CN * transpose(CN) = I`) % % - Forward 2D Cosine transform of MxN matrix: % -% Y = CN * X * transpose(CN) +% Y = CN * X * transpose(CN) % % - Inverse 2D Cosine transform of MxN matrix: % -% X = transpose(CN) * X * CN +% X = transpose(CN) * Y * CN % % The function chooses the mode of operation by looking at the transformation % flags and size of the input array: @@ -56,16 +55,16 @@ % 1D transform. % - If none of the above is true, the function performs a 2D transform. % -% NOTE: Currently dct supports even-size arrays (2, 4, 6, etc.). For data +% Note: Currently cv.dct supports even-size arrays (2, 4, 6, etc.). For data % analysis and approximation, you can pad the array when necessary. Also, the % function performance depends very much, and not monotonically, on the array % size (see cv.getOptimalDFTSize). In the current implementation, DCT of a % vector of size `N` is calculated via DFT of a vector of size `N/2`. Thus, % the optimal DCT size `N1 >= N` can be calculated as: % -% function N1 = getOptimalDCTSize(N) -% N1 = 2 * cv.getOptimalDFTSize(fix((N+1)/2)); -% end +% function N1 = getOptimalDCTSize(N) +% N1 = 2 * cv.getOptimalDFTSize(fix((N+1)/2)); +% end % % Note: cv.idct is equivalent to `cv.dct(..., 'Inverse',true)`. % diff --git a/+cv/decolor.m b/+cv/decolor.m index 0512f4b3f..29ab4b015 100644 --- a/+cv/decolor.m +++ b/+cv/decolor.m @@ -1,22 +1,21 @@ %DECOLOR Transforms a color image to a grayscale image % -% [grayscale,color_boost] = cv.decolor(src) -% [grayscale,color_boost] = cv.decolor(src, 'OptionName',optionValue, ...) +% [grayscale, color_boost] = cv.decolor(src) +% [grayscale, color_boost] = cv.decolor(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ Input 8-bit 3-channel color image. % % ## Output % * __grayscale__ Output 8-bit 1-channel grayscale image. -% * **color_boost** Output 8-bit 3-channel contrast-boosted color image.
-% It is obtained by converting `src` to Lab color space, replacing the -% lightness channel with `grayscale`, and converting back to RGB color -% space. +% * **color_boost** Output 8-bit 3-channel contrast-boosted color image. It is +% obtained by converting `src` to Lab color space, replacing the lightness +% channel with `grayscale`, and converting back to RGB color space. % % ## Options % * __FlipChannels__ whether to flip the order of color channels in input -% `src` and output `color_boost`, between MATLAB's RGB order and -% OpenCV's BGR (input: RGB->BGR, output: BGR->RGB). default false +% `src` and output `color_boost`, between MATLAB's RGB order and OpenCV's +% BGR (input: RGB->BGR, output: BGR->RGB). default false % % It is a basic tool in digital printing, stylized black-and-white photograph % rendering, and in many single channel image processing applications [CL12]. diff --git a/+cv/decomposeEssentialMat.m b/+cv/decomposeEssentialMat.m index 440b8a0f2..6531bf1ab 100644 --- a/+cv/decomposeEssentialMat.m +++ b/+cv/decomposeEssentialMat.m @@ -1,15 +1,15 @@ %DECOMPOSEESSENTIALMAT Decompose an essential matrix to possible rotations and translation % -% S = cv.decomposeEssentialMat(E) +% S = cv.decomposeEssentialMat(E) % % ## Input % * __E__ The input essential matrix, 3x3. % % ## Output % * __S__ Decomposed `E`. A scalar struct with the following fields: -% * __R1__ One possible rotation matrix, 3x3. -% * __R2__ Another possible rotation matrix, 3x3. -% * __t__ One possible translation, 3x1. +% * __R1__ One possible rotation matrix, 3x3. +% * __R2__ Another possible rotation matrix, 3x3. +% * __t__ One possible translation, 3x1. % % This function decomposes an essential matrix `E` using SVD decomposition % [HartleyZ00]. Generally 4 possible poses exist for a given `E`. They are diff --git a/+cv/decomposeHomographyMat.m b/+cv/decomposeHomographyMat.m index 56d8b0e2f..7584f53f1 100644 --- a/+cv/decomposeHomographyMat.m +++ b/+cv/decomposeHomographyMat.m @@ -1,6 +1,6 @@ %DECOMPOSEHOMOGRAPHYMAT Decompose a homography matrix to rotation(s), translation(s) and plane normal(s) % -% [motions, nsols] = cv.decomposeHomographyMat(H, K) +% [motions, nsols] = cv.decomposeHomographyMat(H, K) % % ## Input % * __H__ The input homography matrix between two images, 3x3. @@ -8,9 +8,9 @@ % % ## Output % * __motions__ Decomposed `H`. A scalar struct with the following fields: -% * __R__ Array of rotation matrices. Cell array of 3x3 rotations. -% * __t__ Array of translation matrices. Cell array of 3x1 translations. -% * __n__ Array of plane normal matrices. Cell array of 3x1 normals. +% * __R__ Array of rotation matrices. Cell array of 3x3 rotations. +% * __t__ Array of translation matrices. Cell array of 3x1 translations. +% * __n__ Array of plane normal matrices. Cell array of 3x1 normals. % * __nsols__ number of solutions.
% % This function extracts relative camera motion between two views observing a diff --git a/+cv/decomposeProjectionMatrix.m b/+cv/decomposeProjectionMatrix.m index 4c1fa4f7c..4ba723844 100644 --- a/+cv/decomposeProjectionMatrix.m +++ b/+cv/decomposeProjectionMatrix.m @@ -1,7 +1,7 @@ %DECOMPOSEPROJECTIONMATRIX Decomposes a projection matrix into a rotation matrix and a camera matrix % -% [cameraMatrix,rotMatrix,transVect] = cv.decomposeProjectionMatrix(projMatrix) -% [cameraMatrix,rotMatrix,transVect,S] = cv.decomposeProjectionMatrix(projMatrix) +% [cameraMatrix,rotMatrix,transVect] = cv.decomposeProjectionMatrix(projMatrix) +% [cameraMatrix,rotMatrix,transVect,S] = cv.decomposeProjectionMatrix(projMatrix) % % ## Input % * __projMatrix__ 3x4 input projection matrix P. @@ -11,11 +11,11 @@ % * __rotMatrix__ 3x3 external rotation matrix R. % * __transVect__ 4x1 translation vector T. % * __S__ Optional output struct with the following fields: -% * __rotMatrX__ 3x3 rotation matrix around x-axis. -% * __rotMatrY__ 3x3 rotation matrix around y-axis. -% * __rotMatrZ__ 3x3 rotation matrix around z-axis. -% * __eulerAngles__ 3-element vector containing three Euler angles of -% rotation in degrees. +% * __rotMatrX__ 3x3 rotation matrix around x-axis. +% * __rotMatrY__ 3x3 rotation matrix around y-axis. +% * __rotMatrZ__ 3x3 rotation matrix around z-axis. +% * __eulerAngles__ 3-element vector containing three Euler angles of +% rotation in degrees. % % The function computes a decomposition of a projection matrix into a % calibration and a rotation matrix and the position of a camera. diff --git a/+cv/demosaicing.m b/+cv/demosaicing.m index 24c68cd7e..36e634f41 100644 --- a/+cv/demosaicing.m +++ b/+cv/demosaicing.m @@ -1,39 +1,39 @@ %DEMOSAICING Demosaicing algorithm % -% dst = cv.demosaicing(src, code) -% dst = cv.demosaicing(..., 'OptionName', optionValue, ...) +% dst = cv.demosaicing(src, code) +% dst = cv.demosaicing(..., 'OptionName', optionValue, ...) % % ## Input % * __src__ source image, 1-channel, `uint8` or `uint16` depth. % * __code__ color conversion code. 
The following strings are supported: -% * For Bayer->Gray demosaicing: -% * __BayerBG2GRAY__ -% * __BayerGB2GRAY__ -% * __BayerRG2GRAY__ -% * __BayerGR2GRAY__ -% * For Bayer->RGB demosaicing: -% * __BayerBG2BGR__, __BayerBG2RGB__ -% * __BayerGB2BGR__, __BayerGB2RGB__ -% * __BayerRG2BGR__, __BayerRG2RGB__ -% * __BayerGR2BGR__, __BayerGR2RGB__ -% * For Bayer->RGB demosaicing using Variable Number of Gradients: -% * **BayerBG2BGR_VNG**, **BayerBG2RGB_VNG** -% * **BayerGB2BGR_VNG**, **BayerGB2RGB_VNG** -% * **BayerRG2BGR_VNG**, **BayerRG2RGB_VNG** -% * **BayerGR2BGR_VNG**, **BayerGR2RGB_VNG** -% * For Bayer->RGB Edge-Aware demosaicing: -% * **BayerBG2BGR_EA**, **BayerBG2RGB_EA** -% * **BayerGB2BGR_EA**, **BayerGB2RGB_EA** -% * **BayerRG2BGR_EA**, **BayerRG2RGB_EA** -% * **BayerGR2BGR_EA**, **BayerGR2RGB_EA** +% * For Bayer->Gray demosaicing: +% * __BayerBG2GRAY__ +% * __BayerGB2GRAY__ +% * __BayerRG2GRAY__ +% * __BayerGR2GRAY__ +% * For Bayer->RGB demosaicing: +% * __BayerBG2BGR__, __BayerBG2RGB__ +% * __BayerGB2BGR__, __BayerGB2RGB__ +% * __BayerRG2BGR__, __BayerRG2RGB__ +% * __BayerGR2BGR__, __BayerGR2RGB__ +% * For Bayer->RGB demosaicing using Variable Number of Gradients: +% * **BayerBG2BGR_VNG**, **BayerBG2RGB_VNG** +% * **BayerGB2BGR_VNG**, **BayerGB2RGB_VNG** +% * **BayerRG2BGR_VNG**, **BayerRG2RGB_VNG** +% * **BayerGR2BGR_VNG**, **BayerGR2RGB_VNG** +% * For Bayer->RGB Edge-Aware demosaicing: +% * **BayerBG2BGR_EA**, **BayerBG2RGB_EA** +% * **BayerGB2BGR_EA**, **BayerGB2RGB_EA** +% * **BayerRG2BGR_EA**, **BayerRG2RGB_EA** +% * **BayerGR2BGR_EA**, **BayerGR2RGB_EA** % % ## Output % * __dst__ output image of same row-/col- size and depth as `src`, and of -% specified number of channels (see `Channels` option). +% specified number of channels (see `Channels` option). % % ## Options % * __Channels__ Number of channels. If <= 0, automatically determined based -% on `code`. default 0 +% on `code`. default 0 % % See also: cv.cvtColor, demosaic, vision.DemosaicInterpolator % diff --git a/+cv/denoise_TVL1.m b/+cv/denoise_TVL1.m index b1b725c5b..fd799a2f7 100644 --- a/+cv/denoise_TVL1.m +++ b/+cv/denoise_TVL1.m @@ -1,26 +1,26 @@ %DENOISE_TVL1 Primal-Dual algorithm to perform image denoising % -% result = cv.denoise_TVL1(observations) -% result = cv.denoise_TVL1(observations, 'OptionName',optionValue, ...) +% result = cv.denoise_TVL1(observations) +% result = cv.denoise_TVL1(observations, 'OptionName',optionValue, ...) % % ## Input % * __observations__ This cell array should contain one or more noised -% versions of the image that is to be restored. All images should have -% the same size and `uint8` type (grayscale). +% versions of the image that is to be restored. All images should have the +% same size and `uint8` type (grayscale). % % ## Output % * __result__ the denoised 8-bit image. % % ## Options % * __Lambda__ Corresponds to `lambda` in the formulas below. As it is -% enlarged, the smooth (blurred) images are treated more favorably than -% detailed (but maybe more noised) ones. Roughly speaking, as it becomes -% smaller, the result will be more blur but more sever outliers will be -% removed. default 1.0 +% enlarged, the smooth (blurred) images are treated more favorably than +% detailed (but maybe more noised) ones. Roughly speaking, as it becomes +% smaller, the result will be more blurred but more severe outliers will be +% removed. default 1.0 % * __NIters__ Number of iterations that the algorithm will run.
Of course, the -% more iterations as better, but it is hard to quantitatively refine -% this statement, so just use the default and increase it if the results -% are poor. default 30 +% more iterations the better, but it is hard to quantitatively refine this +% statement, so just use the default and increase it if the results are +% poor. default 30 % % Primal-dual algorithm is an algorithm for solving special types of % variational problems (that is, finding a function to minimize some @@ -42,7 +42,7 @@ % We shall denote the noised images as `f_i` and with this view, given some % image `x` of the same size, we may measure how bad it is by the formula: % -% || nabla_x || + lambda * sum_i || x - f_i || +% || nabla_x || + lambda * sum_i || x - f_i || % % `|| . ||` here denotes L2-norm and as you see, the first addend states that % we want our image to be smooth (ideally, having zero gradient, thus being diff --git a/+cv/detailEnhance.m b/+cv/detailEnhance.m index 359183ed7..47b87f3c1 100644 --- a/+cv/detailEnhance.m +++ b/+cv/detailEnhance.m @@ -1,7 +1,7 @@ %DETAILENHANCE This filter enhances the details of a particular image % -% dst = cv.detailEnhance(src) -% dst = cv.detailEnhance(src, 'OptionName',optionValue, ...) +% dst = cv.detailEnhance(src) +% dst = cv.detailEnhance(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ Input 8-bit 3-channel image. @@ -13,8 +13,8 @@ % * __SigmaS__ Range between 0 to 200. default 10 % * __SigmaR__ Range between 0 to 1. default 0.15 % * __FlipChannels__ whether to flip the order of color channels in input -% `src` and output `dst`, between MATLAB's RGB order and OpenCV's BGR -% (input: RGB->BGR, output: BGR->RGB). default false +% `src` and output `dst`, between MATLAB's RGB order and OpenCV's BGR +% (input: RGB->BGR, output: BGR->RGB). default false % % See also: cv.edgePreservingFilter, locallapfilt, localcontrast % diff --git a/+cv/dft.m b/+cv/dft.m index 1d54e6839..f24283a6c 100644 --- a/+cv/dft.m +++ b/+cv/dft.m @@ -1,7 +1,7 @@ %DFT Performs a forward or inverse Discrete Fourier transform of a 1D or 2D floating-point array % -% dst = cv.dft(src) -% dst = cv.dft(src, 'OptionName',optionValue, ...) +% dst = cv.dft(src) +% dst = cv.dft(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ input floating-point array that could be real or complex. @@ -11,75 +11,72 @@ % % ## Options % * __Inverse__ performs an inverse 1D or 2D transform instead of the default -% forward transform. default false +% forward transform. default false % * __Scale__ scales the result: divide it by the number of array elements. -% Normally, it is combined with the `Inverse` flag, as it guarantees -% that the inverse of the inverse will have the correct normalization. -% default false +% Normally, it is combined with the `Inverse` flag, as it guarantees that +% the inverse of the inverse will have the correct normalization. +% default false % * __Rows__ performs a forward or inverse transform of every individual row -% of the input matrix; this flag enables you to transform multiple -% vectors simultaneously and can be used to decrease the overhead (which -% is sometimes several times larger than the processing itself) to -% perform 3D and higher-dimensional transformations and so forth.
-% default false +% of the input matrix; this flag enables you to transform multiple vectors +% simultaneously and can be used to decrease the overhead (which is sometimes +% several times larger than the processing itself) to perform 3D and +% higher-dimensional transformations and so forth. default false % * __ComplexOutput__ performs a forward transformation of 1D or 2D real -% array; the result, though being a complex array, has complex-conjugate -% symmetry (*CCS*, see the function description below for details), and -% such an array can be packed into a real array of the same size as -% input, which is the fastest option and which is what the function does -% by default; however, you may wish to get a full complex array (for -% simpler spectrum analysis, and so on), pass the flag to enable the -% function to produce a full-size complex output array. default false +% array; the result, though being a complex array, has complex-conjugate +% symmetry (*CCS*, see the function description below for details), and such +% an array can be packed into a real array of the same size as input, which +% is the fastest option and which is what the function does by default; +% however, you may wish to get a full complex array (for simpler spectrum +% analysis, and so on), pass the flag to enable the function to produce a +% full-size complex output array. default false % * __RealOutput__ performs an inverse transformation of a 1D or 2D complex -% array; the result is normally a complex array of the same size, -% however, if the input array has conjugate-complex symmetry (for -% example, it is a result of forward transformation with `ComplexOutput` -% flag), the output is a real array; while the function itself does not -% check whether the input is symmetrical or not, you can pass the flag -% and then the function will assume the symmetry and produce the real -% output array (note that when the input is packed into a real array and -% inverse transformation is executed, the function treats the input as a -% packed complex-conjugate symmetrical array, and the output will also -% be a real array). default false +% array; the result is normally a complex array of the same size, however, +% if the input array has conjugate-complex symmetry (for example, it is a +% result of forward transformation with `ComplexOutput` flag), the output is +% a real array; while the function itself does not check whether the input +% is symmetrical or not, you can pass the flag and then the function will +% assume the symmetry and produce the real output array (note that when the +% input is packed into a real array and inverse transformation is executed, +% the function treats the input as a packed complex-conjugate symmetrical +% array, and the output will also be a real array). default false % * __ComplexInput__ specifies that input is complex input. If this flag is -% set, the input must have 2 channels. On the other hand, for backwards -% compatibility reason, if input has 2 channels, input is already -% considered complex. default false +% set, the input must have 2 channels. On the other hand, for backwards +% compatibility reasons, if the input has 2 channels, it is already considered +% complex.
default false % * __NonzeroRows__ when the parameter is not zero, the function assumes that -% only the first `NonzeroRows` rows of the input array (`Inverse` is not -% set) or only the first `NonzeroRows` of the output array (`Inverse` is -% set) contain non-zeros, thus, the function can handle the rest of the -% rows more efficiently and save some time; this technique is very -% useful for calculating array cross-correlation or convolution using -% DFT. default 0 +% only the first `NonzeroRows` rows of the input array (`Inverse` is not set) +% or only the first `NonzeroRows` rows of the output array (`Inverse` is set) +% contain non-zeros, thus, the function can handle the rest of the rows more +% efficiently and save some time; this technique is very useful for +% calculating array cross-correlation or convolution using DFT. default 0 % % The function cv.dft performs one of the following: % % * Forward the Fourier transform of a 1D vector of N elements: % -% Y = FN * X +% Y = FN * X % % where % -% FN(j,k) = exp(-2*pi * 1i * j * k/N) +% FN(j,k) = exp(-2*pi * 1i * j * k/N) % % and `1i = sqrt(-1)` % % * Inverse the Fourier transform of a 1D vector of N elements: % -% X'= inv(FN) * Y = ctranspose(FN) * Y -% X = (1/N) * X' +% X'= inv(FN) * Y = ctranspose(FN) * Y +% X = (1/N) * X' % % where `ctranspose(F) = transpose(conj(F)) = transpose(real(F) - 1i*imag(F))` % % * Forward the 2D Fourier transform of a MxN matrix: % -% Y = FM * X * FN +% Y = FM * X * FN % % * Inverse the 2D Fourier transform of a MxN matrix: % -% X'= ctranspose(FM) * Y * ctranspose(FN) -% X = (1/(M*N)) * X' +% X'= ctranspose(FM) * Y * ctranspose(FN) +% X = (1/(M*N)) * X' % % In case of real (single-channel) data, the output spectrum of the forward % Fourier transform or input spectrum of the inverse Fourier transform can be % represented in a packed format called *CCS* (complex-conjugate-symmetrical). % It was borrowed from IPL (Intel Image Processing Library). Here is how 2D % *CCS* spectrum looks: % -% CCS = [ -% ReY(0,0), ReY(0,1), ImY(0,1), ..., ReY(0,N/2-1), ImY(0,N/2-1), ReY(0,N/2) -% ReY(1,0), ReY(1,1), ImY(1,1), ..., ReY(1,N/2-1), ImY(1,N/2-1), ReY(1,N/2) -% ImY(1,0), ReY(2,1), ImY(2,1), ..., ReY(2,N/2-1), ImY(2,N/2-1), ImY(1,N/2) -% ... -% ReY(M/2-1,0), ReY(M-3,1), ImY(M-3,1), ..., ReY(M-3,N/2-1), ImY(M-3,N/2-1), ReY(M/2-1,N/2) -% ImY(M/2-1,0), ReY(M-2,1), ImY(M-2,1), ..., ReY(M-2,N/2-1), ImY(M-2,N/2-1), ImY(M/2-1,N/2) -% ReY(M/2, 0), ReY(M-1,1), ImY(M-1,1), ..., ReY(M-1,N/2-1), ImY(M-1,N/2-1), ReY(M/2, N/2) -% ] +% CCS = [ +% ReY(0,0), ReY(0,1), ImY(0,1), ..., ReY(0,N/2-1), ImY(0,N/2-1), ReY(0,N/2) +% ReY(1,0), ReY(1,1), ImY(1,1), ..., ReY(1,N/2-1), ImY(1,N/2-1), ReY(1,N/2) +% ImY(1,0), ReY(2,1), ImY(2,1), ..., ReY(2,N/2-1), ImY(2,N/2-1), ImY(1,N/2) +% ... +% ReY(M/2-1,0), ReY(M-3,1), ImY(M-3,1), ..., ReY(M-3,N/2-1), ImY(M-3,N/2-1), ReY(M/2-1,N/2) +% ImY(M/2-1,0), ReY(M-2,1), ImY(M-2,1), ..., ReY(M-2,N/2-1), ImY(M-2,N/2-1), ImY(M/2-1,N/2) +% ReY(M/2, 0), ReY(M-1,1), ImY(M-1,1), ..., ReY(M-1,N/2-1), ImY(M-1,N/2-1), ReY(M/2, N/2) +% ] % % In case of 1D transform of a real vector, the output looks like the first % row of the matrix above.
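+% +% A small forward/inverse round-trip sketch (an added example, not from the original docs): +% +%     x = single(1:8); +%     Y = cv.dft(x, 'ComplexOutput',true); +%     x2 = cv.dft(Y, 'Inverse',true, 'Scale',true, 'RealOutput',true); +%     % max(abs(x - x2)) should be negligible, up to rounding +%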
@@ -144,40 +141,40 @@ % The sample below illustrates how to calculate a DFT-based convolution of two % 2D real arrays: % -% function C = convolveDFT(A, B) -% % calculate the size of DFT transform -% dftSize = size(A) + size(B) - 1; -% dftSize(1) = cv.getOptimalDFTSize(dftSize(1)); -% dftSize(2) = cv.getOptimalDFTSize(dftSize(2)); +% function C = convolveDFT(A, B) +% % calculate the size of DFT transform +% dftSize = size(A) + size(B) - 1; +% dftSize(1) = cv.getOptimalDFTSize(dftSize(1)); +% dftSize(2) = cv.getOptimalDFTSize(dftSize(2)); % -% % allocate temporary buffers and initialize them with 0's -% tempA = zeros(dftSize, class(A)); -% tempB = zeros(dftSize, class(B)); +% % allocate temporary buffers and initialize them with 0's +% tempA = zeros(dftSize, class(A)); +% tempB = zeros(dftSize, class(B)); % -% % copy A/B to the top-left corners of tempA/tempB respectively -% tempA(1:size(A,1), 1:size(A,2)) = A; -% tempB(1:size(B,1), 1:size(B,2)) = B; +% % copy A/B to the top-left corners of tempA/tempB respectively +% tempA(1:size(A,1), 1:size(A,2)) = A; +% tempB(1:size(B,1), 1:size(B,2)) = B; % -% % now transform the padded A & B in-place; -% % use 'NonzeroRows' hint for faster processing -% tempA = cv.dft(tempA, 'NonzeroRows',size(A,1)); -% tempB = cv.dft(tempB, 'NonzeroRows',size(B,1)); +% % now transform the padded A & B in-place; +% % use 'NonzeroRows' hint for faster processing +% tempA = cv.dft(tempA, 'NonzeroRows',size(A,1)); +% tempB = cv.dft(tempB, 'NonzeroRows',size(B,1)); % -% % multiply the spectrums; -% % the function handles packed spectrum representations well -% C = cv.mulSpectrums(tempA, tempB); +% % multiply the spectrums; +% % the function handles packed spectrum representations well +% C = cv.mulSpectrums(tempA, tempB); % -% % the output array size -% sz = abs(size(A) - size(B)) + 1; +% % the output array size +% sz = abs(size(A) - size(B)) + 1; % -% % transform the product back from the frequency domain. -% % Even though all the result rows will be non-zero, -% % you need only the first sz(1) of them -% C = cv.dft(C, 'Inverse',true, 'Scale',true, 'NonzeroRows',sz(1)); +% % transform the product back from the frequency domain. +% % Even though all the result rows will be non-zero, +% % you need only the first sz(1) of them +% C = cv.dft(C, 'Inverse',true, 'Scale',true, 'NonzeroRows',sz(1)); % -% % now slice the result part from C -% C = C(1:sz(1), 1:sz(2)); -% end +% % now slice the result part from C +% C = C(1:sz(1), 1:sz(2)); +% end % % To optimize this sample, consider the following approaches: % diff --git a/+cv/dilate.m b/+cv/dilate.m index df47d81a9..0dc7aae03 100644 --- a/+cv/dilate.m +++ b/+cv/dilate.m @@ -1,36 +1,35 @@ %DILATE Dilates an image by using a specific structuring element % -% dst = cv.dilate(src) -% dst = cv.dilate(src, 'OptionName',optionValue, ...) +% dst = cv.dilate(src) +% dst = cv.dilate(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ input image; the number of channels can be arbitrary, but the -% depth should be one of `uint8`, `uint16`, `int16`, `single` or -% `double`. +% depth should be one of `uint8`, `uint16`, `int16`, `single` or `double`. % % ## Output % * __dst__ output image of the same size and type as `src`. % % ## Options % * __Element__ Structuring element used for dilation. By default, a 3x3 -% rectangular structuring element is used `ones(3)`. Kernel can be -% created using cv.getStructuringElement +% rectangular structuring element is used `ones(3)`. 
Kernel can be created +% using cv.getStructuringElement % * __Anchor__ Position of the anchor within the element. The default value -% [-1, -1] means that the anchor is at the element center. +% [-1, -1] means that the anchor is at the element center. % * __Iterations__ Number of times dilation is applied. default 1 % * __BorderType__ Border mode used to extrapolate pixels outside of the -% image. See cv.copyMakeBorder. default 'Constant' +% image. See cv.copyMakeBorder. default 'Constant' % * __BorderValue__ Border value in case of a constant border. The default -% value has a special meaning which gets automatically translated to the -% minimum value of the image class type (`intmin(class(img))` for -% integer types and `realmin(class(img))` for floating-point types). -% See cv.morphologyDefaultBorderValue +% value has a special meaning which gets automatically translated to the +% minimum value of the image class type (`intmin(class(img))` for integer +% types and `realmin(class(img))` for floating-point types). See +% cv.morphologyDefaultBorderValue % % The function dilates the source image using the specified structuring % element that determines the shape of a pixel neighborhood over which the % maximum is taken: % -% dst(x,y) = max_{(xp,yp): Element(xp,yp)!=0} src(x+xp, y+yp) +% dst(x,y) = max_{(xp,yp): Element(xp,yp)!=0} src(x+xp, y+yp) % % Dilation can be applied several (`Iterations`) times. In case of % multi-channel images, each channel is processed independently. diff --git a/+cv/distanceTransform.m b/+cv/distanceTransform.m index 1eaff2be3..a155de4fb 100644 --- a/+cv/distanceTransform.m +++ b/+cv/distanceTransform.m @@ -1,42 +1,42 @@ %DISTANCETRANSFORM Calculates the distance to the closest zero pixel for each pixel of the source image % -% dst = cv.distanceTransform(src) -% [dst, labels] = cv.distanceTransform(src) -% [...] = cv.distanceTransform(..., 'OptionName',optionValue, ...) +% dst = cv.distanceTransform(src) +% [dst, labels] = cv.distanceTransform(src) +% [...] = cv.distanceTransform(..., 'OptionName',optionValue, ...) % % ## Input % * __src__ 8-bit, single-channel (binary) source image. % % ## Output % * __dst__ Output image with calculated distances. It is an 8-bit or 32-bit -% floating-point, single-channel image of the same size as `src`. +% floating-point, single-channel image of the same size as `src`. % * __labels__ Optional output 2D array of labels (the discrete Voronoi -% diagram). It has the type `int32` and the same size as `src`. +% diagram). It has the type `int32` and the same size as `src`. % % ## Options % * __DistanceType__ Type of distance, default 'L2'. One of: -% * __L1__ `distance = |x1-x2| + |y1-y2|` -% * __L2__ the simple euclidean distance -% * __C__ `distance = max(|x1-x2|,|y1-y2|)` +% * __L1__ `distance = |x1-x2| + |y1-y2|` +% * __L2__ the simple Euclidean distance +% * __C__ `distance = max(|x1-x2|,|y1-y2|)` % * __MaskSize__ Size of the distance transform mask. The 'Precise' option is
In case of the +% 'L1' or 'C' distance type, the parameter is forced to 3 because a 3x3 mask +% gives the same result as 5x5 or any larger aperture. The following options +% are available: +% * __3__ approximate distance transform with 3x3 mask (default) +% * __5__ approximate distance transform with 5x5 mask +% * __Precise__, __0__ precise distance transform % * __LabelType__ Type of the label array to build, default 'CComp'. Only -% supported by the second variant with `labels` output. One of: -% * __CComp__ each connected component of zeros in `src` (as well as all -% the non-zero pixels closest to the connected component) will be -% assigned the same label. -% * __Pixel__ each zero pixel (and all the non-zero pixels closest to -% it) gets its own label. +% supported by the second variant with `labels` output. One of: +% * __CComp__ each connected component of zeros in `src` (as well as all the +% non-zero pixels closest to the connected component) will be assigned the +% same label. +% * __Pixel__ each zero pixel (and all the non-zero pixels closest to it) +% gets its own label. % * __DstType__ Type of output image `dst`. It can be `uint8` or `single`. -% Only supported by the first variant without `labels` output. Type -% `uint8` can be used only for the first variant of the function and -% `DistanceType = 'L1'`, otherwise the default `single` is assumed. +% Only supported by the first variant without `labels` output. Type `uint8` +% can be used only for the first variant of the function and +% `DistanceType = 'L1'`, otherwise the default `single` is assumed. % % The function cv.distanceTransform calculates the approximate or precise % distance from every binary image pixel to the nearest zero pixel. For zero @@ -60,12 +60,12 @@ % `c`, OpenCV uses the values suggested in the original paper: % % * __L1__ -% * 3x3: `a = 1, b = 2` +% * 3x3: `a = 1, b = 2` % * __L2__ -% * 3x3: `a = 0.955, b = 1.3693` -% * 5x5: `a = 1, b = 1.4, c = 2.1969` +% * 3x3: `a = 0.955, b = 1.3693` +% * 5x5: `a = 1, b = 1.4, c = 2.1969` % * __C__ -% * 3x3: `a = 1, b = 1` +% * 3x3: `a = 1, b = 1` % % Typically, for a fast, coarse distance estimation 'L2', a 3x3 mask is used. % For a more accurate distance estimation 'L2', a 5x5 mask or the precise diff --git a/+cv/divide.m b/+cv/divide.m index f53fc20a6..5e7c51371 100644 --- a/+cv/divide.m +++ b/+cv/divide.m @@ -1,7 +1,7 @@ %DIVIDE Performs per-element division of two arrays or a scalar by an array % -% dst = cv.divide(src1, src2) -% dst = cv.divide(src1, src2, 'OptionName',optionValue, ...) +% dst = cv.divide(src1, src2) +% dst = cv.divide(src1, src2, 'OptionName',optionValue, ...) % % ## Input % * __src1__ first input array. @@ -13,12 +13,12 @@ % ## Options % * __Scale__ optional scalar factor. default 1 % * __DType__ optional depth of the output array; if -1, `dst` will have depth -% `class(src2)`, but in case of an array-by-array division, you can only -% pass -1 when `class(src1)==class(src2)`. default -1 +% `class(src2)`, but in case of an array-by-array division, you can only +% pass -1 when `class(src1)==class(src2)`. default -1 % % The function cv.divide divides one array by another: % -% dst(I) = saturate(src1(I)*scale / src2(I)) +% dst(I) = saturate(src1(I)*scale / src2(I)) % % When `src2(I)` is zero, `dst(I)` will also be zero. Different channels of % multi-channel arrays are processed independently. 
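+% +% For example, a minimal sketch of the formula above (illustrative values only): +% +%     A = single([1 2; 3 4]); +%     B = single([2 4; 6 8]); +%     C = cv.divide(A, B, 'Scale',2); +%     % C(I) = saturate(A(I)*2 / B(I)), i.e. all ones here +%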
diff --git a/+cv/drawChessboardCorners.m b/+cv/drawChessboardCorners.m index 995c9257f..b1ac2216d 100644 --- a/+cv/drawChessboardCorners.m +++ b/+cv/drawChessboardCorners.m @@ -1,22 +1,22 @@ %DRAWCHESSBOARDCORNERS Renders the detected chessboard corners % -% im = cv.drawChessboardCorners(im, patternSize, corners) -% im = cv.drawChessboardCorners(..., 'OptionName', optionValue, ...) +% im = cv.drawChessboardCorners(im, patternSize, corners) +% im = cv.drawChessboardCorners(..., 'OptionName', optionValue, ...) % % ## Input % * __im__ Source image. It must be an 8-bit color image. % * __patternSize__ Number of inner corners per a chessboard row and column -% (`patternSize = [points_per_row, points_per_column]`). +% (`patternSize = [points_per_row, points_per_column]`). % * __corners__ Array of detected corners, the output of -% cv.findChessboardCorners. +% cv.findChessboardCorners. % % ## Output % * __im__ Destination image. % % ## Options % * __PatternWasFound__ Parameter indicating whether the complete board was -% found or not. The return value of cv.findChessboardCorners should be -% passed here. default true +% found or not. The return value of cv.findChessboardCorners should be +% passed here. default true % % The function draws individual chessboard corners detected either as red % circles if the board was not found, or as colored corners connected with diff --git a/+cv/drawContours.m b/+cv/drawContours.m index ffcd2987d..4e90d143c 100644 --- a/+cv/drawContours.m +++ b/+cv/drawContours.m @@ -1,42 +1,42 @@ %DRAWCONTOURS Draws contours outlines or filled contours % -% im = cv.drawContours(im, contours) -% im = cv.drawContours(im, contours, 'OptionName', optionValue, ...) +% im = cv.drawContours(im, contours) +% im = cv.drawContours(im, contours, 'OptionName', optionValue, ...) % % ## Input % * __im__ Image on which to draw contours. % * __contours__ All the input contours. Each contour is stored as a 2D point -% vector (integer points). A cell array of cell arrays of the form: -% `{{[x,y],[x,y],...}, ...}`, or a cell array of Nx2 matrices. +% vector (integer points). A cell array of cell arrays of the form: +% `{{[x,y],[x,y],...}, ...}`, or a cell array of Nx2 matrices. % % ## Output % * __im__ Destination image, same size and type as input `im`. % % ## Options % * __ContourIdx__ Parameter indicating a contour to draw. A zero-based index -% in the range `[0,length(contours)-1]`. If it is negative, all the -% contours are drawn. default -1 +% in the range `[0,length(contours)-1]`. If it is negative, all the contours +% are drawn. default -1 % * __Color__ Color of the contours. default is white color. % * __Thickness__ Thickness of lines the contours are drawn with. If it is -% negative (for example, -1 or the string 'Filled'), the contour -% interiors are drawn. default 1 +% negative (for example, -1 or the string 'Filled'), the contour interiors +% are drawn. default 1 % * __LineType__ Line connectivity. One of: -% * __4__ 4-connected line -% * __8__ 8-connected line (default) -% * __AA__ anti-aliased line +% * __4__ 4-connected line +% * __8__ 8-connected line (default) +% * __AA__ anti-aliased line % * __Hierarchy__ Optional information about hierarchy. It is only needed if -% you want to draw only some of the contours (see `MaxLevel`). -% A cell array of 4-element vectors for each contour of the form -% `{[next,prev,child,parent], ...}`, or a Nx4/Nx1x4/1xNx4 numeric -% matrix of integers. default empty +% you want to draw only some of the contours (see `MaxLevel`). 
A cell array +% of 4-element vectors for each contour of the form +% `{[next,prev,child,parent], ...}`, or a Nx4/Nx1x4/1xNx4 numeric matrix of +% integers. default empty % * __MaxLevel__ Maximal level for drawn contours. If it is 0, only the -% specified contour is drawn. If it is 1, the function draws the -% contour(s) and all the nested contours. If it is 2, the function draws -% the contours, all the nested contours, all the nested-to-nested -% contours, and so on. This parameter is only taken into account when -% there is `Hierarchy` available. default `intmax('int32')` +% specified contour is drawn. If it is 1, the function draws the contour(s) +% and all the nested contours. If it is 2, the function draws the contours, +% all the nested contours, all the nested-to-nested contours, and so on. +% This parameter is only taken into account when there is `Hierarchy` +% available. default `intmax('int32')` % * __Offset__ Optional contour shift parameter. Shift all the drawn contours -% by the specified `offset = (dx,dy)`. default [0,0] +% by the specified `offset = (dx,dy)`. default [0,0] % % The function draws contour outlines in the image if `Thickness >= 0` or % fills the area bounded by the contours if `Thickness < 0`. @@ -45,27 +45,27 @@ % The example below shows how to retrieve connected components from a binary % image and label them: % -% % binary (black-n-white) image -% src = cv.imread('bw.png', 'Flags',0); -% src = logical(src > 0); +% % binary (black-n-white) image +% src = cv.imread('bw.png', 'Flags',0); +% src = logical(src > 0); % -% [contours,hierarchy] = cv.findContours(src, 'Mode','CComp', ... -% 'Method','Simple'); +% [contours,hierarchy] = cv.findContours(src, 'Mode','CComp', ... +% 'Method','Simple'); % -% % iterate through all the top-level contours, -% % draw each connected component with its own random color -% dst = zeros([size(src),3], 'uint8'); -% idx = 0; -% while idx >= 0 -% color = randi([0 255], [1 3], 'uint8'); -% dst = cv.drawContours(dst, contours, 'ContourIdx',idx, ... -% 'Color',color, 'Thickness','Filled', 'LineType',8, ... -% 'Hierarchy',hierarchy); -% idx = hierarchy{idx+1}(1); -% end +% % iterate through all the top-level contours, +% % draw each connected component with its own random color +% dst = zeros([size(src),3], 'uint8'); +% idx = 0; +% while idx >= 0 +% color = randi([0 255], [1 3], 'uint8'); +% dst = cv.drawContours(dst, contours, 'ContourIdx',idx, ... +% 'Color',color, 'Thickness','Filled', 'LineType',8, ... +% 'Hierarchy',hierarchy); +% idx = hierarchy{idx+1}(1); +% end % -% subplot(121), imshow(src), title('Source') -% subplot(122), imshow(dst), title('Components') +% subplot(121), imshow(src), title('Source') +% subplot(122), imshow(dst), title('Components') % % See also: cv.findContours, cv.fillPoly, visboundaries, bwlabel % diff --git a/+cv/drawKeypoints.m b/+cv/drawKeypoints.m index 3a51daebc..dfe2c89ce 100644 --- a/+cv/drawKeypoints.m +++ b/+cv/drawKeypoints.m @@ -1,40 +1,40 @@ %DRAWKEYPOINTS Draws keypoints % -% out = cv.drawKeypoints(im, keypoints) -% out = cv.drawKeypoints(im, keypoints, 'OptionName', optionValue, ...) +% out = cv.drawKeypoints(im, keypoints) +% out = cv.drawKeypoints(im, keypoints, 'OptionName', optionValue, ...) % % ## Input % * __im__ Source image. -% * __keypoints__ Keypoints from the source image. 
-% A 1-by-N structure array with the following fields: -% * __pt__ coordinates of the keypoint `[x,y]` -% * __size__ diameter of the meaningful keypoint neighborhood -% * __angle__ computed orientation of the keypoint (-1 if not -% applicable). Its possible values are in a range [0,360) degrees. -% It is measured relative to image coordinate system (y-axis is -% directed downward), i.e in clockwise. -% * __response__ the response by which the most strong keypoints have -% been selected. Can be used for further sorting or subsampling. -% * __octave__ octave (pyramid layer) from which the keypoint has been -% extracted. -% * **class_id** object id that can be used to clustered keypoints by an -% object they belong to. +% * __keypoints__ Keypoints from the source image. A 1-by-N structure array +% with the following fields: +% * __pt__ coordinates of the keypoint `[x,y]` +% * __size__ diameter of the meaningful keypoint neighborhood +% * __angle__ computed orientation of the keypoint (-1 if not applicable). +% Its possible values are in a range [0,360) degrees. It is measured +% relative to image coordinate system (y-axis is directed downward), i.e. +% clockwise. +% * __response__ the response by which the strongest keypoints have been +% selected. Can be used for further sorting or subsampling. +% * __octave__ octave (pyramid layer) from which the keypoint has been +% extracted. +% * **class_id** object id that can be used to cluster keypoints by an +% object they belong to. % % ## Output % * __out__ Output image. Its content depends on the option values defining -% what is drawn in the output image. See possible options below. -% By default, the source image, and single keypoints will be drawn. -% For each keypoint, only the center point will be drawn (without -% a circle around the keypoint with the keypoint size and orientation). +% what is drawn in the output image. See possible options below. By default, +% the source image and single keypoints will be drawn. For each keypoint, +% only the center point will be drawn (without a circle around the keypoint +% with the keypoint size and orientation). % % ## Options % * __Color__ Color of keypoints. If all -1, random colors are picked. -% default [-1,-1,-1,-1] +% default [-1,-1,-1,-1] % * __DrawRichKeypoints__ For each keypoint, the circle around keypoint with -% keypoint size and orientation will be drawn. default false. +% keypoint size and orientation will be drawn. default false. % * __OutImage__ If set, keypoints will be drawn on existing content of output -% image, otherwise source image is used instead. Default not set -% (i.e keypoints are drawn on top of `im`). +% image, otherwise source image is used instead. Default not set +% (i.e. keypoints are drawn on top of `im`). % % See also: cv.drawMatches, cv.FeatureDetector % diff --git a/+cv/drawMarker.m b/+cv/drawMarker.m index b2b648af5..5e4b051be 100644 --- a/+cv/drawMarker.m +++ b/+cv/drawMarker.m @@ -1,7 +1,7 @@ %DRAWMARKER Draws a marker on a predefined position in an image % -% img = cv.drawMarker(img, pos) -% img = cv.drawMarker(..., 'OptionName', optionValue, ...) +% img = cv.drawMarker(img, pos) +% img = cv.drawMarker(..., 'OptionName', optionValue, ...) % % ## Input % * __img__ Input image. @@ -13,20 +13,20 @@ % ## Options % * __Color__ Line color. Default is a black color. % * __MarkerType__ The specific type of marker you want to use. One of: -% * __Cross__ (`+`) (default) A crosshair marker shape.
-% * __TiltedCross__ (`x`) A 45 degree tilted crosshair marker shape. -% * __Star__ (`*`) A star marker shape, combination of cross and tilted -% cross. -% * __Diamond__ (`d`) A diamond marker shape. -% * __Square__ (`s`) A square marker shape. -% * __TriangleUp__ (`^`) An upwards pointing triangle marker shape. -% * __TriangleDown__ (`v`) A downwards pointing triangle marker shape. +% * __Cross__ (`+`) (default) A crosshair marker shape. +% * __TiltedCross__ (`x`) A 45 degree tilted crosshair marker shape. +% * __Star__ (`*`) A star marker shape, combination of cross and tilted +% cross. +% * __Diamond__ (`d`) A diamond marker shape. +% * __Square__ (`s`) A square marker shape. +% * __TriangleUp__ (`^`) An upwards pointing triangle marker shape. +% * __TriangleDown__ (`v`) A downwards pointing triangle marker shape. % * __MarkerSize__ The length of the marker axis. Default is 20 pixels. % * __Thickness__ Line thickness. default 1 % * __LineType__ Type of the line. One of: -% * __4__ 4-connected line -% * __8__ 8-connected line (default) -% * __AA__ anti-aliased line +% * __4__ 4-connected line +% * __8__ 8-connected line (default) +% * __AA__ anti-aliased line % % The function cv.drawMarker draws a marker on a given position in the image. % For the moment several marker types are supported, see `MarkerType` for more diff --git a/+cv/drawMatches.m b/+cv/drawMatches.m index 405b347e0..a2c7a3979 100644 --- a/+cv/drawMatches.m +++ b/+cv/drawMatches.m @@ -1,69 +1,67 @@ %DRAWMATCHES Draws the found matches of keypoints from two images % -% out = cv.drawMatches(im1, keypoints1, im2, keypoints2, matches1to2) -% out = cv.drawMatches(..., 'OptionName', optionValue, ...) +% out = cv.drawMatches(im1, keypoints1, im2, keypoints2, matches1to2) +% out = cv.drawMatches(..., 'OptionName', optionValue, ...) % % ## Input % * __im1__ First source image. -% * __keypoints1__ Keypoints from the first source image. -% A 1-by-N structure array with the following fields: -% * __pt__ coordinates of the keypoint `[x,y]` -% * __size__ diameter of the meaningful keypoint neighborhood -% * __angle__ computed orientation of the keypoint (-1 if not -% applicable). Its possible values are in a range [0,360) degrees. -% It is measured relative to image coordinate system (y-axis is -% directed downward), i.e in clockwise. -% * __response__ the response by which the most strong keypoints have -% been selected. Can be used for further sorting or subsampling. -% * __octave__ octave (pyramid layer) from which the keypoint has been -% extracted. -% * **class_id** object id that can be used to clustered keypoints by an -% object they belong to. +% * __keypoints1__ Keypoints from the first source image. A 1-by-N structure +% array with the following fields: +% * __pt__ coordinates of the keypoint `[x,y]` +% * __size__ diameter of the meaningful keypoint neighborhood +% * __angle__ computed orientation of the keypoint (-1 if not applicable). +% Its possible values are in a range [0,360) degrees. It is measured +% relative to image coordinate system (y-axis is directed downward), i.e. +% clockwise. +% * __response__ the response by which the strongest keypoints have been +% selected. Can be used for further sorting or subsampling. +% * __octave__ octave (pyramid layer) from which the keypoint has been +% extracted. +% * **class_id** object id that can be used to cluster keypoints by an +% object they belong to. % * __im2__ Second source image. % * __keypoints2__ Keypoints from the second source image.
Same format as -% `keypoints1`. -% * __matches1to2__ Matches from the first image to the second one, which -% means that `keypoints1(i)` has a corresponding point in -% `keypoints2(matches(i))`. -% A 1-by-M structure array with the following fields: -% * __queryIdx__ query descriptor index (zero-based index) -% * __trainIdx__ train descriptor index (zero-based index) -% * __imgIdx__ train image index (zero-based index) -% * __distance__ distance between descriptors (scalar) +% `keypoints1`. +% * __matches1to2__ Matches from the first image to the second one, which means +% that `keypoints1(i)` has a corresponding point in `keypoints2(matches(i))`. +% A 1-by-M structure array with the following fields: +% * __queryIdx__ query descriptor index (zero-based index) +% * __trainIdx__ train descriptor index (zero-based index) +% * __imgIdx__ train image index (zero-based index) +% * __distance__ distance between descriptors (scalar) % % ## Output % * __out__ Output image. Its content depends on the option values defining -% what is drawn in the output image. See possible options below. -% By default, the two source images, matches, and single -% keypoints will be drawn. For each keypoint, only the center point -% will be drawn (without a circle around the keypoint with the -% keypoint size and orientation). +% what is drawn in the output image. See possible options below. By default, +% the two source images, matches, and single keypoints will be drawn. For +% each keypoint, only the center point will be drawn (without a circle +% around the keypoint with the keypoint size and orientation). % % ## Options -% * __MatchColor__ Color of matches (lines and connected keypoints). If all -% -1, the color is generated randomly. default [-1,-1,-1,-1]. +% * __MatchColor__ Color of matches (lines and connected keypoints). If all -1, +% the color is generated randomly. default [-1,-1,-1,-1]. % * __SinglePointColor__ Color of single keypoints (circles), which means that -% keypoints do not have the matches. If all -1, the color is generated -% randomly. default [-1,-1,-1,-1]. +% keypoints do not have the matches. If all -1, the color is generated +% randomly. default [-1,-1,-1,-1]. % * __MatchesMask__ Mask determining which matches are drawn. If the mask is -% empty, all matches are drawn. default empty. +% empty, all matches are drawn. default empty. % * __NotDrawSinglePoints__ Single keypoints will not be drawn. default false % * __DrawRichKeypoints__ For each keypoint, the circle around keypoint with -% keypoint size and orientation will be drawn. default false. +% keypoint size and orientation will be drawn. default false. % * __OutImage__ If set, matches will be drawn on existing content of output -% image, otherwise source image is used instead. Default not set +% image, otherwise source image is used instead. Default not set % % This function draws matches of keypoints from two images in the output % image. Match is a line connecting two keypoints (circles). 
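%
% For example, a minimal end-to-end sketch (an illustrative addition, not
% part of the patch; the image file names are placeholders):
%
%     im1 = imread('img1.png');
%     im2 = imread('img2.png');
%     detector = cv.ORB();
%     [kp1, desc1] = detector.detectAndCompute(im1);
%     [kp2, desc2] = detector.detectAndCompute(im2);
%     matcher = cv.DescriptorMatcher('BruteForce-Hamming');
%     matches = matcher.match(desc1, desc2);
%     out = cv.drawMatches(im1, kp1, im2, kp2, matches);
%     imshow(out)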
% % In pseudo-code, the function works as follows: % -% for m=1:numel(matches1to2) -% if ~MatchesMask(m), continue, end -% kp1 = keypoints1(matches1to2(m).queryIdx + 1); -% kp2 = keypoints2(matches1to2(m).trainIdx + 1); -% draw_match(kp1, kp2); -% end +% for m=1:numel(matches1to2) +% if ~MatchesMask(m), continue, end +% kp1 = keypoints1(matches1to2(m).queryIdx + 1); +% kp2 = keypoints2(matches1to2(m).trainIdx + 1); +% draw_match(kp1, kp2); +% end % % See also: cv.drawKeypoints, showMatchedFeatures % diff --git a/+cv/edgePreservingFilter.m b/+cv/edgePreservingFilter.m index 6923fdea3..b87d8a2aa 100644 --- a/+cv/edgePreservingFilter.m +++ b/+cv/edgePreservingFilter.m @@ -1,7 +1,7 @@ %EDGEPRESERVINGFILTER Edge-preserving smoothing filter % -% dst = cv.edgePreservingFilter(src) -% dst = cv.edgePreservingFilter(src, 'OptionName',optionValue, ...) +% dst = cv.edgePreservingFilter(src) +% dst = cv.edgePreservingFilter(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ Input 8-bit 3-channel image. @@ -11,13 +11,13 @@ % % ## Options % * __Filter__ Edge preserving filters, one of: -% * __Recursive__ (default) -% * __NormConv__ +% * __Recursive__ (default) +% * __NormConv__ % * __SigmaS__ Range between 0 and 200. default 60 % * __SigmaR__ Range between 0 and 1. default 0.4 % * __FlipChannels__ whether to flip the order of color channels in input -% `src` and output `dst`, between MATLAB's RGB order and OpenCV's BGR -% (input: RGB->BGR, output: BGR->RGB). default false +% `src` and output `dst`, between MATLAB's RGB order and OpenCV's BGR +% (input: RGB->BGR, output: BGR->RGB). default false % % Filtering is the fundamental operation in image and video processing. % Edge-preserving smoothing filters are used in many different applications diff --git a/+cv/eigen.m b/+cv/eigen.m index 7cd97ab77..1288b8ec3 100644 --- a/+cv/eigen.m +++ b/+cv/eigen.m @@ -1,24 +1,27 @@ %EIGEN Calculates eigenvalues and eigenvectors of a symmetric matrix % -% eigenvalues = cv.eigen(src) -% [eigenvalues,eigenvectors,b] = cv.eigen(src) +% evals = cv.eigen(src) +% [evals, evects, b] = cv.eigen(src) % % ## Input -% * __src__ input matrix that must have single or double type, square size -% and be symmetrical (`src' == src`). +% * __src__ input matrix that must have single or double type, square size and +% be symmetrical (`src' == src`). % % ## Output -% * __eigenvalues__ output vector of eigenvalues of the same type as `src`; -% the eigenvalues are stored in the descending order. -% * __eigenvectors__ output matrix of eigenvectors; it has the same size and -% type as `src`; the eigenvectors are stored as subsequent matrix rows, -% in the same order as the corresponding `eigenvalues`. +% * __evals__ output vector of eigenvalues of the same type as `src`; +% the eigenvalues are stored in descending order. +% * __evects__ output matrix of eigenvectors; it has the same size and +% type as `src`; the eigenvectors are stored as subsequent matrix rows, in +% the same order as the corresponding `evals`. % * __b__ bool return value % % The function cv.eigen calculates just eigenvalues, or eigenvalues and % eigenvectors of the symmetric matrix `src`: % -% src*eigenvectors(i,:)' = eigenvalues(i)*eigenvectors(i,:)' +% src * evects(i,:)' = evals(i) * evects(i,:)' % -% See also: cv.PCA, eig +% Note: Use cv.eigenNonSymmetric for calculation of real eigenvalues and +% eigenvectors of a non-symmetric matrix.
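+%
+% ## Example
+% A minimal sketch (an illustrative addition, assuming a small symmetric
+% matrix):
+%
+%     A = [2 1; 1 2];                  % symmetric test matrix
+%     [evals, evects] = cv.eigen(A);   % eigenvalues in descending order
+%     A * evects(1,:)' - evals(1) * evects(1,:)'   % approximately zero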
+% +% See also: cv.eigenNonSymmetric, cv.PCA, eig % diff --git a/+cv/eigenNonSymmetric.m b/+cv/eigenNonSymmetric.m new file mode 100644 index 000000000..a6b183ab1 --- /dev/null +++ b/+cv/eigenNonSymmetric.m @@ -0,0 +1,23 @@ +%EIGENNONSYMMETRIC Calculates eigenvalues and eigenvectors of a non-symmetric matrix (real eigenvalues only) +% +% evals = cv.eigenNonSymmetric(src) +% [evals, evects] = cv.eigenNonSymmetric(src) +% +% ## Input +% * __src__ input square matrix (`single` or `double` type, single-channel). +% +% ## Output +% * __evals__ output vector of eigenvalues of the same type as `src` +% * __evects__ output matrix of eigenvectors of the same type as `src`. +% The eigenvectors are stored as subsequent matrix rows, in the same order +% as the corresponding `evals`. +% +% The function calculates eigenvalues and eigenvectors (optional) of the +% square matrix `src`: +% +% src * evects(i,:)' = evals(i) * evects(i,:)' +% +% Note: Assumes real eigenvalues. +% +% See also: cv.eigen, eig +% diff --git a/+cv/ellipse.m b/+cv/ellipse.m index 63331d14a..8c8533e1a 100644 --- a/+cv/ellipse.m +++ b/+cv/ellipse.m @@ -1,21 +1,20 @@ %ELLIPSE Draws a simple or thick elliptic arc or fills an ellipse sector % -% img = cv.ellipse(img, center, axes) -% img = cv.ellipse(img, box) -% [...] = cv.ellipse(..., 'OptionName', optionValue, ...) +% img = cv.ellipse(img, center, axes) +% img = cv.ellipse(img, box) +% [...] = cv.ellipse(..., 'OptionName', optionValue, ...) % % ## Input % * __img__ Image where the ellipse is drawn. % * __center__ Center of the ellipse `[x,y]`. % * __axes__ Half of the size of the ellipse main axes `[a,b]`. % * __box__ Alternative ellipse representation via a rotated rectangle box. -% This means that the function draws an ellipse inscribed in the rotated -% rectangle. A scalar structure with the following fields: -% * __center__ The rectangle mass center `[x,y]`. -% * __size__ Width and height of the rectangle `[w,h]`. -% * __angle__ The rotation angle in a clockwise direction. When the -% angle is 0, 90, 180, 270 etc., the rectangle becomes an up-right -% rectangle. +% This means that the function draws an ellipse inscribed in the rotated +% rectangle. A scalar structure with the following fields: +% * __center__ The rectangle mass center `[x,y]`. +% * __size__ Width and height of the rectangle `[w,h]`. +% * __angle__ The rotation angle in a clockwise direction. When the angle is +% 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle. % % ## Output % * __img__ Output image, same size and type as input `img`. @@ -26,14 +25,14 @@ % * __EndAngle__ Ending angle of the elliptic arc in degrees. default 360 % * __Color__ 3-element floating-point vector specifying ellipse color. % * __Thickness__ Thickness of the ellipse arc outline, if positive. -% Otherwise, this indicates that a filled ellipse sector is to be drawn -% (-1 or the string 'Filled'). default 1. +% Otherwise, this indicates that a filled ellipse sector is to be drawn +% (-1 or the string 'Filled'). default 1. % * __LineType__ Type of the ellipse boundary. One of: -% * __4__ 4-connected line -% * __8__ 8-connected line (default) -% * __AA__ anti-aliased line +% * __4__ 4-connected line +% * __8__ 8-connected line (default) +% * __AA__ anti-aliased line % * __Shift__ Number of fractional bits in the coordinates of the center and -% values of axes. default 0. +% values of axes. default 0. 
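+%
+% For example, drawing a 270-degree arc of a rotated ellipse (an
+% illustrative sketch; the canvas size and option values are arbitrary):
+%
+%     img = zeros(300, 400, 3, 'uint8');           % blank canvas
+%     img = cv.ellipse(img, [200 150], [100 60], 'Angle',30, ...
+%         'StartAngle',0, 'EndAngle',270, 'Color',[255 0 0], ...
+%         'Thickness',2, 'LineType','AA');
+%     imshow(img)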
% % The first variant of the cv.ellipse function draws an ellipse outline, a % filled ellipse, an elliptic arc, or a filled ellipse sector. The drawing @@ -46,7 +45,7 @@ % `EndAngle`, they are swapped. The figure below explains the meaning of the % parameters to draw the blue arc. % -% ![Parameters of Elliptic Arc][http://docs.opencv.org/3.3.0/ellipse.svg] +% ![Parameters of Elliptic Arc](https://docs.opencv.org/3.3.1/ellipse.svg) % % The second variant of the function (with rotated rectangle as input) does not % support the `Angle`, `StartAngle`, `EndAngle`, and `Shift` parameters diff --git a/+cv/ellipse2Poly.m b/+cv/ellipse2Poly.m index 38dcd57fc..864cfc3fa 100644 --- a/+cv/ellipse2Poly.m +++ b/+cv/ellipse2Poly.m @@ -1,24 +1,24 @@ %ELLIPSE2POLY Approximates an elliptic arc with a polyline % -% pts = cv.ellipse2Poly(center, axes) -% [...] = cv.ellipse2Poly(..., 'OptionName', optionValue, ...) +% pts = cv.ellipse2Poly(center, axes) +% [...] = cv.ellipse2Poly(..., 'OptionName', optionValue, ...) % % ## Input % * __center__ Center of the arc `[x,y]`. % * __axes__ Half of the size of the ellipse main axes `[a,b]`. See cv.ellipse -% for details. +% for details. % % ## Output % * __pts__ Output vector of polyline vertices. An Nx2 numeric matrix -% `[x y; ...]`. +% `[x y; ...]`. % % ## Options % * __Angle__ Rotation angle of the ellipse in degrees. See cv.ellipse for -% details. default 0. +% details. default 0. % * __StartAngle__ Starting angle of the elliptic arc in degrees. default 0 % * __EndAngle__ Ending angle of the elliptic arc in degrees. default 360 % * __Delta__ Angle between the subsequent polyline vertices. It defines the -% approximation accuracy. default 5. +% approximation accuracy. default 5. % % The function cv.ellipse2Poly computes the vertices of a polyline that % approximates the specified elliptic arc. It is used by cv.ellipse. If diff --git a/+cv/equalizeHist.m b/+cv/equalizeHist.m index e6a807f2e..6a6681000 100644 --- a/+cv/equalizeHist.m +++ b/+cv/equalizeHist.m @@ -1,6 +1,6 @@ %EQUALIZEHIST Equalizes the histogram of a grayscale image % -% dst = cv.equalizeHist(src) +% dst = cv.equalizeHist(src) % % ## Input % * __src__ Source 8-bit single channel image. diff --git a/+cv/erode.m b/+cv/erode.m index 3ab3b5e13..a2130d478 100644 --- a/+cv/erode.m +++ b/+cv/erode.m @@ -1,36 +1,35 @@ %ERODE Erodes an image by using a specific structuring element % -% dst = cv.erode(src) -% dst = cv.erode(src, 'OptionName',optionValue, ...) +% dst = cv.erode(src) +% dst = cv.erode(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ input image; the number of channels can be arbitrary, but the -% depth should be one of `uint8`, `uint16`, `int16`, `single` or -% `double`. +% depth should be one of `uint8`, `uint16`, `int16`, `single` or `double`. % % ## Output % * __dst__ output image of the same size and type as `src`. % % ## Options % * __Element__ Structuring element used for erosion. By default, a 3x3 -% rectangular structuring element is used `ones(3)`. Kernel can be -% created using cv.getStructuringElement +% rectangular structuring element is used `ones(3)`. Kernel can be created +% using cv.getStructuringElement % * __Anchor__ Position of the anchor within the element. The default value -% [-1, -1] means that the anchor is at the element center. +% [-1, -1] means that the anchor is at the element center. % * __Iterations__ Number of times erosion is applied. default 1 % * __BorderType__ Border mode used to extrapolate pixels outside of the -% image. See cv.copyMakeBorder.
default 'Constant' +% image. See cv.copyMakeBorder. default 'Constant' % * __BorderValue__ Border value in case of a constant border. The default -% value has a special meaning which gets automatically translated to the -% maximum value of the image class type (`intmax(class(img))` for -% integer types and `realmax(class(img))` for floating-point types). -% See cv.morphologyDefaultBorderValue +% value has a special meaning which gets automatically translated to the +% maximum value of the image class type (`intmax(class(img))` for integer +% types and `realmax(class(img))` for floating-point types). See +% cv.morphologyDefaultBorderValue % % The function erodes the source image using the specified structuring element % that determines the shape of a pixel neighborhood over which the minimum is % taken: % -% dst(x,y) = min_{(xp,yp): Element(xp,yp)!=0} src(x+xp, y+yp) +% dst(x,y) = min_{(xp,yp): Element(xp,yp)!=0} src(x+xp, y+yp) % % Erosion can be applied several (`Iterations`) times. In case of % multi-channel images, each channel is processed independently. diff --git a/+cv/estimateAffine2D.m b/+cv/estimateAffine2D.m index 74cab603f..0df0a4d82 100644 --- a/+cv/estimateAffine2D.m +++ b/+cv/estimateAffine2D.m @@ -1,37 +1,37 @@ %ESTIMATEAFFINE2D Computes an optimal affine transformation between two 2D point sets % -% H = cv.estimateAffine2D(from, to) -% [H, inliers] = cv.estimateAffine2D(...) -% [...] = cv.estimateAffine2D(..., 'OptionName', optionValue, ...) +% H = cv.estimateAffine2D(from, to) +% [H, inliers] = cv.estimateAffine2D(...) +% [...] = cv.estimateAffine2D(..., 'OptionName', optionValue, ...) % % ## Input % * __from__ First input 2D point set. Cell array of 2-element vectors -% `{[x,y],...}` or Nx2/Nx1x2/1xNx2 numeric array. +% `{[x,y],...}` or Nx2/Nx1x2/1xNx2 numeric array. % * __to__ Second input 2D point set. Same size and type as `from`. % % ## Output % * __H__ Output 2D affine transformation matrix 2x3 or empty matrix if -% transformation could not be estimated. +% transformation could not be estimated. % * __inliers__ Output vector of same length as number of points, indicating -% which points are inliers. +% which points are inliers. % % ## Options % * __Method__ Robust method used to compute transformation. RANSAC is the -% default method. The following methods are possible: -% * __Ransac__ RANSAC-based robust method. -% * __LMedS__ Least-Median robust method -% * __RansacThreshold__ Maximum reprojection error in the RANSAC algorithm -% to consider a point as an inlier. Applies only to RANSAC. default 3.0. +% default method. The following methods are possible: +% * __Ransac__ RANSAC-based robust method. +% * __LMedS__ Least-Median robust method +% * __RansacThreshold__ Maximum reprojection error in the RANSAC algorithm to +% consider a point as an inlier. Applies only to RANSAC. default 3.0. % * __MaxIters__ The maximum number of robust method iterations, 2000 -% (default) is the maximum it can be. +% (default) is the maximum it can be. % * __Confidence__ Confidence level, between 0 and 1, for the estimated -% transformation. Anything between 0.95 and 0.99 is usually good -% enough. Values too close to 1 can slow down the estimation -% significantly. Values lower than 0.8-0.9 can result in an -% incorrectly estimated transformation. default 0.99. +% transformation. Anything between 0.95 and 0.99 is usually good enough. +% Values too close to 1 can slow down the estimation significantly. Values +% lower than 0.8-0.9 can result in an incorrectly estimated transformation.
+% default 0.99. % * __RefineIters__ Maximum number of iterations of refining algorithm -% (Levenberg-Marquardt). Passing 0 will disable refining, so the output -% matrix will be output of robust method. default 10 +% (Levenberg-Marquardt). Passing 0 will disable refining, so the output +% matrix will be output of robust method. default 10 % % The function estimates an optimal 2D affine transformation between two 2D % point sets using the selected robust algorithm. diff --git a/+cv/estimateAffine3D.m b/+cv/estimateAffine3D.m index 07d517eab..5e64446a1 100644 --- a/+cv/estimateAffine3D.m +++ b/+cv/estimateAffine3D.m @@ -1,29 +1,31 @@ %ESTIMATEAFFINE3D Computes an optimal affine transformation between two 3D point sets % -% M = cv.estimateAffine3D(src, dst) -% [M, inliers] = cv.estimateAffine3D(...) -% [...] = cv.estimateAffine3D(..., 'OptionName', optionValue, ...) +% M = cv.estimateAffine3D(src, dst) +% [M, inliers] = cv.estimateAffine3D(...) +% [...] = cv.estimateAffine3D(..., 'OptionName', optionValue, ...) % % ## Input % * __src__ First input 3D point set. Cell array of 3-element vectors -% `{[x,y,z],...}` or Nx3/Nx1x3/1xNx3 numeric array. +% `{[x,y,z],...}` or Nx3/Nx1x3/1xNx3 numeric array. % * __dst__ Second input 3D point set. Same size and type as `src`. % % ## Output % * __M__ Output 3D affine transformation matrix 3x4 % * __inliers__ Output vector of same length as number of points, indicating -% which points are inliers. +% which points are inliers. % * __result__ success flag. % % ## Options -% * __RansacThreshold__ Maximum reprojection error in the RANSAC algorithm -% to consider a point as an inlier. default 3.0. +% * __RansacThreshold__ Maximum reprojection error in the RANSAC algorithm to +% consider a point as an inlier. default 3.0. % * __Confidence__ Confidence level, between 0 and 1, for the estimated -% transformation. Anything between 0.95 and 0.99 is usually good -% enough. Values too close to 1 can slow down the estimation -% significantly. Values lower than 0.8-0.9 can result in an -% incorrectly estimated transformation. default 0.99. +% transformation. Anything between 0.95 and 0.99 is usually good enough. +% Values too close to 1 can slow down the estimation significantly. Values +% lower than 0.8-0.9 can result in an incorrectly estimated transformation. +% default 0.99. % % The function estimates an optimal 3D affine transformation between two 3D % point sets using the RANSAC algorithm. % +% See also: cv.estimateAffine2D +% diff --git a/+cv/estimateAffinePartial2D.m b/+cv/estimateAffinePartial2D.m index efc6c3733..e3e3e7ec8 100644 --- a/+cv/estimateAffinePartial2D.m +++ b/+cv/estimateAffinePartial2D.m @@ -1,37 +1,37 @@ %ESTIMATEAFFINEPARTIAL2D Computes an optimal limited affine transformation with 4 degrees of freedom between two 2D point sets % -% H = cv.estimateAffinePartial2D(from, to) -% [H, inliers] = cv.estimateAffinePartial2D(...) -% [...] = cv.estimateAffinePartial2D(..., 'OptionName', optionValue, ...) +% H = cv.estimateAffinePartial2D(from, to) +% [H, inliers] = cv.estimateAffinePartial2D(...) +% [...] = cv.estimateAffinePartial2D(..., 'OptionName', optionValue, ...) % % ## Input % * __from__ First input 2D point set. Cell array of 2-element vectors -% `{[x,y],...}` or Nx2/Nx1x2/1xNx2 numeric array. +% `{[x,y],...}` or Nx2/Nx1x2/1xNx2 numeric array. % * __to__ Second input 2D point set. Same size and type as `from`. 
% % ## Output % * __H__ Output 2D affine transformation (4 degrees of freedom) matrix 2x3 or -% empty matrix if transformation could not be estimated. +% empty matrix if transformation could not be estimated. % * __inliers__ Output vector of same length as number of points, indicating -% which points are inliers. +% which points are inliers. % % ## Options % * __Method__ Robust method used to compute transformation. RANSAC is the -% default method. The following methods are possible: -% * __Ransac__ RANSAC-based robust method. -% * __LMedS__ Least-Median robust method -% * __RansacThreshold__ Maximum reprojection error in the RANSAC algorithm -% to consider a point as an inlier. Applies only to RANSAC. default 3.0. +% default method. The following methods are possible: +% * __Ransac__ RANSAC-based robust method. +% * __LMedS__ Least-Median robust method +% * __RansacThreshold__ Maximum reprojection error in the RANSAC algorithm to +% consider a point as an inlier. Applies only to RANSAC. default 3.0. % * __MaxIters__ The maximum number of robust method iterations, 2000 -% (default) is the maximum it can be. +% (default) is the maximum it can be. % * __Confidence__ Confidence level, between 0 and 1, for the estimated -% transformation. Anything between 0.95 and 0.99 is usually good -% enough. Values too close to 1 can slow down the estimation -% significantly. Values lower than 0.8-0.9 can result in an -% incorrectly estimated transformation. default 0.99. +% transformation. Anything between 0.95 and 0.99 is usually good enough. +% Values too close to 1 can slow down the estimation significantly. Values +% lower than 0.8-0.9 can result in an incorrectly estimated transformation. +% default 0.99. % * __RefineIters__ Maximum number of iterations of refining algorithm -% (Levenberg-Marquardt). Passing 0 will disable refining, so the output -% matrix will be output of robust method. default 10 +% (Levenberg-Marquardt). Passing 0 will disable refining, so the output +% matrix will be output of robust method. default 10 % % The function estimates an optimal 2D affine transformation with 4 degrees of % freedom limited to combinations of translation, rotation, and uniform @@ -43,8 +43,8 @@ % % Estimated transformation matrix is: % -% [cos(theta)*s, -sin(theta)*s, tx; -% sin(theta)*s, cos(theta)*s, ty] +% [cos(theta)*s, -sin(theta)*s, tx; +% sin(theta)*s, cos(theta)*s, ty] % % Where `theta` is the rotation angle, `s` the scaling factor and `tx, ty` are % translations in `x, y` axes respectively. diff --git a/+cv/estimateGlobalMotionLeastSquares.m b/+cv/estimateGlobalMotionLeastSquares.m index b9979a830..3261ee6d7 100644 --- a/+cv/estimateGlobalMotionLeastSquares.m +++ b/+cv/estimateGlobalMotionLeastSquares.m @@ -1,15 +1,15 @@ %ESTIMATEGLOBALMOTIONLEASTSQUARES Estimates best global motion between two 2D point clouds in the least-squares sense % -% M = cv.estimateGlobalMotionLeastSquares(points0, points1) -% [M,rmse] = cv.estimateGlobalMotionLeastSquares(points0, points1) -% [...] = cv.estimateGlobalMotionLeastSquares(..., 'OptionName',optionValue, ...) +% M = cv.estimateGlobalMotionLeastSquares(points0, points1) +% [M,rmse] = cv.estimateGlobalMotionLeastSquares(points0, points1) +% [...] = cv.estimateGlobalMotionLeastSquares(..., 'OptionName',optionValue, ...) % % ## Input % * __points0__ Source set of 2D points of type `single`, stored in numeric -% array (Nx2/Nx1x2/1xNx2) or cell-array of 2-element vectors -% `{[x,y], [x,y], ...}`.
+% array (Nx2/Nx1x2/1xNx2) or cell-array of 2-element vectors +% `{[x,y], [x,y], ...}`. % * __points1__ Destination set of 2D points of type `single`, of same format -% as `points0`. +% as `points0`. % % ## Output % * __M__ 3x3 2D transformation matrix of type `single`. @@ -17,12 +17,12 @@ % % ## Options % * __MotionModel__ Motion model between two point clouds. One of: -% * __Translation__ -% * __TranslationAndScale__ -% * __Rotation__ -% * __Rigid__ -% * __Similarity__ -% * __Affine__ (default) +% * __Translation__ +% * __TranslationAndScale__ +% * __Rotation__ +% * __Rigid__ +% * __Similarity__ +% * __Affine__ (default) % % See also: cv.estimateGlobalMotionRansac % diff --git a/+cv/estimateGlobalMotionRansac.m b/+cv/estimateGlobalMotionRansac.m index 890ba160d..f9291d535 100644 --- a/+cv/estimateGlobalMotionRansac.m +++ b/+cv/estimateGlobalMotionRansac.m @@ -1,15 +1,15 @@ %ESTIMATEGLOBALMOTIONRANSAC Estimates best global motion between two 2D point clouds robustly (using RANSAC method) % -% M = cv.estimateGlobalMotionRansac(points0, points1) -% [M,rmse,ninliers] = cv.estimateGlobalMotionRansac(points0, points1) -% [...] = cv.estimateGlobalMotionRansac(..., 'OptionName',optionValue, ...) +% M = cv.estimateGlobalMotionRansac(points0, points1) +% [M,rmse,ninliers] = cv.estimateGlobalMotionRansac(points0, points1) +% [...] = cv.estimateGlobalMotionRansac(..., 'OptionName',optionValue, ...) % % ## Input % * __points0__ Source set of 2D points of type `single`, stored in numeric -% array (Nx2/Nx1x2/1xNx2) or cell-array of 2-element vectors -% `{[x,y], [x,y], ...}`. +% array (Nx2/Nx1x2/1xNx2) or cell-array of 2-element vectors +% `{[x,y], [x,y], ...}`. % * __points1__ Destination set of 2D points of type `single`, of same format -% as `points0`. +% as `points0`. % % ## Output % * __M__ 3x3 2D transformation matrix of type `single`. @@ -18,32 +18,30 @@ % % ## Options % * __MotionModel__ Motion model between two point clouds. One of: -% * __Translation__ -% * __TranslationAndScale__ -% * __Rotation__ -% * __Rigid__ -% * __Similarity__ -% * __Affine__ (default) +% * __Translation__ +% * __TranslationAndScale__ +% * __Rotation__ +% * __Rigid__ +% * __Similarity__ +% * __Affine__ (default) % * __RansacParams__ RANSAC method parameters. A struct with the following -% fields: -% * __Size__ Subset size. -% * __Thresh__ Maximum re-projection error value to classify -% as inlier. -% * __Eps__ Maximum ratio of incorrect correspondences. -% * __Prob__ Required success probability. -% -% If a string is passed, it uses the default RANSAC parameters for the -% given motion model. Here are the defaults corresponding to each motion -% model: -% -% * __Translation__ `struct('Size',1, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` -% * __TranslationAndScale__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` -% * __Rotation__ `struct('Size',1, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` -% * __Rigid__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` -% * __Similarity__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` -% * __Affine__ `struct('Size',3, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` -% -% By default is it set to 'Affine'. +% fields: +% * __Size__ Subset size. +% * __Thresh__ Maximum re-projection error value to classify as inlier. +% * __Eps__ Maximum ratio of incorrect correspondences. +% * __Prob__ Required success probability. +% +% If a string is passed, it uses the default RANSAC parameters for the given +% motion model. 
Here are the defaults corresponding to each motion model: +% +% * __Translation__ `struct('Size',1, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` +% * __TranslationAndScale__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` +% * __Rotation__ `struct('Size',1, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` +% * __Rigid__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` +% * __Similarity__ `struct('Size',2, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` +% * __Affine__ `struct('Size',3, 'Thresh',0.5, 'Eps',0.5, 'Prob',0.99)` +% +% By default it is set to 'Affine'. % % See also: cv.estimateGlobalMotionLeastSquares % diff --git a/+cv/estimateRigidTransform.m b/+cv/estimateRigidTransform.m index b9954deb0..5198b965e 100644 --- a/+cv/estimateRigidTransform.m +++ b/+cv/estimateRigidTransform.m @@ -1,23 +1,24 @@ %ESTIMATERIGIDTRANSFORM Computes an optimal affine transformation between two 2D point sets % -% M = cv.estimateRigidTransform(src, dst) -% M = cv.estimateRigidTransform(src, dst, 'OptionName', optionValue, ...) +% M = cv.estimateRigidTransform(src, dst) +% M = cv.estimateRigidTransform(src, dst, 'OptionName', optionValue, ...) % % ## Input % * __src__ First input 2D point set stored in a cell array of 2-element -% vectors `{[x,y], ...}`, or first image (8-bit numeric array, 1- or -% 3-channels). +% vectors `{[x,y], ...}`, or first image (8-bit numeric array, 1- or +% 3-channels). % * __dst__ Second input 2D point set of the same size and the same type as -% `src`, or second image (8-bit numeric array, 1- or 3-channels). +% `src`, or second image (8-bit numeric array, 1- or 3-channels). % % ## Output % * __M__ output 2x3 affine transformation `[A|b]` matrix (see below). % % ## Options % * __FullAffine__ If true, the function finds an optimal affine transformation -% with no additional resrictions (6 degrees of freedom). Otherwise, the -% class of transformations to choose from is limited to combinations of -% translation, rotation, and uniform scaling (4 degrees of freedom). +% with no additional restrictions (6 degrees of freedom). Otherwise, the +% class of transformations to choose from is limited to combinations of +% translation, rotation, and uniform scaling (4 degrees of freedom). +% default false % % The function finds an optimal affine transform `[A|b]` (a 2x3 floating-point % matrix) that best approximates the affine transformation between: @@ -30,14 +31,14 @@ % In case of point sets, the problem is formulated as follows: you need to % find a 2x2 matrix `A` and 2x1 vector `b` so that: % -% [A*|b*] = argmin_{[A|b]} sum_{i}(|| dst{i} - A*src{i}' - b ||^2) +% [A*|b*] = argmin_{[A|b]} sum_{i}(|| dst{i} - A*src{i}' - b ||^2) % % where `src{i}` and `dst{i}` are the i-th points in `src` and `dst`, % respectively. `[A|b]` can be either arbitrary (when `FullAffine=true`) or % have a form of: % -% [ a11, a12, b1; -% -a12, a11, b2 ] +% [ a11, a12, b1; +% -a12, a11, b2 ] % % when `FullAffine=false`. % diff --git a/+cv/evaluateFeatureDetector.m b/+cv/evaluateFeatureDetector.m index 87c8f4e6e..10965cad8 100644 --- a/+cv/evaluateFeatureDetector.m +++ b/+cv/evaluateFeatureDetector.m @@ -1,7 +1,7 @@ %EVALUATEFEATUREDETECTOR Evaluates a feature detector % -% [repeatability, correspCount] = cv.evaluateFeatureDetector(img1, img2, H1to2, keypoints1, keypoints2) -% [...]
= cv.evaluateFeatureDetector(..., 'OptionName',optionValue, ...) +% [repeatability, correspCount] = cv.evaluateFeatureDetector(img1, img2, H1to2, keypoints1, keypoints2) +% [...] = cv.evaluateFeatureDetector(..., 'OptionName',optionValue, ...) % % ## Input % * __img1__ First image. @@ -16,20 +16,19 @@ % % ## Options % * __Detector__ feature detector that finds keypoints in images. It can be -% specified as a string containing the type of feature detector, such as -% 'ORB'. It can also be specified as a cell-array of the form -% `{fdetector, 'key',val, ...}`, where the first element is the type, -% and the remaining elements are optional parameters used to construct -% the specified feature detector. See cv.FeatureDetector for possible -% types. Not set by default (i.e: you must supply pre-detected keypoints -% in the inputs). +% specified as a string containing the type of feature detector, such as +% 'ORB'. It can also be specified as a cell-array of the form +% `{fdetector, 'key',val, ...}`, where the first element is the type, and +% the remaining elements are optional parameters used to construct the +% specified feature detector. See cv.FeatureDetector for possible types. Not +% set by default (i.e. you must supply pre-detected keypoints in the inputs). % % ## Example % -% detector = cv.FeatureDetector('SURF'); -% kp1 = detector.detect(img1) -% kp2 = detector.detect(img2) -% [rep,corresp] = cv.evaluateFeatureDetector(img1, img2, H1to2, kp1, kp2) +% detector = cv.FeatureDetector('SURF'); +% kp1 = detector.detect(img1) +% kp2 = detector.detect(img2) +% [rep,corresp] = cv.evaluateFeatureDetector(img1, img2, H1to2, kp1, kp2) % % See also: cv.computeRecallPrecisionCurve, cv.FeatureDetector % diff --git a/+cv/fastNlMeansDenoising.m b/+cv/fastNlMeansDenoising.m index 33beb0e18..8d2f3386b 100644 --- a/+cv/fastNlMeansDenoising.m +++ b/+cv/fastNlMeansDenoising.m @@ -1,30 +1,29 @@ %FASTNLMEANSDENOISING Image denoising using Non-local Means Denoising algorithm % -% dst = cv.fastNlMeansDenoising(src) -% dst = cv.fastNlMeansDenoising(src, 'OptionName',optionValue, ...) +% dst = cv.fastNlMeansDenoising(src) +% dst = cv.fastNlMeansDenoising(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ Input 8-bit (or 16-bit with `L1` norm) 1-channel, 2-channel, -% 3-channel or 4-channel image. +% 3-channel or 4-channel image. % % ## Output % * __dst__ Output image with the same size and type as `src`. % % ## Options % * __H__ Array of parameters regulating filter strength, either one parameter -% applied to all channels or one value per channel in `dst`. Big `H` -% value perfectly removes noise but also removes image details, smaller -% `H` value preserves details but also preserves some noise. default [3] +% applied to all channels or one value per channel in `dst`. Big `H` value +% perfectly removes noise but also removes image details, smaller `H` value +% preserves details but also preserves some noise. default [3] % * __TemplateWindowSize__ Size in pixels of the template patch that is used -% to compute weights. Should be odd. Recommended value 7 pixels. -% default 7 +% to compute weights. Should be odd. Recommended value 7 pixels. default 7 % * __SearchWindowSize__ Size in pixels of the window that is used to compute -% weighted average for given pixel. Should be odd. Affect performance -% linearly: greater `SearchWindowsSize` - greater denoising time. -% Recommended value 21 pixels. default 21 +% weighted average for a given pixel. Should be odd. Affects performance +% linearly: greater `SearchWindowSize` - greater denoising time. +% Recommended value 21 pixels. default 21 % * __NormType__ Type of norm used for weight calculation.
Can be either: -% * __L2__ (default) -% * __L1__ +% * __L2__ (default) +% * __L1__ % % Perform image denoising using Non-local Means Denoising % [algorithm](http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/) diff --git a/+cv/fastNlMeansDenoisingColored.m b/+cv/fastNlMeansDenoisingColored.m index ada20a372..2f2c564e7 100644 --- a/+cv/fastNlMeansDenoisingColored.m +++ b/+cv/fastNlMeansDenoisingColored.m @@ -1,7 +1,7 @@ %FASTNLMEANSDENOISINGCOLORED Modification of fastNlMeansDenoising function for colored images % -% dst = cv.fastNlMeansDenoisingColored(src) -% dst = cv.fastNlMeansDenoisingColored(src, 'OptionName',optionValue, ...) +% dst = cv.fastNlMeansDenoisingColored(src) +% dst = cv.fastNlMeansDenoisingColored(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ Input 8-bit 3-channel or 4-channel image. @@ -11,23 +11,21 @@ % % ## Options % * __H__ Parameter regulating filter strength for luminance component. Bigger -% `H` value perfectly removes noise but also removes image details, -% smaller `H` value preserves details but also preserves some noise. -% default 3 +% `H` value perfectly removes noise but also removes image details, smaller +% `H` value preserves details but also preserves some noise. default 3 % * __HColor__ The same as `H` but for color components. For most images a value -% equals 10 will be enough to remove colored noise and do not distort -% colors. default 3 +% of 10 will be enough to remove colored noise without distorting +% colors. default 3 % * __TemplateWindowSize__ Size in pixels of the template patch that is used -% to compute weights. Should be odd. Recommended value 7 pixels. -% default 7 +% to compute weights. Should be odd. Recommended value 7 pixels. default 7 % * __SearchWindowSize__ Size in pixels of the window that is used to compute -% weighted average for given pixel. Should be odd. Affect performance -% linearly: greater `SearchWindowsSize` - greater denoising time. -% Recommended value 21 pixels. default 21 +% weighted average for a given pixel. Should be odd. Affects performance +% linearly: greater `SearchWindowSize` - greater denoising time. +% Recommended value 21 pixels. default 21 % * __FlipChannels__ whether to flip the order of color channels in input -% `src` and output `dst`, between MATLAB's RGB/RGBA order and -% OpenCV's BGR/BGRA (input: RGB/RGBA->BGR/BGRA, -% output: BGR/BGRA->RGB/RGBA). default false +% `src` and output `dst`, between MATLAB's RGB/RGBA order and OpenCV's +% BGR/BGRA (input: RGB/RGBA->BGR/BGRA, output: BGR/BGRA->RGB/RGBA). +% default false % % The function converts the image to CIELAB colorspace and then separately denoises % L and AB components with given `H` parameters using cv.fastNlMeansDenoising diff --git a/+cv/fastNlMeansDenoisingColoredMulti.m b/+cv/fastNlMeansDenoisingColoredMulti.m index ab584dde6..a4425a4fb 100644 --- a/+cv/fastNlMeansDenoisingColoredMulti.m +++ b/+cv/fastNlMeansDenoisingColoredMulti.m @@ -1,39 +1,37 @@ %FASTNLMEANSDENOISINGCOLOREDMULTI Modification of fastNlMeansDenoisingMulti function for colored images sequences % -% dst = cv.fastNlMeansDenoisingColoredMulti(srcImgs, imgToDenoiseIndex, temporalWindowSize) -% dst = cv.fastNlMeansDenoisingColoredMulti(..., 'OptionName',optionValue, ...) +% dst = cv.fastNlMeansDenoisingColoredMulti(srcImgs, imgToDenoiseIndex, temporalWindowSize) +% dst = cv.fastNlMeansDenoisingColoredMulti(..., 'OptionName',optionValue, ...) % % ## Input % * __srcImgs__ Input 8-bit 3-channel images sequence. All images should have -% the same type and size.
+% the same type and size. % * __imgToDenoiseIndex__ Target image to denoise index in `srcImgs` sequence -% (0-based index). +% (0-based index). % * __temporalWindowSize__ Number of surrounding images to use for target -% image denoising. Should be odd. Images from -% `imgToDenoiseIndex - temporalWindowSize/2` to -% `imgToDenoiseIndex + temporalWindowSize/2` from `srcImgs` will be used -% to denoise `srcImgs{imgToDenoiseIndex}` image. +% image denoising. Should be odd. Images from +% `imgToDenoiseIndex - temporalWindowSize/2` to +% `imgToDenoiseIndex + temporalWindowSize/2` from `srcImgs` will be used to +% denoise `srcImgs{imgToDenoiseIndex}` image. % % ## Output % * __dst__ Output image with the same size and type as `srcImgs` images. % % ## Options % * __H__ Parameter regulating filter strength for luminance component. Bigger -% `H` value perfectly removes noise but also removes image details, -% smaller `H` value preserves details but also preserves some noise. -% default 3 +% `H` value perfectly removes noise but also removes image details, smaller +% `H` value preserves details but also preserves some noise. default 3 % * __HColor__ The same as `H` but for color components. default 3 % * __TemplateWindowSize__ Size in pixels of the template patch that is used -% to compute weights. Should be odd. Recommended value 7 pixels. -% default 7 +% to compute weights. Should be odd. Recommended value 7 pixels. default 7 % * __SearchWindowSize__ Size in pixels of the window that is used to compute -% weighted average for given pixel. Should be odd. Affect performance -% linearly: greater `SearchWindowsSize` - greater denoising time. -% Recommended value 21 pixels. default 21 +% weighted average for a given pixel. Should be odd. Affects performance +% linearly: greater `SearchWindowSize` - greater denoising time. +% Recommended value 21 pixels. default 21 % * __FlipChannels__ whether to flip the order of color channels in input -% `srcImgs{i}` and output `dst`, between MATLAB's RGB/RGBA order and -% OpenCV's BGR/BGRA (input: RGB/RGBA->BGR/BGRA, -% output: BGR/BGRA->RGB/RGBA). default false +% `srcImgs{i}` and output `dst`, between MATLAB's RGB/RGBA order and +% OpenCV's BGR/BGRA (input: RGB/RGBA->BGR/BGRA, output: BGR/BGRA->RGB/RGBA). +% default false % % The function converts images to CIELAB colorspace and then separately % denoises L and AB components with given `H` parameters using diff --git a/+cv/fastNlMeansDenoisingMulti.m b/+cv/fastNlMeansDenoisingMulti.m index b461cb27c..8b77843e0 100644 --- a/+cv/fastNlMeansDenoisingMulti.m +++ b/+cv/fastNlMeansDenoisingMulti.m @@ -1,38 +1,37 @@ %FASTNLMEANSDENOISINGMULTI Modification of fastNlMeansDenoising function for images sequences % -% dst = cv.fastNlMeansDenoisingMulti(srcImgs, imgToDenoiseIndex, temporalWindowSize) -% dst = cv.fastNlMeansDenoisingMulti(..., 'OptionName',optionValue, ...) +% dst = cv.fastNlMeansDenoisingMulti(srcImgs, imgToDenoiseIndex, temporalWindowSize) +% dst = cv.fastNlMeansDenoisingMulti(..., 'OptionName',optionValue, ...) % % ## Input % * __srcImgs__ Input 8-bit (or 16-bit with 'L1' norm) 1-channel, 2-channel, -% 3-channel or 4-channel images sequence. All images should have the -% same type and size. +% 3-channel or 4-channel images sequence. All images should have the same +% type and size. % * __imgToDenoiseIndex__ Target image to denoise index in `srcImgs` sequence -% (0-based index). +% (0-based index). % * __temporalWindowSize__ Number of surrounding images to use for target -% image denoising. Should be odd.
Images from -% `imgToDenoiseIndex - temporalWindowSize/2` to -% `imgToDenoiseIndex + temporalWindowSize/2` from `srcImgs` will be used -% to denoise `srcImgs{imgToDenoiseIndex}` image. +% image denoising. Should be odd. Images from +% `imgToDenoiseIndex - temporalWindowSize/2` to +% `imgToDenoiseIndex + temporalWindowSize/2` from `srcImgs` will be used to +% denoise `srcImgs{imgToDenoiseIndex}` image. % % ## Output % * __dst__ Output image with the same size and type as `srcImgs` images. % % ## Options % * __H__ Array of parameters regulating filter strength, either one parameter -% applied to all channels or one value per channel in `dst`. Big `H` -% value perfectly removes noise but also removes image details, smaller -% `H` value preserves details but also preserves some noise. default [3] +% applied to all channels or one value per channel in `dst`. Big `H` value +% perfectly removes noise but also removes image details, smaller `H` value +% preserves details but also preserves some noise. default [3] % * __TemplateWindowSize__ Size in pixels of the template patch that is used -% to compute weights. Should be odd. Recommended value 7 pixels. -% default 7 +% to compute weights. Should be odd. Recommended value 7 pixels. default 7 % * __SearchWindowSize__ Size in pixels of the window that is used to compute -% weighted average for given pixel. Should be odd. Affect performance -% linearly: greater `SearchWindowsSize` - greater denoising time. -% Recommended value 21 pixels. default 21 +% weighted average for a given pixel. Should be odd. Affects performance +% linearly: greater `SearchWindowSize` - greater denoising time. +% Recommended value 21 pixels. default 21 % * __NormType__ Type of norm used for weight calculation. Can be either: -% * __L2__ (default) -% * __L1__ +% * __L2__ (default) +% * __L1__ % % Modification of cv.fastNlMeansDenoising function for images sequence where % consecutive images have been captured in a small period of time. For example diff --git a/+cv/fillConvexPoly.m b/+cv/fillConvexPoly.m index 87ffb5aa9..3769df9c5 100644 --- a/+cv/fillConvexPoly.m +++ b/+cv/fillConvexPoly.m @@ -1,24 +1,24 @@ %FILLCONVEXPOLY Fills a convex polygon % -% img = cv.fillConvexPoly(img, pts) -% [...] = cv.fillConvexPoly(..., 'OptionName', optionValue, ...) +% img = cv.fillConvexPoly(img, pts) +% [...] = cv.fillConvexPoly(..., 'OptionName', optionValue, ...) % % ## Input % * __img__ Image. % * __pts__ Polygon vertices, stored in numeric array (Nx2/Nx1x2/1xNx2) or -% cell-array of 2-element vectors `{[x,y], [x,y], ...}`. Supports -% integer `int32` 2D points. +% cell-array of 2-element vectors `{[x,y], [x,y], ...}`. Supports integer +% `int32` 2D points. % % ## Output % * __img__ Output image, same size and type as input `img`. % % ## Options % * __Color__ 3-element floating-point vector specifying polygon color. -% default zeros +% default zeros % * __LineType__ Type of the polygon boundaries. One of: -% * __4__ 4-connected line -% * __8__ 8-connected line (default) -% * __AA__ anti-aliased line +% * __4__ 4-connected line +% * __8__ 8-connected line (default) +% * __AA__ anti-aliased line % * __Shift__ Number of fractional bits in the vertex coordinates. default 0 % % The function cv.fillConvexPoly draws a filled convex polygon. This function diff --git a/+cv/fillPoly.m b/+cv/fillPoly.m index 5a72a0e88..56b1f08ea 100644 --- a/+cv/fillPoly.m +++ b/+cv/fillPoly.m @@ -1,24 +1,24 @@ %FILLPOLY Fills the area bounded by one or more polygons % -% img = cv.fillPoly(img, pts) -% [...]
= cv.fillPoly(..., 'OptionName', optionValue, ...) +% img = cv.fillPoly(img, pts) +% [...] = cv.fillPoly(..., 'OptionName', optionValue, ...) % % ## Input % * __img__ Image. % * __pts__ Array of polygons where each polygon is represented as an array -% of points. A cell array of cell arrays of 2-element vectors, in the -% form: `{{[x,y], [x,y], ...}, ...}`, or a cell array of Nx2 matrices. +% of points. A cell array of cell arrays of 2-element vectors, in the form: +% `{{[x,y], [x,y], ...}, ...}`, or a cell array of Nx2 matrices. % % ## Output % * __img__ Output image, same size and type as input `img`. % % ## Options % * __Color__ 3-element floating-point vector specifying polygon color. -% default zeros +% default zeros % * __LineType__ Type of the polygon boundaries. One of: -% * __4__ 4-connected line -% * __8__ 8-connected line (default) -% * __AA__ anti-aliased line +% * __4__ 4-connected line +% * __8__ 8-connected line (default) +% * __AA__ anti-aliased line % * __Shift__ Number of fractional bits in the vertex coordinates. default 0 % * __Offset__ Optional offset of all points of the contours. default `[0,0]` % diff --git a/+cv/filter2D.m b/+cv/filter2D.m index 55a7cdb0e..8b8cda0ea 100644 --- a/+cv/filter2D.m +++ b/+cv/filter2D.m @@ -1,35 +1,35 @@ %FILTER2D Convolves an image with the kernel % -% dst = cv.filter2D(src, kernel) -% dst = cv.filter2D(src, kernel, 'OptionName',optionValue, ...) +% dst = cv.filter2D(src, kernel) +% dst = cv.filter2D(src, kernel, 'OptionName',optionValue, ...) % % ## Input % * __src__ input image. % * __kernel__ Convolution kernel (or rather a correlation kernel), a -% single-channel floating-point matrix. If you want to apply different -% kernels to different channels, split the image into separate color -% planes and process them individually. +% single-channel floating-point matrix. If you want to apply different +% kernels to different channels, split the image into separate color planes +% and process them individually. % % ## Output % * __dst__ output image of the same size and the same number of channels -% as `src`. +% as `src`. % % ## Options % * __DDepth__ desired depth of the destination image. default -1. When -% `DDepth=-1`, the output image will have the same depth as the source. -% The following combinations are supported: -% * SDepth = 'uint8' --> DDepth = -1, 'int16', 'single', 'double' -% * SDepth = 'uint16', 'int16' --> DDepth = -1, 'single', 'double' -% * SDepth = 'single' --> DDepth = -1, 'single', 'double' -% * SDepth = 'double' --> DDepth = -1, 'double' +% `DDepth=-1`, the output image will have the same depth as the source. +% The following combinations are supported: +% * `SDepth = uint8 --> DDepth = -1, int16, single, double` +% * `SDepth = uint16, int16 --> DDepth = -1, single, double` +% * `SDepth = single --> DDepth = -1, single, double` +% * `SDepth = double --> DDepth = -1, double` % * __Anchor__ Anchor of the kernel that indicates the relative position of a -% filtered point within the kernel. The anchor should lie within the -% kernel. The special default value (-1,-1) means that the anchor is at -% the kernel center. +% filtered point within the kernel. The anchor should lie within the kernel. +% The special default value (-1,-1) means that the anchor is at the kernel +% center. % * __Delta__ optional value added to the filtered pixels before storing them -% in `dst`. Default 0 +% in `dst`. Default 0 % * __BorderType__ Pixel extrapolation method, see cv.copyMakeBorder.
-% Default 'Default' +% Default 'Default' % % The function applies an arbitrary linear filter to an image. When the % aperture is partially outside the image, the function interpolates outlier % % The function actually computes correlation, not convolution: % -% dst(x,y) = sum_{0 <= xp <= size(kernel,2), 0 <= yp <= size(kernel,1)} -% kernel(xp,yp) * src(x + xp - anchor(1), y + yp - anchor(2)) +% dst(x,y) = sum_{0 <= xp <= size(kernel,2), 0 <= yp <= size(kernel,1)} +% kernel(xp,yp) * src(x + xp - anchor(1), y + yp - anchor(2)) % % That is, the kernel is not mirrored around the anchor point. If you need a % real convolution, flip the kernel using cv.flip and set the new anchor to diff --git a/+cv/filterSpeckles.m b/+cv/filterSpeckles.m index cae4acb31..06e84af89 100644 --- a/+cv/filterSpeckles.m +++ b/+cv/filterSpeckles.m @@ -1,17 +1,17 @@ %FILTERSPECKLES Filters off small noise blobs (speckles) in the disparity map % -% img = cv.filterSpeckles(img, newVal, maxSpeckleSize, maxDiff) +% img = cv.filterSpeckles(img, newVal, maxSpeckleSize, maxDiff) % % ## Input % * __img__ The input 1-channel 16-bit signed disparity image. % * __newVal__ The disparity value used to paint-off the speckles. % * __maxSpeckleSize__ The maximum speckle size to consider it a speckle. -% Larger blobs are not affected by the algorithm +% Larger blobs are not affected by the algorithm % * __maxDiff__ Maximum difference between neighbor disparity pixels to put -% them into the same blob. Note that since cv.StereoBM, cv.StereoSGBM -% and may be other algorithms return a fixed-point disparity map, where -% disparity values are multiplied by 16, this scale factor should be -% taken into account when specifying this parameter value. +% them into the same blob. Note that since cv.StereoBM, cv.StereoSGBM and +% maybe other algorithms return a fixed-point disparity map, where +% disparity values are multiplied by 16, this scale factor should be taken +% into account when specifying this parameter value. % % ## Output % * __img__ Filtered disparity image. diff --git a/+cv/find4QuadCornerSubpix.m b/+cv/find4QuadCornerSubpix.m index 0821abf0f..3b3acd8c4 100644 --- a/+cv/find4QuadCornerSubpix.m +++ b/+cv/find4QuadCornerSubpix.m @@ -1,17 +1,17 @@ %FIND4QUADCORNERSUBPIX Finds subpixel-accurate positions of the chessboard corners % -% [corners,success] = cv.find4QuadCornerSubpix(img, corners) -% [...] = cv.find4QuadCornerSubpix(img, corners, 'OptionName', optionValue, ...) +% [corners,success] = cv.find4QuadCornerSubpix(img, corners) +% [...] = cv.find4QuadCornerSubpix(img, corners, 'OptionName', optionValue, ...) % % ## Input % * __img__ Input single-channel 8-bit image. % * __corners__ Initial coordinates of the input corners, stored in numeric -% array (Nx2/Nx1x2/1xNx2) or cell array of 2-element vectors -% `{[x,y], ...}`. Supports single floating-point class. +% array (Nx2/Nx1x2/1xNx2) or cell array of 2-element vectors `{[x,y], ...}`. +% Supports single floating-point class. % % ## Output % * __corners__ Output refined coordinates, of the same size and type as the -% input `corners` (numeric or cell matching the input format). +% input `corners` (numeric or cell matching the input format). % * __success__ boolean success value.
%
% ## Options
diff --git a/+cv/findChessboardCorners.m b/+cv/findChessboardCorners.m
index e326570b7..cde869108 100644
--- a/+cv/findChessboardCorners.m
+++ b/+cv/findChessboardCorners.m
@@ -1,35 +1,35 @@
%FINDCHESSBOARDCORNERS Finds the positions of internal corners of the chessboard
%
-% [corners,ok] = cv.findChessboardCorners(im, patternSize)
-% [...] = cv.findChessboardCorners(..., 'OptionName', optionValue, ...)
+% [corners,ok] = cv.findChessboardCorners(im, patternSize)
+% [...] = cv.findChessboardCorners(..., 'OptionName', optionValue, ...)
%
% ## Input
% * __im__ Source chessboard view. It must be an 8-bit grayscale or color
-% image.
+% image.
% * __patternSize__ Number of inner corners per chessboard row and column
-% (`patternSize = [points_per_row,points_per_colum] = [columns,rows]`).
+% (`patternSize = [points_per_row,points_per_column] = [columns,rows]`).
%
% ## Output
% * __corners__ Output array of detected corners. Cell array of 2-element
-% vectors `{[x,y], ...}`. Returns an empty cell if it fails to find all
-% the corners.
+% vectors `{[x,y], ...}`. Returns an empty cell if it fails to find all the
+% corners.
% * __ok__ returns true if all of the corners are found and they are placed in
-% a certain order. Otherwise, if the function fails to find all the
-% corners or reorder them, it returns false.
+% a certain order. Otherwise, if the function fails to find all the corners
+% or reorder them, it returns false.
%
% ## Options
% * __AdaptiveThresh__ Use adaptive thresholding to convert the image to black
-% and white, rather than a fixed threshold level (computed from the
-% average image brightness). default true.
+% and white, rather than a fixed threshold level (computed from the average
+% image brightness). default true.
% * __NormalizeImage__ Normalize the image gamma with cv.equalizeHist before
-% applying fixed or adaptive thresholding. default true.
+% applying fixed or adaptive thresholding. default true.
% * __FilterQuads__ Use additional criteria (like contour area, perimeter,
-% square-like shape) to filter out false quads extracted at the contour
-% retrieval stage. default false.
+% square-like shape) to filter out false quads extracted at the contour
+% retrieval stage. default false.
% * __FastCheck__ Run a fast check on the image that looks for chessboard
-% corners, and shortcut the call if none is found. This can drastically
-% speed up the call in the degenerate condition when no chessboard is
-% observed. default false.
+% corners, and shortcut the call if none is found. This can drastically
+% speed up the call in the degenerate condition when no chessboard is
+% observed. default false.
%
% The function attempts to determine whether the input image is a view of the
% chessboard pattern and locate the internal chessboard corners. The function
@@ -51,20 +51,20 @@
% ## Example
% Sample usage of detecting and drawing chessboard corners:
%
-% patternsize = [9,6]; % interior number of corners
-% gray = imread(fullfile(mexopencv.root(),'test','left01.jpg'));
-% % 'FastCheck' saves a lot of time on images
-% % that do not contain any chessboard corners
-% [corners,patternfound] = cv.findChessboardCorners(gray, patternsize, ...
-% 'AdaptiveThresh',true, 'NormalizeImage',true, 'FastCheck',true);
-% if patternfound
-% corners = cv.cornerSubPix(gray, corners, 'Criteria',...
-% struct('type','Count+EPS', 'maxCount',30, 'epsilon',0.1));
-% end
-% img = cv.drawChessboardCorners(repmat(gray,[1 1 3]), patternsize, ...
-% cat(1,corners{:}), 'PatternWasFound',patternfound);
+% patternsize = [9,6]; % interior number of corners
+% gray = imread(fullfile(mexopencv.root(),'test','left01.jpg'));
+% % 'FastCheck' saves a lot of time on images
+% % that do not contain any chessboard corners
+% [corners,patternfound] = cv.findChessboardCorners(gray, patternsize, ...
+% 'AdaptiveThresh',true, 'NormalizeImage',true, 'FastCheck',true);
+% if patternfound
+% corners = cv.cornerSubPix(gray, corners, 'Criteria',...
+% struct('type','Count+EPS', 'maxCount',30, 'epsilon',0.1));
+% end
+% img = cv.drawChessboardCorners(repmat(gray,[1 1 3]), patternsize, ...
+% cat(1,corners{:}), 'PatternWasFound',patternfound);
%
-% ## Note
+% ### Note
% The function requires white space (like a square-thick border, the wider the
% better) around the board to make the detection more robust in various
% environments. Otherwise, if there is no border and the background is dark,
diff --git a/+cv/findCirclesGrid.m b/+cv/findCirclesGrid.m
index bbcf8433d..a06574836 100644
--- a/+cv/findCirclesGrid.m
+++ b/+cv/findCirclesGrid.m
@@ -1,14 +1,14 @@
%FINDCIRCLESGRID Finds the centers in the grid of circles
%
-% centers = cv.findCirclesGrid(im, patternSize)
-% [centers,patternFound] = cv.findCirclesGrid(im, patternSize)
-% [...] = cv.findCirclesGrid(..., 'OptionName', optionValue, ...)
+% centers = cv.findCirclesGrid(im, patternSize)
+% [centers,patternFound] = cv.findCirclesGrid(im, patternSize)
+% [...] = cv.findCirclesGrid(..., 'OptionName', optionValue, ...)
%
% ## Input
% * __im__ Grid view of input circles. It must be an 8-bit grayscale or color
-% image.
+% image.
% * __patternSize__ Number of circles per row and column
-% (`patternSize = [points_per_row, points_per_colum]`).
+% (`patternSize = [points_per_row, points_per_column]`).
%
% ## Output
% * __centers__ Cell array of detected centers `{[x,y], ...}`.
@@ -16,40 +16,44 @@
%
% ## Options
% * __SymmetricGrid__ Use symmetric or asymmetric pattern of circles. In an
-% asymmetric grid, the circles in each row are staggered transverse to
-% the row (important to remember how rows and columns are counted in
-% this case). default true.
+% asymmetric grid, the circles in each row are staggered transverse to the
+% row (important to remember how rows and columns are counted in this case).
+% default true.
% * __Clustering__ Use a special algorithm for grid detection. It is more
-% robust to perspective distortions but much more sensitive to
-% background clutter. This is a good choice when you are trying to
-% calibrate a camera with an unusually wide field of view. default false.
+% robust to perspective distortions but much more sensitive to background
+% clutter. This is a good choice when you are trying to calibrate a camera
+% with an unusually wide field of view. default false.
% * __BlobDetector__ feature detector that finds blobs like dark circles on
-% light background. It can be specified as a string containing the type
-% of feature detector, such as 'SimpleBlobDetector'. It can also be
-% specified as a cell-array of the form `{fdetector, 'key',val, ...}`,
-% where the first element is the type, and the remaining elements are
-% optional parameters used to construct the specified feature detector.
-% See cv.FeatureDetector for possible types.
-% default is to use cv.SimpleBlobDetector with its default parameters.
-% * __FinderParameters__ Parameters for finding circles in a grid pattern.
-% You can specify the parameters as a cell-array that starts with the
-% grid type followed by pairs of key-value options
-% `{'Symmetric', 'key',val, ...}`, or as a structure where the fields
-% are the options `struct('GridType','Symmetric', 'key',val, ...)`.
-% * __GridType__ one of 'Symmetric' (default) or 'Asymmetric'
-% * __DensityNeighborhoodSize__ default [16,16]
-% * __MinDensity__ default 10.0
-% * __KmeansAttempts__ default 100
-% * __MinDistanceToAddKeypoint__ default 20
-% * __KeypointScale__ default 1
-% * __MinGraphConfidence__ default 9.0
-% * __VertexGain__ default 1.0
-% * __VertexPenalty__ default -0.6
-% * __ExistingVertexGain__ default 10000.0
-% * __EdgeGain__ default 1.0
-% * __EdgePenalty__ default -0.6
-% * __ConvexHullFactor__ default 1.1
-% * __MinRNGEdgeSwitchDist__ default 5.0
+% light background. It can be specified as a string containing the type of
+% feature detector, such as 'SimpleBlobDetector'. It can also be specified
+% as a cell-array of the form `{fdetector, 'key',val, ...}`, where the first
+% element is the type, and the remaining elements are optional parameters
+% used to construct the specified feature detector. See cv.FeatureDetector
+% for possible types. default is to use cv.SimpleBlobDetector with its
+% default parameters.
+% * __FinderParameters__ Parameters for finding circles in a grid pattern. You
+% can specify the parameters as a cell-array that starts with the grid type
+% followed by pairs of key-value options `{'Symmetric', 'key',val, ...}`, or
+% as a structure where the fields are the options
+% `struct('GridType','Symmetric', 'key',val, ...)`.
+% * __GridType__ one of 'Symmetric' (default) or 'Asymmetric'
+% * __DensityNeighborhoodSize__ default [16,16]
+% * __MinDensity__ default 10.0
+% * __KmeansAttempts__ default 100
+% * __MinDistanceToAddKeypoint__ default 20
+% * __KeypointScale__ default 1
+% * __MinGraphConfidence__ default 9.0
+% * __VertexGain__ default 1.0
+% * __VertexPenalty__ default -0.6
+% * __ExistingVertexGain__ default 10000.0
+% * __EdgeGain__ default 1.0
+% * __EdgePenalty__ default -0.6
+% * __ConvexHullFactor__ default 1.1
+% * __MinRNGEdgeSwitchDist__ default 5.0
+% * __SquareSize__ Distance between two adjacent points. Used by
+% 'Clustering' algorithm. default 1.0
+% * __MaxRectifiedDistance__ Max deviation from prediction. Used by
+% 'Clustering' algorithm. default `SquareSize/2`
%
% The function attempts to determine whether the input image contains a grid
% of circles. If it does, the function locates the centers of the circles. The
@@ -61,14 +65,14 @@
% ## Example
% Sample usage of detecting and drawing the centers of circles:
%
-% patternSize = [7,7]; % number of centers
-% gray = imread('...'); % source 8-bit image
-% [centers,patternfound] = cv.findCirclesGrid(gray, patternSize);
-% img = cv.drawChessboardCorners(img, patternSize, cat(1,centers{:}), ...
-% 'PatternWasFound',patternfound);
-% imshow(img)
+% patternSize = [7,7]; % number of centers
+% gray = imread('...'); % source 8-bit image
+% [centers,patternfound] = cv.findCirclesGrid(gray, patternSize);
+% img = cv.drawChessboardCorners(repmat(gray,[1 1 3]), patternSize, ...
+% cat(1,centers{:}), 'PatternWasFound',patternfound);
+% imshow(img)
%
-% ## Note
+% ### Note
% The function requires white space (like a square-thick border, the wider the
% better) around the board to make the detection more robust in various
% environments.
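For the asymmetric layout, a minimal usage sketch may be worth adding as well (an editorial addition, not part of the original patch; the pattern size is a hypothetical value, and `'SymmetricGrid'` is documented above):

%     gray = imread('...');   % source 8-bit image
%     patternSize = [4,11];   % asymmetric grid: 4 centers per row, 11 staggered rows
%     [centers,patternfound] = cv.findCirclesGrid(gray, patternSize, ...
%         'SymmetricGrid',false);

Because every other row is offset by half the horizontal spacing, rows and columns are counted as described in the `SymmetricGrid` option.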
diff --git a/+cv/findContours.m b/+cv/findContours.m
index a7a33b3d7..6fb289621 100644
--- a/+cv/findContours.m
+++ b/+cv/findContours.m
@@ -1,63 +1,62 @@
%FINDCONTOURS Finds contours in a binary image
%
-% contours = cv.findContours(image)
-% contours = cv.findContours(image, 'OptionName', optionValue, ...)
-% [contours,hierarchy] = cv.findContours(...)
+% contours = cv.findContours(image)
+% contours = cv.findContours(image, 'OptionName', optionValue, ...)
+% [contours, hierarchy] = cv.findContours(...)
%
% ## Input
% * __image__ Source, an 8-bit single-channel image. Non-zero pixels are
-% treated as 1's. Zero pixels remain 0's, so the image is treated as
-% binary. You can use cv.compare, cv.inRange, cv.threshold,
-% cv.adaptiveThreshold, cv.Canny, and others to create a binary image
-% out of a grayscale or color one. If mode equals to `CComp` or
-% `FloodFill`, the input can also be a 32-bit integer image of labels
-% (`int32` class).
+% treated as 1's. Zero pixels remain 0's, so the image is treated as binary.
+% You can use cv.compare, cv.inRange, cv.threshold, cv.adaptiveThreshold,
+% cv.Canny, and others to create a binary image out of a grayscale or color
+% one. If `Mode` equals `CComp` or `FloodFill`, the input can also be a
+% 32-bit integer image of labels (`int32` class).
%
% ## Output
% * __contours__ Detected contours. Each contour is stored as a vector of
-% points. A cell array of cell array of 2D integer points, of the form:
-% `{{[x,y],[x,y],...}, ...}`.
+% points. A cell array of cell arrays of 2D integer points, of the form:
+% `{{[x,y],[x,y],...}, ...}`.
% * __hierarchy__ Optional output vector containing information about the
-% image topology. It has as many elements as the number of contours.
-% For each i-th contour, `contours{i}`, the elements `hierarchy{i}(1)`,
-% `hierarchy{i}(2)`, `hierarchy{i}(3)`, and `hierarchy{i}(4)` are set to
-% 0-based indices in `contours` of the next and previous contours at the
-% same hierarchical level, the first child contour and the parent
-% contour, respectively. If for the i-th contour there are no next,
-% previous, parent, or nested contours, the corresponding elements of
-% `hierarchy{i}` will be negative. A cell array of 4-element integer
-% vectors of the form `{[next,prev,child,parent], ...}`.
+% image topology. It has as many elements as the number of contours. For
+% each i-th contour, `contours{i}`, the elements `hierarchy{i}(1)`,
+% `hierarchy{i}(2)`, `hierarchy{i}(3)`, and `hierarchy{i}(4)` are set to
+% 0-based indices in `contours` of the next and previous contours at the
+% same hierarchical level, the first child contour and the parent contour,
+% respectively. If for the i-th contour there are no next, previous, parent,
+% or nested contours, the corresponding elements of `hierarchy{i}` will be
+% negative. A cell array of 4-element integer vectors of the form
+% `{[next,prev,child,parent], ...}`.
%
% ## Options
% * __Mode__ Contour retrieval mode, default is 'List'. One of:
-% * __External__ retrieves only the extreme outer contours. It sets
-% `hierarchy{i}(3)=hierarchy{i}(4)=-1` for all the contours.
-% * __List__ retrieves all of the contours without establishing any
-% hierarchical relationships. It also sets
-% `hierarchy{i}(3)=hierarchy{i}(4)=-1` for all the contours.
-% * __CComp__ retrieves all of the contours and organizes them into a
-% two-level hierarchy. At the top level, there are external
-% boundaries of the components. At the second level, there are
-% boundaries of the holes.
If there is another contour inside a
-% hole of a connected component, it is still put at the top level.
-% * __Tree__ retrieves all of the contours and reconstructs a full
-% hierarchy of nested contours
-% * __FloodFill__ connected components of a multi-level image (only valid
-% for 32-bit integer images).
+% * __External__ retrieves only the extreme outer contours. It sets
+% `hierarchy{i}(3)=hierarchy{i}(4)=-1` for all the contours.
+% * __List__ retrieves all of the contours without establishing any
+% hierarchical relationships. It also sets
+% `hierarchy{i}(3)=hierarchy{i}(4)=-1` for all the contours.
+% * __CComp__ retrieves all of the contours and organizes them into a
+% two-level hierarchy. At the top level, there are external boundaries of
+% the components. At the second level, there are boundaries of the holes.
+% If there is another contour inside a hole of a connected component, it
+% is still put at the top level.
+% * __Tree__ retrieves all of the contours and reconstructs a full hierarchy
+% of nested contours
+% * __FloodFill__ connected components of a multi-level image (only valid
+% for 32-bit integer images).
% * __Method__ Contour approximation method, default is 'Simple'. One of:
-% * __None__ stores absolutely all the contour points. That is, any 2
-% subsequent points `(x1,y1)` and `(x2,y2)` of the contour will be
-% either horizontal, vertical or diagonal neighbors, that is,
-% `max(abs(x1-x2),abs(y2-y1))==1`.
-% * __Simple__ compresses horizontal, vertical, and diagonal segments and
-% leaves only their end points. For example, an up-right rectangular
-% contour is encoded with 4 points.
-% * **TC89_L1**, **TC89_KCOS** applies one of the flavors of the Teh-Chin
-% chain approximation algorithm [TehChin89] (1-curvature or
-% k-cosine curvature).
+% * __None__ stores absolutely all the contour points. That is, any 2
+% subsequent points `(x1,y1)` and `(x2,y2)` of the contour will be either
+% horizontal, vertical or diagonal neighbors, that is,
+% `max(abs(x1-x2),abs(y2-y1))==1`.
+% * __Simple__ compresses horizontal, vertical, and diagonal segments and
+% leaves only their end points. For example, an up-right rectangular
+% contour is encoded with 4 points.
+% * **TC89_L1**, **TC89_KCOS** apply one of the flavors of the Teh-Chin
+% chain approximation algorithm [TehChin89] (1-curvature or k-cosine
+% curvature).
% * __Offset__ Optional offset by which every contour point is shifted. This
-% is useful if the contours are extracted from the image ROI and then
-% they should be analyzed in the whole image context. default [0,0]
+% is useful if the contours are extracted from the image ROI and then they
+% should be analyzed in the whole image context. default [0,0]
%
% The function retrieves contours from the binary image using the algorithm
% [Suzuki85]. The contours are a useful tool for shape analysis and object
diff --git a/+cv/findEssentialMat.m b/+cv/findEssentialMat.m
index 9d2520c00..071c3d8c6 100644
--- a/+cv/findEssentialMat.m
+++ b/+cv/findEssentialMat.m
@@ -1,45 +1,43 @@
%FINDESSENTIALMAT Calculates an essential matrix from the corresponding points in two images
%
-% E = cv.findEssentialMat(points1, points2)
-% [E, mask] = cv.findEssentialMat(...)
-% [...] = cv.findEssentialMat(..., 'OptionName', optionValue, ...)
+% E = cv.findEssentialMat(points1, points2)
+% [E, mask] = cv.findEssentialMat(...)
+% [...] = cv.findEssentialMat(..., 'OptionName', optionValue, ...)
%
% ## Input
% * __points1__ Cell array of N (N>=5) 2D points from the first image, or numeric array
-% Nx2/Nx1x2/1xNx2. The point coordinates should be floating-point
-% (single or double precision).
+% Nx2/Nx1x2/1xNx2. The point coordinates should be floating-point (single or
+% double precision).
% * __points2__ Cell array or numeric array of the second image points of the
-% same size and format as `points1`.
+% same size and format as `points1`.
%
% ## Output
% * __E__ Essential matrix, 3x3.
% * __mask__ Output vector of N elements, every element of which is set to 0
-% for outliers and to 1 for the other points (inliers). The array is
-% computed only in the RANSAC and LMedS robust methods.
+% for outliers and to 1 for the other points (inliers). The array is
+% computed only in the RANSAC and LMedS robust methods.
%
% ## Options
% * __CameraMatrix__ Camera matrix `K = [fx 0 cx; 0 fy cy; 0 0 1]`. Note that
-% this function assumes that `points1` and `points2` are feature points
-% from cameras with the same camera matrix. default `eye(3)`.
+% this function assumes that `points1` and `points2` are feature points from
+% cameras with the same camera matrix. default `eye(3)`.
% * __Method__ Method for computing an essential matrix. One of:
-% * __Ransac__ for the RANSAC algorithm. (default)
-% * __LMedS__ for the LMedS algorithm.
+% * __Ransac__ for the RANSAC algorithm. (default)
+% * __LMedS__ for the LMedS algorithm.
% * __Confidence__ Parameter used for the RANSAC or LMedS methods only. It
-% specifies a desirable level of confidence (probability) that the
-% estimated matrix is correct. In the range 0..1 exclusive.
-% default 0.999
+% specifies a desirable level of confidence (probability) that the estimated
+% matrix is correct. In the range 0..1 exclusive. default 0.999
% * __Threshold__ Parameter used for RANSAC. It is the maximum distance from a
-% point to an epipolar line in pixels, beyond which the point is
-% considered an outlier and is not used for computing the final
-% essential matrix. It can be set to something like 1-3, depending
-% on the accuracy of the point localization, image resolution, and
-% the image noise. default 1.0
+% point to an epipolar line in pixels, beyond which the point is considered
+% an outlier and is not used for computing the final essential matrix. It
+% can be set to something like 1-3, depending on the accuracy of the point
+% localization, image resolution, and the image noise. default 1.0
%
% This function estimates the essential matrix based on the five-point
% algorithm solver in [Nister03]. [SteweniusCFS] is also related. The epipolar
% geometry is described by the following equation:
%
-% [p2;1]' * inv(K)' * E * inv(K) * [p1;1] = 0
+% [p2;1]' * inv(K)' * E * inv(K) * [p1;1] = 0
%
% where `E` is an essential matrix, `p1` and `p2` are corresponding points in
% the first and the second images, respectively.
The result of this function @@ -49,18 +47,18 @@ % `K` is the camera matrix with focal length `fx` and `fy` and principal point % `[cx,cy]`: % -% K = [fx 0 cx; -% 0 fy cy; -% 0 0 1] +% K = [fx 0 cx; +% 0 fy cy; +% 0 0 1] % % ## Example % Estimation of essential matrix using the RANSAC algorithm: % -% % initialize the points here -% points1 = {[1,1],[3,1],[5,1],...} -% points2 = {[2,3],[4,3],[6,3],...} -% % estimate essential matrix -% [E, mask] = cv.findEssentialMat(points1, points2, 'Method','Ransac'); +% % initialize the points here +% points1 = {[1,1],[3,1],[5,1],...} +% points2 = {[2,3],[4,3],[6,3],...} +% % estimate essential matrix +% [E, mask] = cv.findEssentialMat(points1, points2, 'Method','Ransac'); % % ## References % [Nister03]: diff --git a/+cv/findFundamentalMat.m b/+cv/findFundamentalMat.m index 1dd24df3f..bf2045bdc 100644 --- a/+cv/findFundamentalMat.m +++ b/+cv/findFundamentalMat.m @@ -1,40 +1,39 @@ %FINDFUNDAMENTALMAT Calculates a fundamental matrix from the corresponding points in two images % -% F = cv.findFundamentalMat(points1, points2) -% [F, mask] = cv.findFundamentalMat(...) -% [...] = cv.findFundamentalMat(..., 'OptionName', optionValue, ...) +% F = cv.findFundamentalMat(points1, points2) +% [F, mask] = cv.findFundamentalMat(...) +% [...] = cv.findFundamentalMat(..., 'OptionName', optionValue, ...) % % ## Input % * __points1__ Cell array of N points from the first image, or numeric array -% Nx2/Nx1x2/1xNx2. The point coordinates should be floating-point -% (single or double precision). +% Nx2/Nx1x2/1xNx2. The point coordinates should be floating-point (single or +% double precision). % * __points2__ Cell array or numeric array of the second image points of the -% same size and format as `points1`. +% same size and format as `points1`. % % ## Output % * __F__ Fundamental matrix, 3x3 (or 9x3 in some cases, see below). % * __mask__ Optional output mask set by a robust method (RANSAC or LMEDS), -% indicates inliers. Vector of same length as number of points. +% indicates inliers. Vector of same length as number of points. % % ## Options % * __Method__ Method for computing a fundamental matrix. One of: -% * __7Point__ for a 7-point algorithm. `N = 7`. -% * __8Point__ for an 8-point algorithm. `N >= 8`. -% * __Ransac__ for the RANSAC algorithm. `N >= 8`. (default) -% * __LMedS__ for the LMedS least-median-of-squares algorithm. `N >= 8`. +% * __7Point__ for a 7-point algorithm. `N = 7`. +% * __8Point__ for an 8-point algorithm. `N >= 8`. +% * __Ransac__ for the RANSAC algorithm. `N >= 8`. (default) +% * __LMedS__ for the LMedS least-median-of-squares algorithm. `N >= 8`. % * __Param1__ Parameter used for RANSAC. It is the maximum distance from a -% point to an epipolar line in pixels, beyond which the point is -% considered an outlier and is not used for computing the final -% fundamental matrix. It can be set to something like 1-3, depending -% on the accuracy of the point localization, image resolution, and -% the image noise. default 3.0 +% point to an epipolar line in pixels, beyond which the point is considered +% an outlier and is not used for computing the final fundamental matrix. It +% can be set to something like 1-3, depending on the accuracy of the point +% localization, image resolution, and the image noise. default 3.0 % * __Param2__ Parameter used for the RANSAC or LMedS methods only. It -% specifies a desirable level of confidence (probability) that the -% estimated matrix is correct. In the range 0..1 exclusive. 
default 0.99
+% specifies a desirable level of confidence (probability) that the estimated
+% matrix is correct. In the range 0..1 exclusive. default 0.99
%
% The epipolar geometry is described by the following equation:
%
-% [p2;1]^T * F * [p1;1] = 0
+% [p2;1]^T * F * [p1;1] = 0
%
% where `F` is a fundamental matrix, `p1` and `p2` are corresponding points in
% the first and the second images, respectively.
@@ -53,11 +52,11 @@
% ## Example
% Estimation of fundamental matrix using the RANSAC algorithm:
%
-% % initialize the points here
-% points1 = {[1,1],[3,1],[5,1],...}
-% points2 = {[2,3],[4,3],[6,3],...}
-% % estimate fundamental matrix
-% [F, mask] = cv.findFundamentalMat(points1, points2, 'Method','Ransac');
+% % initialize the points here
+% points1 = {[1,1],[3,1],[5,1],...}
+% points2 = {[2,3],[4,3],[6,3],...}
+% % estimate fundamental matrix
+% [F, mask] = cv.findFundamentalMat(points1, points2, 'Method','Ransac');
%
% See also: cv.computeCorrespondEpilines, cv.stereoRectifyUncalibrated,
% estimateFundamentalMatrix
diff --git a/+cv/findHomography.m b/+cv/findHomography.m
index ad3997423..336bfcfd2 100644
--- a/+cv/findHomography.m
+++ b/+cv/findHomography.m
@@ -1,50 +1,49 @@
%FINDHOMOGRAPHY Finds a perspective transformation between two planes
%
-% H = cv.findHomography(srcPoints, dstPoints)
-% [H, mask] = cv.findHomography(...)
-% [...] = cv.findHomography(..., 'OptionName', optionValue, ...)
+% H = cv.findHomography(srcPoints, dstPoints)
+% [H, mask] = cv.findHomography(...)
+% [...] = cv.findHomography(..., 'OptionName', optionValue, ...)
%
% ## Input
% * __srcPoints__ Coordinates of the points in the original plane, a numeric
-% array of size Nx2/1xNx2/Nx1x2 or cell array of 2-elements vectors
-% `{[x,y], ...}` (single floating-point precision).
+% array of size Nx2/1xNx2/Nx1x2 or cell array of 2-element vectors
+% `{[x,y], ...}` (single floating-point precision).
% * __dstPoints__ Coordinates of the points in the target plane, of same size
-% and type as `srcPoints`.
+% and type as `srcPoints`.
%
% ## Output
% * __H__ 3x3 Homography matrix.
% * __mask__ Nx1 mask array of same length as input points, indicates inliers
-% (which points were actually used in the best computation of `H`).
+% (which points were actually used in the best computation of `H`).
%
% ## Options
% * __Method__ Method used to compute a homography matrix. The following
-% methods are possible:
-% * __0__ a regular method using all the points. (default)
-% * __Ransac__ RANSAC-based robust method.
-% * __LMedS__ Least-Median robust method.
-% * __Rho__ PROSAC-based robust method, introduced in [Bazargani15].
-% (weighted RANSAC modification, faster in the case of many
-% outliers).
+% methods are possible:
+% * __0__ a regular method using all the points. (default)
+% * __Ransac__ RANSAC-based robust method.
+% * __LMedS__ Least-Median robust method.
+% * __Rho__ PROSAC-based robust method, introduced in [Bazargani15].
+% (weighted RANSAC modification, faster in the case of many outliers).
% * __RansacReprojThreshold__ Maximum allowed reprojection error to treat a
-% point pair as an inlier (used in the RANSAC and RHO methods only).
-% That is, if
-% `|| dstPoints_i - convertPointsToHomogeneous(H*srcPoints_i) || > RansacReprojThreshold`
-% then the point `i` is considered an outlier. If `srcPoints` and
-% `dstPoints` are measured in pixels, it usually makes sense to set
-% this parameter somewhere in the range of 1 to 10. default 3.0.
+% point pair as an inlier (used in the RANSAC and RHO methods only).
That +% is, if +% `|| dstPoints_i - convertPointsToHomogeneous(H*srcPoints_i) || > RansacReprojThreshold` +% then the point `i` is considered an outlier. If `srcPoints` and `dstPoints` +% are measured in pixels, it usually makes sense to set this parameter +% somewhere in the range of 1 to 10. default 3.0. % * __MaxIters__ The maximum number of RANSAC iterations, 2000 is the maximum -% it can be. default 2000 +% it can be. default 2000 % * __Confidence__ Confidence level, between 0 and 1. default 0.995 % % The function finds and returns the perspective transformation `H` between % the source and the destination planes: % -% s_i * [x_i'; y_i'; 1] ~ H * [x_i; y_i; 1] +% s_i * [x_i'; y_i'; 1] ~ H * [x_i; y_i; 1] % % so that the back-projection error: % -% sum_{i} (x_i' - (h11*x_i + h12*y_i + h13)/(h31*x_i + h32*y_i + h33))^2 + -% (y_i' - (h21*x_i + h22*y_i + h23)/(h31*x_i + h32*y_i + h33))^2 +% sum_{i} (x_i' - (h11*x_i + h12*y_i + h13)/(h31*x_i + h32*y_i + h33))^2 + +% (y_i' - (h21*x_i + h22*y_i + h23)/(h31*x_i + h32*y_i + h33))^2 % % is minimized. If the parameter method is set to the default value 0, the % function uses all the point pairs to compute an initial homography estimate diff --git a/+cv/findTransformECC.m b/+cv/findTransformECC.m index adfd4afe3..fa136e254 100644 --- a/+cv/findTransformECC.m +++ b/+cv/findTransformECC.m @@ -1,14 +1,14 @@ %FINDTRANSFORMECC Finds the geometric transform (warp) between two images in terms of the ECC criterion % -% warpMatrix = cv.findTransformECC(templateImage, inputImage) -% [warpMatrix,rho] = cv.findTransformECC(templateImage, inputImage) -% [...] = cv.findTransformECC(..., 'OptionName',optionValue, ...) +% warpMatrix = cv.findTransformECC(templateImage, inputImage) +% [warpMatrix,rho] = cv.findTransformECC(templateImage, inputImage) +% [...] = cv.findTransformECC(..., 'OptionName',optionValue, ...) % % ## Input % * __templateImage__ single-channel template image; `uint8` or `single` array. % * __inputImage__ single-channel input image which should be warped with the -% final `warpMatrix` in order to provide an image similar to -% `templateImage`, same type as `templateImage`. +% final `warpMatrix` in order to provide an image similar to `templateImage`, +% same type as `templateImage`. % % ## Output % * __warpMatrix__ floating-point 2x3 or 3x3 mapping matrix (warp). @@ -16,34 +16,34 @@ % % ## Options % * __MotionType__ parameter, specifying the type of motion: -% * __Translation__ sets a translational motion model; `warpMatrix` is -% 2x3 with the first 2x2 part being the unity matrix and the rest -% two parameters being estimated. -% * __Euclidean__ sets a Euclidean (rigid) transformation as motion -% model; three parameters are estimated; `warpMatrix` is 2x3. -% * __Affine__ sets an affine motion model (DEFAULT); six parameters are -% estimated; `warpMatrix` is 2x3. -% * __Homography__ sets a homography as a motion model; eight parameters -% are estimated; `warpMatrix` is 3x3. +% * __Translation__ sets a translational motion model; `warpMatrix` is 2x3 +% with the first 2x2 part being the unity matrix and the rest two +% parameters being estimated. +% * __Euclidean__ sets a Euclidean (rigid) transformation as motion model; +% three parameters are estimated; `warpMatrix` is 2x3. +% * __Affine__ sets an affine motion model (DEFAULT); six parameters are +% estimated; `warpMatrix` is 2x3. +% * __Homography__ sets a homography as a motion model; eight parameters are +% estimated; `warpMatrix` is 3x3. 
% * __Criteria__ parameter, specifying the termination criteria of the ECC
-% algorithm; `Criteria.epsilon` defines the threshold of the increment
-% in the correlation coefficient between two iterations (a negative
-% `Criteria.epsilon` makes `Criteria.maxcount` the only termination
-% criterion). Default values are:
-% `struct('type','Count+EPS', 'maxCount',50, 'epsilon',0.001)`
+% algorithm; `Criteria.epsilon` defines the threshold of the increment in
+% the correlation coefficient between two iterations (a negative
+% `Criteria.epsilon` makes `Criteria.maxCount` the only termination
+% criterion). Default values are:
+% `struct('type','Count+EPS', 'maxCount',50, 'epsilon',0.001)`
% * __Mask__ An optional mask to indicate valid values of `inputImage`.
-% Not set by default.
-% * __InputWarp__ Initial estimate for `warpMatrix`. See the notes
-% below. Default `eye(2,3)` or `eye(3,3)` depending on motion type.
+% Not set by default.
+% * __InputWarp__ Initial estimate for `warpMatrix`. See the notes below.
+% Default `eye(2,3)` or `eye(3,3)` depending on motion type.
%
% The function estimates the optimum transformation (`warpMatrix`) with
% respect to ECC criterion ([EP08]), that is:
%
-% warpMatrix = argmax_{W} ECC(templateImage(x,y), inputImage(x',y'))
+% warpMatrix = argmax_{W} ECC(templateImage(x,y), inputImage(x',y'))
%
% where:
%
-% [x';y'] = W * [x;y;1]
+% [x';y'] = W * [x;y;1]
%
% (the equation holds with homogeneous coordinates for homography). It returns
% the final enhanced correlation coefficient, that is the correlation
diff --git a/+cv/fitEllipse.m b/+cv/fitEllipse.m
index 378f23576..7d3b68ade 100644
--- a/+cv/fitEllipse.m
+++ b/+cv/fitEllipse.m
@@ -1,18 +1,27 @@
%FITELLIPSE Fits an ellipse around a set of 2D points
%
-% rct = cv.fitEllipse(points)
+% rct = cv.fitEllipse(points)
+% rct = cv.fitEllipse(points, 'OptionName',optionValue, ...)
%
% ## Input
% * __points__ Input 2D point set, stored in numeric array
-% (Nx2/Nx1x2/1xNx2) or cell array of 2-element vectors (`{[x,y], ...}`).
+% (Nx2/Nx1x2/1xNx2) or cell array of 2-element vectors (`{[x,y], ...}`).
+% There should be at least 5 points to fit the ellipse.
%
% ## Output
% * __rct__ Output rotated rectangle struct with the following fields:
-% * __center__ The rectangle mass center `[x,y]`.
-% * __size__ Width and height of the rectangle `[w,h]`.
-% * __angle__ The rotation angle in a clockwise direction.
-% When the angle is 0, 90, 180, 270 etc., the
-% rectangle becomes an up-right rectangle.
+% * __center__ The rectangle mass center `[x,y]`.
+% * __size__ Width and height of the rectangle `[w,h]`.
+% * __angle__ The rotation angle in a clockwise direction. When the angle is
+% 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle.
+%
+% ## Options
+% * __Method__ One of:
+% * __Linear__ Linear (LIN) conic fitting method. This is the default.
+% * __Direct__ Direct least square (DLS) method.
+% * __AMS__ Approximate mean square (AMS) method.
+%
+% ### Method = Linear
%
% The function calculates the ellipse that fits (in a least-squares sense) a
% set of 2D points best of all. It returns the rotated rectangle in which the
@@ -21,11 +30,99 @@
% ellipse/rotatedRect data contains negative indices, due to the data points
% being close to the border of the containing Mat element.
%
+% ### Method = Direct
+%
+% The function calculates the ellipse that fits a set of 2D points.
+% It returns the rotated rectangle in which the ellipse is inscribed.
+% The direct least square (DLS) method by [Fitzgibbon1999] is used.
+%
+% For an ellipse, this basis set is `chi = (x^2, x*y, y^2, x, y, 1)`, which is
+% a set of six free coefficients `A^T = {A_xx, A_xy, A_yy, A_x, A_y, A_0}`.
+% However, to specify an ellipse, all that is needed is five numbers: the
+% major and minor axes lengths `(a,b)`, the position `(x_0,y_0)`, and the
+% orientation `theta`. This is because the basis set includes lines,
+% quadratics, parabolic and hyperbolic functions as well as elliptical
+% functions as possible fits.
+%
+% The Direct method confines the fit to ellipses by ensuring that
+% `4*A_xx*A_yy - A_xy^2 > 0`. The condition imposed is that
+% `4*A_xx*A_yy - A_xy^2 = 1` which satisfies the inequality and, as the
+% coefficients can be arbitrarily scaled, is not overly restrictive.
+%
+% epsilon^2 = A^T * D^T * D * A
+% with A^T * C * A = 1
+% and C = [0 0 2 0 0 0; 0 -1 0 0 0 0; 2 0 0 0 0 0; 0 0 0 0 0 0; 0 0 0 0 0 0; 0 0 0 0 0 0]
+%
+% The minimum cost is found by solving the generalized eigenvalue problem.
+%
+% D^T * D * A = lambda * C * A
+%
+% The system produces only one positive eigenvalue `lambda` which is chosen as
+% the solution with its eigenvector `u`. These are used to find the
+% coefficients:
+%
+% A = sqrt(1 / (u^T * C * u)) * u
+%
+% The scaling factor guarantees that `A^T * C * A = 1`.
+%
+% Note: If the determinant of `A` is too small, the method falls back to
+% 'Linear'.
+%
+% ### Method = AMS
+%
+% The function calculates the ellipse that fits a set of 2D points.
+% It returns the rotated rectangle in which the ellipse is inscribed.
+% The Approximate Mean Square (AMS) method proposed by [Taubin1991] is used.
+%
+% For an ellipse, this basis set is `chi = (x^2, x*y, y^2, x, y, 1)`, which is
+% a set of six free coefficients `A^T = {A_xx, A_xy, A_yy, A_x, A_y, A_0}`.
+% However, to specify an ellipse, all that is needed is five numbers: the
+% major and minor axes lengths `(a,b)`, the position `(x_0,y_0)`, and the
+% orientation `theta`. This is because the basis set includes lines,
+% quadratics, parabolic and hyperbolic functions as well as elliptical
+% functions as possible fits.
+%
+% If the fit is found to be a parabolic or hyperbolic function then the
+% 'Direct' method is used. The AMS method restricts the fit to parabolic,
+% hyperbolic and elliptical curves by imposing the condition that
+% `A^T * (D_x^T * D_x + D_y^T * D_y) * A = 1` where the matrices `D_x` and `D_y`
+% are the partial derivatives of the design matrix `D` with respect to x and y.
+% The matrices are formed row by row applying the following to each of the
+% points in the set:
+%
+% D(i,:) = {x_i^2, x_i y_i, y_i^2, x_i, y_i, 1}
+% D_x(i,:) = {2*x_i, y_i, 0, 1, 0, 0}
+% D_y(i,:) = {0, x_i, 2*y_i, 0, 1, 0}
+%
+% The AMS method minimizes the cost function
+%
+% epsilon^2 = (A^T * D^T * D * A) / (A^T * (D_x^T * D_x + D_y^T * D_y) * A)
+%
+% The minimum cost is found by solving the generalized eigenvalue problem.
+%
+% D^T * D * A = lambda * (D_x^T * D_x + D_y^T * D_y) * A
+%
+% Note: If the determinant of `A` is too small, the method falls back to
+% 'Linear'.
+%
% ## References
% [Fitzgibbon95]:
% > Andrew W Fitzgibbon and Robert B Fisher.
% > "A buyer's guide to conic fitting". In Proceedings of the 6th British
% > conference on Machine vision (Vol. 2), pages 513-522. BMVA Press, 1995.
+% > [PDF](http://www.bmva.org/bmvc/1995/bmvc-95-050.pdf)
+%
+% [Fitzgibbon1999]:
+% > Andrew Fitzgibbon, Maurizio Pilu, and Robert B. Fisher.
+% > "Direct least square fitting of ellipses". IEEE Transactions on Pattern +% > Analysis and Machine Intelligence, 21(5):476-480, 1999. +% > [PDF](https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/ellipse-pami.pdf) +% +% [Taubin1991]: +% > Gabriel Taubin. "Estimation of planar curves, surfaces, and nonplanar +% > space curves defined by implicit equations with applications to edge and +% > range image segmentation". IEEE Transactions on Pattern Analysis and +% > Machine Intelligence, 13(11):1115-1138, 1991. % % See also: cv.minAreaRect % diff --git a/+cv/fitLine.m b/+cv/fitLine.m index 98d4541f2..2919aadcb 100644 --- a/+cv/fitLine.m +++ b/+cv/fitLine.m @@ -1,36 +1,35 @@ %FITLINE Fits a line to a 2D or 3D point set % -% lin = cv.fitLine(points) -% lin = cv.fitLine(points, 'OptionName',optionValue, ...) +% lin = cv.fitLine(points) +% lin = cv.fitLine(points, 'OptionName',optionValue, ...) % % ## Input -% * __points__ Input vector of 2D or 3D points. -% 2D points stored in a cell array of 2-element vectors in the form -% `{[x,y], [x,y], ...}` or a Nx2/Nx1x2/1xNx2 numeric array. -% 3D points stored in a cell array of 3-element vectors in the form -% `{[x,y,z], [x,y,z], ...}` or a Nx3/Nx1x3/1xNx3 numeric array. +% * __points__ Input vector of 2D or 3D points. 2D points stored in a cell +% array of 2-element vectors in the form `{[x,y], [x,y], ...}` or a +% Nx2/Nx1x2/1xNx2 numeric array. 3D points stored in a cell array of +% 3-element vectors in the form `{[x,y,z], [x,y,z], ...}` or a +% Nx3/Nx1x3/1xNx3 numeric array. % % ## Output % * __lin__ Output line parameters. In case of 2D fitting, it is a vector of -% 4-elements vector `[vx,vy, x0,y0]`, where `[vx,vy]` is a normalized -% vector collinear to the line and `[x0,y0]` is a point on the line. In -% case of 3D fitting, it is a 6-elements vector `[vx,vy,vz, x0,y0,z0]`, -% where `[vx,vy,vz]` is a normalized vector collinear to the line and -% `[x0,y0,z0]` is a point on the line. +% 4-elements vector `[vx,vy, x0,y0]`, where `[vx,vy]` is a normalized vector +% collinear to the line and `[x0,y0]` is a point on the line. In case of 3D +% fitting, it is a 6-elements vector `[vx,vy,vz, x0,y0,z0]`, where +% `[vx,vy,vz]` is a normalized vector collinear to the line and `[x0,y0,z0]` +% is a point on the line. % % ## Options -% * __DistType__ Distance used by the M-estimator (see explanation below). -% Default 'L2'. One of: -% * __L2__ -% * __L1__ -% * __L12__ -% * __Fair__ -% * __Welsch__ -% * __Huber__ +% * __DistType__ Distance used by the M-estimator (see explanation below): +% * __L2__ (default) +% * __L1__ +% * __L12__ +% * __Fair__ +% * __Welsch__ +% * __Huber__ % * __Param__ Numerical parameter (`C`) for some types of distances. If it is -% 0, an optimal value is chosen. default 0. +% 0, an optimal value is chosen. default 0. % * __RadiusEps__ Sufficient accuracy for the radius (distance between the -% coordinate origin and the line). default 0.01 +% coordinate origin and the line). default 0.01 % * __AngleEps__ Sufficient accuracy for the angle. 
default 0.01
%
% The function cv.fitLine fits a line to a 2D or 3D point set by minimizing
@@ -39,33 +38,35 @@
%
% * __L2__
%
-% rho(r) = r^2/2 (the simplest and the fastest least-squares method)
+% rho(r) = r^2/2 (the simplest and the fastest least-squares method)
%
% * __L1__
%
-% rho(r) = r
+% rho(r) = r
%
% * __L12__
%
-% rho(r) = 2 * (sqrt(1+r^2/2) - 1)
+% rho(r) = 2 * (sqrt(1+r^2/2) - 1)
%
% * __Fair__
%
-% rho(r) = C^2 * (r/C - log(1 + r/c)), where C = 1.3998
+% rho(r) = C^2 * (r/C - log(1 + r/C)), where C = 1.3998
%
% * __Welsch__
%
-% rho(r) = C^2/2 * (1 - exp(-(r/c)^2)), where C = 2.9846
+% rho(r) = C^2/2 * (1 - exp(-(r/C)^2)), where C = 2.9846
%
% * __Huber__
%
-% rho(r) = { r^2/2       if r < C
-%          { C*(r-C/2)   otherwise,  where C = 1.345
+% rho(r) = { r^2/2       if r < C
+%          { C*(r-C/2)   otherwise,  where C = 1.345
diff --git a/+cv/flip.m b/+cv/flip.m
-% { src(size(src,1)-i-1,j) if flipCode = 0
-% dst(i,j) = { src(i,size(src,2)-j-1) if flipCode > 0
-% { src(size(src,1)-i-1,size(src,2)-j-1) if flipCode < 0
+% { src(size(src,1)-i-1,j) if flipCode = 0
+% dst(i,j) = { src(i,size(src,2)-j-1) if flipCode > 0
+% { src(size(src,1)-i-1,size(src,2)-j-1) if flipCode < 0
%
% The example scenarios of using the function are the following:
%
diff --git a/+cv/floodFill.m b/+cv/floodFill.m
index 0c81026e2..5c40e4f58 100644
--- a/+cv/floodFill.m
+++ b/+cv/floodFill.m
@@ -1,62 +1,61 @@
%FLOODFILL Fills a connected component with the given color
%
-% [dst, rect, area] = cv.floodFill(src, seed, newVal)
-% [dst, rect, area, mask] = cv.floodFill(..., 'Mask',mask, 'MaskOnly',true)
-% [...] = cv.floodFill(..., 'OptionName',optionValue, ...)
+% [dst, rect, area] = cv.floodFill(src, seed, newVal)
+% [dst, rect, area, mask] = cv.floodFill(..., 'Mask',mask, 'MaskOnly',true)
+% [...] = cv.floodFill(..., 'OptionName',optionValue, ...)
%
% ## Input
% * __src__ Input 1- or 3-channel, 8-bit, or floating-point image.
% * __seed__ Starting seed point `[x,y]`.
% * __newVal__ New value of the repainted domain pixels. It should match the
-% range and dimensions of the input image: 1-element vector (brightness)
-% for grayscale images, and 3-element vector (color) for RGB images.
+% range and dimensions of the input image: 1-element vector (brightness)
+% for grayscale images, and 3-element vector (color) for RGB images.
%
% ## Output
% * __dst__ Destination image of the same size and the same type as `src`.
-% Contains the modified input unless the `MaskOnly` flag is set in the
-% second variant of the function, in which case `dst` is the same as the
-% input `src` unmodified. See the details below.
+% Contains the modified input unless the `MaskOnly` flag is set in the
+% second variant of the function, in which case `dst` is the same as the
+% input `src` unmodified. See the details below.
% * __rect__ Optional output parameter set by the function to the minimum
-% bounding rectangle of the repainted domain `[x,y,w,h]`.
+% bounding rectangle of the repainted domain `[x,y,w,h]`.
% * __area__ Optional output parameter set by the function to the number of
-% filled pixels.
+% filled pixels.
% * __mask__ Optional output containing the updated input `Mask`. Populated in
-% the second variant of the function with the `Mask` option. On output,
-% pixels in the mask corresponding to filled pixels in the image are set
-% to 1 or to the value specified in `MaskFillValue` option as described
-% below.
+% the second variant of the function with the `Mask` option. On output,
+% pixels in the mask corresponding to filled pixels in the image are set
+% to 1 or to the value specified in `MaskFillValue` option as described
+% below.
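A sketch of the mask variant described above may be worth adding (an editorial addition, not part of the original patch; the seed point and tolerances are made-up values):

%     img = imread('...');    % 8-bit color image
%     mask = zeros(size(img,1)+2, size(img,2)+2, 'uint8');
%     [dst, rect, area, mask] = cv.floodFill(img, [100,100], [255 0 0], ...
%         'Mask',mask, 'LoDiff',[20 20 20], 'UpDiff',[20 20 20]);
%     % filled pixels are now marked with 1's in the returned mask

Note the mask must be 2 pixels wider and taller than the image, as the `Mask` option below requires.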
%
% ## Options
% * __LoDiff__ Maximal lower brightness/color difference between the currently
-% observed pixel and one of its neighbors belonging to the component, or
-% a seed pixel being added to the component. default zeros
+% observed pixel and one of its neighbors belonging to the component, or a
+% seed pixel being added to the component. default zeros
% * __UpDiff__ Maximal upper brightness/color difference between the currently
-% observed pixel and one of its neighbors belonging to the component, or
-% a seed pixel being added to the component. default zeros
+% observed pixel and one of its neighbors belonging to the component, or a
+% seed pixel being added to the component. default zeros
% * __Connectivity__ Connectivity value, 4 or 8. The default value of 4 means
-% that only the four nearest neighbor pixels (those that share an edge
-% are considered. A connectivity value of 8 means that the eight nearest
-% neighbor pixels (those that share a corner) will be considered.
-% default 4
-% * __FixedRange__ If set, the difference between the current pixel and
-% seed pixel is considered. Otherwise, the difference between
-% neighbor pixels is considered (that is, the range is
-% floating). This determines whether to fill relative to the seed point
-% pixel, or to fill relative the neighbor's value). default false
-% * __Mask__ Operation mask that should be a single-channel 8-bit image,
-% 2 pixels wider and 2 pixels taller than image. Flood-filling cannot go
-% across non-zero pixels in the input mask. For example, an edge
-% detector output can be used as a mask to stop filling at edges. It is
-% possible to use the same mask in multiple calls to the function to
-% make sure the filled areas do not overlap. Not set by default.
+% that only the four nearest neighbor pixels (those that share an edge) are
+% considered. A connectivity value of 8 means that the eight nearest
+% neighbor pixels (those that share a corner) will be considered. default 4
+% * __FixedRange__ If set, the difference between the current pixel and seed
+% pixel is considered. Otherwise, the difference between neighbor pixels is
+% considered (that is, the range is floating). This determines whether to
+% fill relative to the seed point pixel, or relative to the neighbor's
+% value. default false
+% * __Mask__ Operation mask that should be a single-channel 8-bit image, 2
+% pixels wider and 2 pixels taller than image. Flood-filling cannot go
+% across non-zero pixels in the input mask. For example, an edge detector
+% output can be used as a mask to stop filling at edges. It is possible to
+% use the same mask in multiple calls to the function to make sure the
+% filled areas do not overlap. Not set by default.
% * __MaskOnly__ If set, the function does not change the image in the output
-% (`newVal` is ignored), and only fills the output `mask` with the value
-% specified in `MaskFillValue` as described. This option only make sense
-% in function variants that have the mask parameter. default false
+% (`newVal` is ignored), and only fills the output `mask` with the value
+% specified in `MaskFillValue` as described. This option only makes sense in
+% function variants that have the mask parameter. default false
% * __MaskFillValue__ Value between 1 and 255 with which to fill the output
-% `Mask`. This option only make sense in function variants that have the
-% mask parameter. default 0 (which effectively flood-fills the mask by
-% the default filling value of 1)
+% `Mask`.
This option only makes sense in function variants that have the
+% mask parameter. default 0 (which effectively flood-fills the mask by the
+% default filling value of 1)
%
% The function cv.floodFill fills a connected component starting from the seed
% point with the specified color. The connectivity is determined by the
@@ -66,25 +65,25 @@
%
% * in case of a grayscale image and floating range:
%
-% src(x',y') - LoDiff <= src(x,y) <= src(x',y') + UpDiff
+% src(x',y') - LoDiff <= src(x,y) <= src(x',y') + UpDiff
%
% * in case of a grayscale image and fixed range:
%
-% src(seed.x,seed.y) - LoDiff <= src(x,y) <= src(seed.x,seed.y) + UpDiff
+% src(seed.x,seed.y) - LoDiff <= src(x,y) <= src(seed.x,seed.y) + UpDiff
%
% * in case of a color image and floating range:
-% (a conjuction over all channels of the same condition as before)
+% (a conjunction over all channels of the same condition as before)
%
-% src(x',y',1) - LoDiff(1) <= src(x,y,1) <= src(x',y',1) + UpDiff(1), and
-% src(x',y',2) - LoDiff(2) <= src(x,y,2) <= src(x',y',2) + UpDiff(2), and
-% src(x',y',3) - LoDiff(3) <= src(x,y,3) <= src(x',y',3) + UpDiff(3)
+% src(x',y',1) - LoDiff(1) <= src(x,y,1) <= src(x',y',1) + UpDiff(1), and
+% src(x',y',2) - LoDiff(2) <= src(x,y,2) <= src(x',y',2) + UpDiff(2), and
+% src(x',y',3) - LoDiff(3) <= src(x,y,3) <= src(x',y',3) + UpDiff(3)
%
% * in case of a color image and fixed range:
-% (a conjuction over all channels of the same condition as before)
+% (a conjunction over all channels of the same condition as before)
%
-% src(seed.x,seed.y,1) - LoDiff(1) <= src(x,y,1) <= src(seed.x,seed.y,1) + UpDiff(1),
-% src(seed.x,seed.y,2) - LoDiff(2) <= src(x,y,2) <= src(seed.x,seed.y,2) + UpDiff(2),
-% src(seed.x,seed.y,3) - LoDiff(3) <= src(x,y,3) <= src(seed.x,seed.y,3) + UpDiff(3)
+% src(seed.x,seed.y,1) - LoDiff(1) <= src(x,y,1) <= src(seed.x,seed.y,1) + UpDiff(1),
+% src(seed.x,seed.y,2) - LoDiff(2) <= src(x,y,2) <= src(seed.x,seed.y,2) + UpDiff(2),
+% src(seed.x,seed.y,3) - LoDiff(3) <= src(x,y,3) <= src(seed.x,seed.y,3) + UpDiff(3)
%
% where `src(x',y')` is the value of one of pixel neighbors that is already
% known to belong to the component. That is, to be added to the connected
diff --git a/+cv/getAffineTransform.m b/+cv/getAffineTransform.m
index 804b50928..16b9f18db 100644
--- a/+cv/getAffineTransform.m
+++ b/+cv/getAffineTransform.m
@@ -1,24 +1,24 @@
%GETAFFINETRANSFORM Calculates an affine transform from three pairs of corresponding points
%
-% T = cv.getAffineTransform(src, dst)
+% T = cv.getAffineTransform(src, dst)
%
% ## Input
% * __src__ Coordinates of triangle vertices in the source image. A numeric
-% 3-by-2 row vectors or a cell-array of 2-element vectors of length 3
-% `{[x,y], [x,y], [x,y]}`
+% 3-by-2 matrix of row vectors or a cell-array of 2-element vectors of
+% length 3 `{[x,y], [x,y], [x,y]}`
% * __dst__ Coordinates of the corresponding triangle vertices in the
-% destination image. Same type and size as `src`.
+% destination image. Same type and size as `src`.
%
% ## Output
% * __T__ 2-by-3 affine transformation matrix
%
% The function calculates the 2x3 matrix of an affine transform so that:
%
-% [X_i; X_i] = T * [x_i; y_i; 1]
+% [X_i; Y_i] = T * [x_i; y_i; 1]
%
% where:
%
-% dst(i,:) = [X_i, Y_i], src(i,:) = [x_i, y_i] for i=1,2,3
+% dst(i,:) = [X_i, Y_i], src(i,:) = [x_i, y_i] for i=1,2,3
%
% See also: cv.warpAffine, cv.transform, cp2tform, fitgeotrans,
% estimateGeometricTransform
diff --git a/+cv/getBuildInformation.m b/+cv/getBuildInformation.m
index 57c52d082..76dd1f6fb 100644
--- a/+cv/getBuildInformation.m
+++ b/+cv/getBuildInformation.m
@@ -1,10 +1,12 @@
%GETBUILDINFORMATION Returns OpenCV build information
%
-% cv.getBuildInformation()
-% info = cv.getBuildInformation()
+% cv.getBuildInformation()
+% info = cv.getBuildInformation()
%
% ## Output
% * __info__ Build information string.
%
% The function prints/returns OpenCV build information.
%
+% See also: cv.Utils.getBuildInformation, cv.Utils.version, ver, version
+%
diff --git a/+cv/getDefaultNewCameraMatrix.m b/+cv/getDefaultNewCameraMatrix.m
index d40f202f4..2475d428a 100644
--- a/+cv/getDefaultNewCameraMatrix.m
+++ b/+cv/getDefaultNewCameraMatrix.m
@@ -1,7 +1,7 @@
%GETDEFAULTNEWCAMERAMATRIX Returns the default new camera matrix
%
-% newCameraMatrix = cv.getDefaultNewCameraMatrix(cameraMatrix)
-% [...] = cv.getDefaultNewCameraMatrix(..., 'OptionName',optionValue, ...)
+% newCameraMatrix = cv.getDefaultNewCameraMatrix(cameraMatrix)
+% [...] = cv.getDefaultNewCameraMatrix(..., 'OptionName',optionValue, ...)
%
% ## Input
% * __cameraMatrix__ Input camera matrix, a 3x3 double matrix.
@@ -12,8 +12,8 @@
% ## Options
% * __ImgSize__ Camera view image size in pixels `[w,h]`. Default [0,0]
% * __CenterPrincipalPoint__ Location of the principal point in the new camera
-% matrix. The parameter indicates whether this location should be at the
-% image center or not. default false
+% matrix. The parameter indicates whether this location should be at the
+% image center or not. default false
%
% The function returns the camera matrix that is either an exact copy of the
% input `cameraMatrix` (when `CenterPrincipalPoint=false`), or the modified
@@ -21,9 +21,9 @@
%
% In the latter case, the new camera matrix will be:
%
-% [ fx, 0, (ImgSize(1)-1)*0.5 ;
-% 0, fy, (ImgSize(2)-1)*0.5 ;
-% 0, 0, 1 ]
+% [ fx, 0, (ImgSize(1)-1)*0.5 ;
+% 0, fy, (ImgSize(2)-1)*0.5 ;
+% 0, 0, 1 ]
%
% where `fx` and `fy` are (0,0) and (1,1) elements of `cameraMatrix`,
% respectively.
diff --git a/+cv/getDerivKernels.m b/+cv/getDerivKernels.m
index 6dfb00be3..a2bb2000c 100644
--- a/+cv/getDerivKernels.m
+++ b/+cv/getDerivKernels.m
@@ -1,25 +1,25 @@
%GETDERIVKERNELS Returns filter coefficients for computing spatial image derivatives
%
-% [kx, ky] = cv.getDerivKernels('OptionName', optionValue, ...)
+% [kx, ky] = cv.getDerivKernels('OptionName', optionValue, ...)
%
% ## Output
% * __kx__ Output matrix of row filter coefficients. It has the type `KType`.
% * __ky__ Output matrix of column filter coefficients. It has the type
-% `KType`.
+% `KType`.
%
% ## Options
% * __Dx__ Derivative order with respect to x. default 1
% * __Dy__ Derivative order with respect to y. default 1
% * __KSize__ Aperture size. It can be 'Scharr', 1, 3, 5, or 7. default 3.
% * __Normalize__ Flag indicating whether to normalize (scale down) the filter
-% coefficients or not. Theoretically, the coefficients should have the
-% `denominator = 2^(KSize*2-Dx-Dy-2)`.
If you are going to filter
-% floating-point images, you are likely to use the normalized kernels.
-% But if you compute derivatives of an 8-bit image, store the results in
-% a 16-bit image, and wish to preserve all the fractional bits, you may
-% want to set `Normalize=false`. default false
+% coefficients or not. Theoretically, the coefficients should have the
+% `denominator = 2^(KSize*2-Dx-Dy-2)`. If you are going to filter
+% floating-point images, you are likely to use the normalized kernels. But
+% if you compute derivatives of an 8-bit image, store the results in a
+% 16-bit image, and wish to preserve all the fractional bits, you may want
+% to set `Normalize=false`. default false
% * __KType__ Type of filter coefficients. It can be `single` or `double`.
-% default `single`
+% default `single`
%
% The function computes and returns the filter coefficients for spatial image
% derivatives. When `KSize='Scharr'`, the Scharr 3x3 kernels are generated
diff --git a/+cv/getGaborKernel.m b/+cv/getGaborKernel.m
index 91d0d4844..df008429b 100644
--- a/+cv/getGaborKernel.m
+++ b/+cv/getGaborKernel.m
@@ -1,6 +1,6 @@
%GETGABORKERNEL Returns Gabor filter coefficients
%
-% kernel = cv.getGaborKernel('OptionName',optionValue, ...)
+% kernel = cv.getGaborKernel('OptionName',optionValue, ...)
%
% ## Output
% * __kernel__ output kernel of the specified size and type.
@@ -9,15 +9,15 @@
% * __KSize__ Size of the filter returned `[w,h]`. default [21,21]
% * __Sigma__ Standard deviation of the Gaussian envelope. default 5.0
% * __Theta__ Orientation of the normal to the parallel stripes of a Gabor
-% function. default pi/4
+% function. default pi/4
% * __Lambda__ Wavelength of the sinusoidal factor. default 10.0
% * __Gamma__ Spatial aspect ratio. default 0.75
% * __Psi__ Phase offset. default pi/2
% * __KType__ Type of filter coefficients. It can be `single` or `double`.
-% default `double`.
+% default `double`.
%
% For more details about Gabor filter equations and parameters, see
-% [Gabor filter](http://en.wikipedia.org/wiki/Gabor_filter).
+% [Gabor filter](https://en.wikipedia.org/wiki/Gabor_filter).
%
% ## Example
% Try the `gabor_filter_gui.m` sample file to interact with the various
diff --git a/+cv/getGaussianKernel.m b/+cv/getGaussianKernel.m
index 462e83d35..2091bb26b 100644
--- a/+cv/getGaussianKernel.m
+++ b/+cv/getGaussianKernel.m
@@ -1,24 +1,24 @@
%GETGAUSSIANKERNEL Returns Gaussian filter coefficients
%
-% kernel = cv.getGaussianKernel()
-% kernel = cv.getGaussianKernel('OptionName', optionValue, ...)
+% kernel = cv.getGaussianKernel()
+% kernel = cv.getGaussianKernel('OptionName', optionValue, ...)
%
% ## Output
% * __kernel__ Output kernel of the specified size and type.
%
% ## Options
% * __KSize__ Aperture size. It should be odd (`mod(KSize,2)==1`) and
-% positive. default 5.
+% positive. default 5.
% * __Sigma__ Gaussian standard deviation. If it is non-positive, it is
-% computed from `KSize` as `sigma = 0.3 * ((KSize-1) * 0.5 - 1) + 0.8`.
-% default -1.
+% computed from `KSize` as `sigma = 0.3 * ((KSize-1) * 0.5 - 1) + 0.8`.
+% default -1.
% * __KType__ Type of filter coefficients. It can be `single` or `double`.
-% default `double`.
+% default `double`.
%
% The function computes and returns the `KSize x 1` matrix of Gaussian filter
% coefficients:
%
-% G_i = alpha * exp( -(i - (KSize-1)/2)^2 / (2*sigma^2) )
+% G_i = alpha * exp( -(i - (KSize-1)/2)^2 / (2*sigma^2) )
%
% where `i = 0, ..., KSize-1` and `alpha` is the scale factor chosen so that
% `sum_{i}(G_i) = 1`.
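Since the coefficients sum to 1 and a 2D Gaussian kernel is separable, the 1D kernel can be applied along each axis; a brief sketch (an editorial addition, not part of the original patch; it assumes cv.sepFilter2D and cv.filter2D behave as documented elsewhere in this package):

%     img = imread('...');                               % source image
%     k = cv.getGaussianKernel('KSize',7, 'Sigma',1.5);  % 7x1 double, sums to 1
%     K2 = k * k';                         % equivalent 7x7 2D Gaussian kernel
%     out1 = cv.sepFilter2D(img, k, k);    % two 1D passes
%     out2 = cv.filter2D(img, K2);         % one 2D pass; identical result,
%                                          % since the kernel is symmetric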
diff --git a/+cv/getOptimalDFTSize.m b/+cv/getOptimalDFTSize.m index 0c2731a3f..912918d5e 100644 --- a/+cv/getOptimalDFTSize.m +++ b/+cv/getOptimalDFTSize.m @@ -1,6 +1,6 @@ %GETOPTIMALDFTSIZE Returns the optimal DFT size for a given vector size % -% N = cv.getOptimalDFTSize(vecsize) +% N = cv.getOptimalDFTSize(vecsize) % % ## Input % * __vecsize__ vector size. diff --git a/+cv/getOptimalNewCameraMatrix.m b/+cv/getOptimalNewCameraMatrix.m index 318b54781..117217cc1 100644 --- a/+cv/getOptimalNewCameraMatrix.m +++ b/+cv/getOptimalNewCameraMatrix.m @@ -1,45 +1,44 @@ %GETOPTIMALNEWCAMERAMATRIX Returns the new camera matrix based on the free scaling parameter % -% cameraMatrix = cv.getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize) -% [cameraMatrix, validPixROI] = cv.getOptimalNewCameraMatrix(...) -% [...] = cv.getOptimalNewCameraMatrix(..., 'OptionName', optionValue, ...) +% cameraMatrix = cv.getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize) +% [cameraMatrix, validPixROI] = cv.getOptimalNewCameraMatrix(...) +% [...] = cv.getOptimalNewCameraMatrix(..., 'OptionName', optionValue, ...) % % ## Input % * __cameraMatrix__ Input 3x3 camera matrix, `A = [fx 0 cx; 0 fy cy; 0 0 1]`. % * __distCoeffs__ Input vector of distortion coefficients -% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy]` of 4, 5, 8, 12 or 14 -% elements. If the vector is empty, the zero distortion coefficients are -% assumed. +% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy]` of 4, 5, 8, 12 or 14 +% elements. If the vector is empty, the zero distortion coefficients are +% assumed. % * __imageSize__ Original image size `[w,h]`. % % ## Output % * __cameraMatrix__ Output new camera matrix, 3x3. % * __validPixROI__ Optional output rectangle `[x,y,w,h]` that outlines -% all-good-pixels region in the undistorted image. See `roi1`, `roi2` -% description in cv.stereoRectify. +% all-good-pixels region in the undistorted image. See `roi1`, `roi2` +% description in cv.stereoRectify. % % ## Options % * __Alpha__ Free scaling parameter between 0 (when all the pixels in the -% undistorted image are valid) and 1 (when all the source image -% pixels are retained in the undistorted image). See cv.stereoRectify -% for details. default 0.8 +% undistorted image are valid) and 1 (when all the source image pixels are +% retained in the undistorted image). See cv.stereoRectify for details. +% default 0.8 % * __NewImageSize__ Image size after rectification `[w,h]`. By default, it is -% set to `imageSize`. -% * __CenterPrincipalPoint__ Optional flag that indicates whether in the -% new camera matrix the principal point should be at the image -% center or not. By default, the principal point is chosen to best -% fit a subset of the source image (determined by `Alpha`) to the -% corrected image. default false. +% set to `imageSize`. +% * __CenterPrincipalPoint__ Optional flag that indicates whether in the new +% camera matrix the principal point should be at the image center or not. By +% default, the principal point is chosen to best fit a subset of the source +% image (determined by `Alpha`) to the corrected image. default false. % -% The function computes and returns the optimal new camera matrix based on -% the free scaling parameter. By varying this parameter, you may retrieve -% only sensible pixels `Alpha=0`, keep all the original image pixels if -% there is valuable information in the corners `Alpha=1`, or get something -% in between. 
When `Alpha>0`, the undistortion result is likely to have some -% black pixels corresponding to "virtual" pixels outside of the captured -% distorted image. The original camera matrix, distortion coefficients, -% the computed new camera matrix, and `newImageSize` should be passed to -% cv.initUndistortRectifyMap to produce the maps for cv.remap. +% The function computes and returns the optimal new camera matrix based on the +% free scaling parameter. By varying this parameter, you may retrieve only +% sensible pixels (`Alpha=0`), keep all the original image pixels if there is +% valuable information in the corners (`Alpha=1`), or get something in between. +% When `Alpha>0`, the undistortion result is likely to have some black pixels +% corresponding to "virtual" pixels outside of the captured distorted image. +% The original camera matrix, distortion coefficients, the computed new camera +% matrix, and `newImageSize` should be passed to cv.initUndistortRectifyMap to +% produce the maps for cv.remap. % % See also: cv.stereoRectify, cv.initUndistortRectifyMap, cv.remap % diff --git a/+cv/getPerspectiveTransform.m b/+cv/getPerspectiveTransform.m index 0bf08d630..91bb9cd8b 100644 --- a/+cv/getPerspectiveTransform.m +++ b/+cv/getPerspectiveTransform.m @@ -1,24 +1,24 @@ %GETPERSPECTIVETRANSFORM Calculates a perspective transform from four pairs of the corresponding points % -% T = cv.getPerspectiveTransform(src, dst) +% T = cv.getPerspectiveTransform(src, dst) % % ## Input % * __src__ Coordinates of quadrangle vertices in the source image. A numeric -% 4-by-2 row vectors or a cell-array of 2-element vectors of length 4 -% `{[x,y], [x,y], [x,y], [x,y]}` +% 4-by-2 matrix or a cell-array of 2-element vectors of length 4 +% `{[x,y], [x,y], [x,y], [x,y]}` % * __dst__ Coordinates of the corresponding quadrangle vertices in the -% destination image. Same type and size as `src`. +% destination image. Same type and size as `src`. % % ## Output % * __T__ 3-by-3 perspective transformation matrix % % The function calculates the 3x3 matrix of a perspective transform so that: % -% t_i * [X_i; Y_i; 1] = T * [x_i; y_i; 1] +% t_i * [X_i; Y_i; 1] = T * [x_i; y_i; 1] % % where: % -% dst(i,:) = [X_i, Y_i], src(i,:) = [x_i, y_i] for i=1,2,3 +% dst(i,:) = [X_i, Y_i], src(i,:) = [x_i, y_i] for i=1,2,3,4 % % See also: cv.findHomography, cv.warpPerspective, cv.perspectiveTransform, % cp2tform, fitgeotrans, estimateGeometricTransform
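To see the relation above in action, a small sketch (the quadrangle coordinates are made up for illustration):

    % map the unit square onto an arbitrary quadrilateral
    src = [0 0; 1 0; 1 1; 0 1];
    dst = [0 0; 2 0.2; 1.8 1.6; 0.1 1.2];
    T = cv.getPerspectiveTransform(src, dst);
    % apply T to the source points and de-homogenize;
    % rows of dst2 should reproduce dst (up to rounding)
    p = (T * [src, ones(4,1)].').';
    dst2 = bsxfun(@rdivide, p(:,1:2), p(:,3));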
diff --git a/+cv/getRectSubPix.m b/+cv/getRectSubPix.m index 42ebdfd0a..07883deb7 100644 --- a/+cv/getRectSubPix.m +++ b/+cv/getRectSubPix.m @@ -1,28 +1,28 @@ %GETRECTSUBPIX Retrieves a pixel rectangle from an image with sub-pixel accuracy % -% dst = cv.getRectSubPix(src, patchSize, center) -% dst = cv.getRectSubPix(..., 'OptionName',optionValue, ...) +% dst = cv.getRectSubPix(src, patchSize, center) +% dst = cv.getRectSubPix(..., 'OptionName',optionValue, ...) % % ## Input % * __src__ Source image, 8-bit integer or 32-bit floating-point, 1- or -% 3-channels. +% 3-channels. % * __patchSize__ Size of the extracted patch `[w,h]`. % * __center__ Floating-point coordinates of the center of the extracted -% rectangle within the source image. The center `[x,y]` must be inside -% the image. +% rectangle within the source image. The center `[x,y]` must be inside the +% image. % % ## Output -% * __dst__ Extracted patch that has the size `PatchSize`, the same number -% of channels as `src`, and the specified type in `PatchType`. +% * __dst__ Extracted patch that has the size `PatchSize`, the same number of +% channels as `src`, and the specified type in `PatchType`. % % ## Options % * __PatchType__ Depth of the extracted pixels. By default (-1), they have -% the same depth as `src`. Supports either `uint8` or `single`. +% the same depth as `src`. Supports either `uint8` or `single`. % % The function cv.getRectSubPix extracts pixels from `src`: % -% dst(x,y) = src(x + center(1) - (size(dst,2)-1)*0.5, -% y + center(2) - (size(dst,1)-1)*0.5) +% dst(x,y) = src(x + center(1) - (size(dst,2)-1)*0.5, +% y + center(2) - (size(dst,1)-1)*0.5) % % where the values of the pixels at non-integer coordinates are retrieved using % bilinear interpolation. Every channel of multi-channel images is processed diff --git a/+cv/getRotationMatrix2D.m b/+cv/getRotationMatrix2D.m index a818a9c5c..39ee64400 100644 --- a/+cv/getRotationMatrix2D.m +++ b/+cv/getRotationMatrix2D.m @@ -1,11 +1,11 @@ %GETROTATIONMATRIX2D Calculates an affine matrix of 2D rotation % -% M = cv.getRotationMatrix2D(center, angle, scale) +% M = cv.getRotationMatrix2D(center, angle, scale) % % ## Input % * __center__ Center of the rotation in the source image `[x,y]`. % * __angle__ Rotation angle in degrees. Positive values mean counter-clockwise -% rotation (the coordinate origin is assumed to be the top-left corner). +% rotation (the coordinate origin is assumed to be the top-left corner). % * __scale__ Isotropic scale factor. % % ## Output @@ -13,13 +13,13 @@ % % The function calculates the following matrix: % -% [ a, b, (1-a)*center(1) - b*center(2) ; -% -b, a, b*center(1) + (1-a)*center(2) ] +% [ a, b, (1-a)*center(1) - b*center(2) ; +% -b, a, b*center(1) + (1-a)*center(2) ] % % where: % -% a = scale * cos(angle) -% b = scale * sin(angle) +% a = scale * cos(angle) +% b = scale * sin(angle) % % The transformation maps the rotation center to itself. If this is not the % target, adjust the shift. diff --git a/+cv/getStructuringElement.m b/+cv/getStructuringElement.m index 7589b6db8..febccfbb3 100644 --- a/+cv/getStructuringElement.m +++ b/+cv/getStructuringElement.m @@ -1,25 +1,23 @@ %GETSTRUCTURINGELEMENT Returns a structuring element of the specified size and shape for morphological operations % -% elem = cv.getStructuringElement('OptionName', optionValue, ...) -% +% elem = cv.getStructuringElement('OptionName', optionValue, ...) % % ## Output % * __elem__ Output structuring element of specified shape and size. % % ## Options % * __Shape__ Element shape, default 'Rect'. Could be one of: -% * __Rect__ a rectangular structuring element: `E(i,j)=1` -% * __Cross__ a cross-shaped structuring element: `E(i,j)=1` if -% `i=Anchor(2)` or `j=Anchor(1)`, `E(i,j)=0` otherwise. -% * __Ellipse__ an elliptic structuring element, that is, a filled -% ellipse inscribed into the rectangle -% `[0, 0, KSize(1), KSize(2)]`. +% * __Rect__ a rectangular structuring element: `E(i,j)=1` +% * __Cross__ a cross-shaped structuring element: `E(i,j)=1` if +% `i=Anchor(2)` or `j=Anchor(1)`, `E(i,j)=0` otherwise. +% * __Ellipse__ an elliptic structuring element, that is, a filled ellipse +% inscribed into the rectangle `[0, 0, KSize(1), KSize(2)]`. % * __KSize__ Size of the structuring element `[w,h]`. default [3,3]. % * __Anchor__ Anchor position within the element. The default value (-1,-1) -% means that the anchor is at the center. Note that only the shape of a -% cross-shaped element depends on the anchor position.
In other cases -% the anchor just regulates how much the result of the morphological -% operation is shifted. +% means that the anchor is at the center. Note that only the shape of a +% cross-shaped element depends on the anchor position. In other cases the +% anchor just regulates how much the result of the morphological operation +% is shifted. % % The function constructs and returns the structuring element that can be % further passed to cv.erode, cv.dilate or cv.morphologyEx. But you can also diff --git a/+cv/getTextSize.m b/+cv/getTextSize.m index caa1ddb65..7e2c5636d 100644 --- a/+cv/getTextSize.m +++ b/+cv/getTextSize.m @@ -1,7 +1,7 @@ %GETTEXTSIZE Calculates the width and height of a text string % -% [siz, baseLine] = cv.getTextSize(text) -% [...] = cv.getTextSize(..., 'OptionName', optionValue, ...) +% [siz, baseLine] = cv.getTextSize(text) +% [...] = cv.getTextSize(..., 'OptionName', optionValue, ...) % % ## Input % * __text__ Input text string. @@ -9,25 +9,25 @@ % ## Output % * __siz__ Size of a box that contains the specified text `[w,h]`. % * __baseLine__ y-coordinate of the baseline relative to the bottom-most -% text point. The baseline is the line on which the bottoms of -% characters such as 'a' and 'b' are aligned. Characters such as 'y' and -% 'g' hang below the baseline. +% text point. The baseline is the line on which the bottoms of characters +% such as 'a' and 'b' are aligned. Characters such as 'y' and 'g' hang below +% the baseline. % % ## Options % * __FontFace__ Font to use. One of the following: -% * __HersheySimplex__ (default) -% * __HersheyPlain__ -% * __HersheyDuplex__ -% * __HersheyComplex__ -% * __HersheyTriplex__ -% * __HersheyComplexSmall__ -% * __HersheyScriptSimplex__ -% * __HersheyScriptComplex__ +% * __HersheySimplex__ (default) +% * __HersheyPlain__ +% * __HersheyDuplex__ +% * __HersheyComplex__ +% * __HersheyTriplex__ +% * __HersheyComplexSmall__ +% * __HersheyScriptSimplex__ +% * __HersheyScriptComplex__ % * __FontStyle__ Font style. One of: -% * __Regular__ (default) -% * __Italic__ -% * __FontScale__ Font scale factor that is multiplied by the -% font-specific base size. default 1.0 +% * __Regular__ (default) +% * __Italic__ +% * __FontScale__ Font scale factor that is multiplied by the font-specific +% base size. default 1.0 % * __Thickness__ Thickness of lines used to render the text. default 1 % % The function cv.getTextSize calculates and returns the size of a box that diff --git a/+cv/getValidDisparityROI.m b/+cv/getValidDisparityROI.m index a658b3536..f54945f7b 100644 --- a/+cv/getValidDisparityROI.m +++ b/+cv/getValidDisparityROI.m @@ -1,26 +1,26 @@ %GETVALIDDISPARITYROI Computes valid disparity ROI from the valid ROIs of the rectified images % -% r = cv.getValidDisparityROI(roi1, roi2) -% r = cv.getValidDisparityROI(roi1, roi2, 'OptionName',optionValue, ...) +% r = cv.getValidDisparityROI(roi1, roi2) +% r = cv.getValidDisparityROI(roi1, roi2, 'OptionName',optionValue, ...) % % ## Input % * __roi1__, __roi2__ rectangles inside the rectified images where all the -% the pixels are valid `[x,y,w,h]` (as returned by cv.stereoRectify). +% pixels are valid `[x,y,w,h]` (as returned by cv.stereoRectify). % % ## Output % * __r__ computed rectangle inside the disparity of valid ROI `[x,y,w,h]`. % % ## Options % * __MinDisparity__ Minimum possible disparity value. Normally, it is zero -% but sometimes rectification algorithms can shift images, so this -% parameter needs to be adjusted accordingly. default 0 +% but sometimes rectification algorithms can shift images, so this parameter +% needs to be adjusted accordingly. default 0 % * __NumDisparities__ Maximum disparity minus minimum disparity. The value is -% always greater than zero. default 64 +% always greater than zero. default 64 % * __BlockSize__ the linear size of the matched block size. The size should -% be odd (as the block is centered at the current pixel). Larger block -% size implies smoother, though less accurate disparity map. Smaller -% block size gives more detailed disparity map, but there is is higher -% chance for algorithm to find a wrong correspondence. default 21. +% be odd (as the block is centered at the current pixel). Larger block size +% implies smoother, though less accurate disparity map. Smaller block size +% gives more detailed disparity map, but there is a higher chance for the +% algorithm to find a wrong correspondence. default 21. % % See also: cv.stereoRectify, cv.StereoBM, cv.StereoSGBM, cv.filterSpeckles, % cv.validateDisparity
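A minimal usage sketch (the two input rectangles are hypothetical; in practice they are the `roi1`/`roi2` outputs of cv.stereoRectify):

    roi1 = [0 0 640 480];   % valid ROI of the left rectified image (made up)
    roi2 = [16 0 624 480];  % valid ROI of the right rectified image (made up)
    r = cv.getValidDisparityROI(roi1, roi2, ...
        'MinDisparity',0, 'NumDisparities',64, 'BlockSize',21);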
diff --git a/+cv/glob.m b/+cv/glob.m index 7b9e04e8e..1157da8cd 100644 --- a/+cv/glob.m +++ b/+cv/glob.m @@ -1,18 +1,18 @@ %GLOB Find all pathnames matching a specified pattern % -% result = cv.glob(pattern) -% result = cv.glob(pattern, 'OptionName',optionValue, ...) +% result = cv.glob(pattern) +% result = cv.glob(pattern, 'OptionName',optionValue, ...) % % ## Input -% * __pattern__ Pathname either absolute or relative, and can -% contain wildcard characters (e.g 'Test*.m') +% * __pattern__ Pathname either absolute or relative, and can contain wildcard +% characters (e.g. 'Test*.m') % % ## Output % * __result__ output sorted matched pathnames. Cell array of strings. % % ## Options % * __Recursive__ If true, search files in subdirectories as well. -% default false +% default false % % See also: dir %
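For instance, a one-line sketch that lists all M-files under the current directory and its subdirectories:

    files = cv.glob(fullfile(pwd, '*.m'), 'Recursive',true);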
diff --git a/+cv/goodFeaturesToTrack.m b/+cv/goodFeaturesToTrack.m index 78771addb..9fc8f7f3c 100644 --- a/+cv/goodFeaturesToTrack.m +++ b/+cv/goodFeaturesToTrack.m @@ -1,39 +1,40 @@ %GOODFEATURESTOTRACK Determines strong corners on an image % -% corners = cv.goodFeaturesToTrack(image) -% corners = cv.goodFeaturesToTrack(image, 'OptionName', optionValue, ...) +% corners = cv.goodFeaturesToTrack(image) +% corners = cv.goodFeaturesToTrack(image, 'OptionName', optionValue, ...) % % ## Input % * __image__ Input 8-bit or floating-point 32-bit, single-channel image. % % ## Output % * __corners__ Output vector of detected corners. A cell array of 2-elements -% vectors `{[x,y], ...}`. +% vectors `{[x,y], ...}`. % % ## Options % * __MaxCorners__ Maximum number of corners to return. If there are more -% corners than are found, the strongest of them is returned. -% default 1000. `MaxCorners <= 0` implies that no limit on the maximum -% is set and all detected corners are returned. +% corners found than `MaxCorners`, the strongest of them are returned. +% default 1000. `MaxCorners <= 0` implies that no limit on the maximum is +% set and all detected corners are returned. % * __QualityLevel__ Parameter characterizing the minimal accepted quality of -% image corners. The parameter value is multiplied by the best corner -% quality measure, which is the minimal eigenvalue (see -% cv.cornerMinEigenVal) or the Harris function response (see -% cv.cornerHarris). The corners with the quality measure less than the -% product are rejected. For example, if the best corner has the quality -% measure = 1500, and the `QualityLevel=0.01`, then all the corners with -% the quality measure less than 15 are rejected. default 0.01 +% image corners. The parameter value is multiplied by the best corner +% quality measure, which is the minimal eigenvalue (see cv.cornerMinEigenVal) +% or the Harris function response (see cv.cornerHarris). The corners with +% the quality measure less than the product are rejected. For example, if +% the best corner has the quality measure = 1500, and the +% `QualityLevel=0.01`, then all the corners with the quality measure less +% than 15 are rejected. default 0.01 % * __MinDistance__ Minimum possible Euclidean distance between the returned -% corners. default 2.0 +% corners. default 2.0 % * __Mask__ Optional region of interest. If the image is not empty (it needs -% to have the type `uint8`/`logical` and the same size as `image`), it -% specifies the region in which the corners are detected. It is not set -% by default. +% to have the type `uint8`/`logical` and the same size as `image`), it +% specifies the region in which the corners are detected. It is not set by +% default. % * __BlockSize__ Size of an average block for computing a derivative -% covariation matrix over each pixel neighborhood. See -% cv.cornerEigenValsAndVecs. default 3 +% covariation matrix over each pixel neighborhood. See +% cv.cornerEigenValsAndVecs. default 3 +% * __GradientSize__ Gradient window size to use on the input. default 3 % * __UseHarrisDetector__ Parameter indicating whether to use a Harris detector -% (see cv.cornerHarris) or cv.cornerMinEigenVal, default false +% (see cv.cornerHarris) or cv.cornerMinEigenVal. default false % * __K__ Free parameter of the Harris detector. default 0.04 % % The function finds the most prominent corners in the image or in the specified diff --git a/+cv/grabCut.m b/+cv/grabCut.m index d6971279d..5176a3915 100644 --- a/+cv/grabCut.m +++ b/+cv/grabCut.m @@ -1,52 +1,50 @@ %GRABCUT Runs the GrabCut algorithm % -% mask = cv.grabCut(img, rect) -% mask = cv.grabCut(img, mask) -% [mask, bgdmodel, fgdmodel] = cv.grabCut(...) -% [...] = cv.grabCut(..., 'OptionName', optionValue, ...) +% mask = cv.grabCut(img, rect) +% mask = cv.grabCut(img, mask) +% [mask, bgdmodel, fgdmodel] = cv.grabCut(...) +% [...] = cv.grabCut(..., 'OptionName', optionValue, ...) % % ## Input % * __img__ Input 8-bit 3-channel image. % * __rect__ ROI containing a segmented object. A 1-by-4 vector `[x y w h]`. -% It will automatically create the `mask`. The pixels outside of the ROI -% are marked as "obvious background" with label 0, and label 3 for -% foreground (see `mask`). Using this variant will set `Mode` to -% 'InitWithRect'. +% It will automatically create the `mask`. The pixels outside of the ROI are +% marked as "obvious background" with label 0, and the pixels inside the ROI +% as "possible foreground" with label 3 (see `mask`). Using this variant +% will set `Mode` to 'InitWithRect'. % * __mask__ Input 8-bit single-channel mask of same size as `img` and type -% `uint8`. Its elements may have one of the following values: -% * __0__ an obvious background pixel -% * __1__ an obvious foreground (object) pixel -% * __2__ a possible background pixel -% * __3__ a possible foreground pixel +% `uint8`. Its elements may have one of the following values: +% * __0__ an obvious background pixel +% * __1__ an obvious foreground (object) pixel +% * __2__ a possible background pixel +% * __3__ a possible foreground pixel % % ## Output % * __mask__ output 8-bit single-channel updated mask. The mask is initialized -% by the function when `Mode` is set to 'InitWithRect' (see `rect`).
+% by the function when `Mode` is set to 'InitWithRect' (see `rect`). % * __bgdmodel__ Output array for the background model, to be used for next -% iteration. Do not modify it while you are processing the same image. +% iteration. Do not modify it while you are processing the same image. % * __fgdmodel__ Output array for the foreground model, to be used for next -% iteration. Do not modify it while you are processing the same image. +% iteration. Do not modify it while you are processing the same image. % % ## Options % * __BgdModel__ Initial array for the background model. A 1x65 double vector. % * __FgdModel__ Initial array for the foreground model. A 1x65 double vector. % * __IterCount__ Number of iterations the algorithm should make before -% returning the result. Note that the result can be refined with further -% calls with `Mode` as 'InitWithMask' or 'Eval'. Default 10 +% returning the result. Note that the result can be refined with further +% calls with `Mode` as 'InitWithMask' or 'Eval'. Default 10 % * __Mode__ Operation mode, default 'Eval'. Could be one of the following: -% * __InitWithRect__ The function initializes the state and the mask -% using the provided rectangle. After that it runs `IterCount` -% iterations of the algorithm. This should only be used with the -% variant of the function that takes `rect` as input. -% * __InitWithMask__ The function initializes the state using the -% provided `mask`. Then, all the pixels outside of the ROI are -% automatically initialized as background with label 0. This -% should only be used with the variant of the function that takes -% `mask` as input. -% * __Eval__ The value means that the algorithm should just resume. +% * __InitWithRect__ The function initializes the state and the mask using +% the provided rectangle. After that it runs `IterCount` iterations of the +% algorithm. This should only be used with the variant of the function +% that takes `rect` as input. +% * __InitWithMask__ The function initializes the state using the provided +% `mask`. Then, all the pixels outside of the ROI are automatically +% initialized as background with label 0. This should only be used with +% the variant of the function that takes `mask` as input. +% * __Eval__ The value means that the algorithm should just resume. % % The function implements the GrabCut image segmentation algorithm: -% [GrabCut](http://en.wikipedia.org/wiki/GrabCut). +% [GrabCut](https://en.wikipedia.org/wiki/GrabCut). % % See also: cv.watershed, lazysnapping, imageSegmenter, regionfill % diff --git a/+cv/groupRectangles.m b/+cv/groupRectangles.m index f669d711c..357543902 100644 --- a/+cv/groupRectangles.m +++ b/+cv/groupRectangles.m @@ -1,35 +1,35 @@ %GROUPRECTANGLES Groups the object candidate rectangles % -% rects = cv.groupRectangles(rects) -% [rects,weights,levelWeights] = cv.groupRectangles(...) -% [...] = cv.groupRectangles(..., 'OptionName', optionValue, ...) +% rects = cv.groupRectangles(rects) +% [rects,weights,levelWeights] = cv.groupRectangles(...) +% [...] = cv.groupRectangles(..., 'OptionName', optionValue, ...) % % ## Input % * __rects__ Input cell array of rectangles, where each rectangle is -% represented as a 4-element vector `{[x,y,w,h], ...}`, or a numeric -% Nx4/Nx1x4/1xNx4 array. +% represented as a 4-element vector `{[x,y,w,h], ...}`, or a numeric +% Nx4/Nx1x4/1xNx4 array. % % ## Output % * __rects__ Output array of retained and grouped rectangles. Same format as -% input `rects` (either Nx4 numeric array or cell array). 
Grouped -% rectangles are the average of all rectangles in that cluster. +% input `rects` (either Nx4 numeric array or cell array). Grouped rectangles +% are the average of all rectangles in that cluster. % * __weights__ optional output of filled/updated weights, with same length as -% output `rects`. See `Weights` option. Corresponding grouped weights -% are the maximum weights of all rectangles in that cluster. +% output `rects`. See `Weights` option. Corresponding grouped weights are +% the maximum weights of all rectangles in that cluster. % * __levelWeights__ optional output of filled/updated level weights, with -% same length as output `rects`. See `LevelWeights` option. +% same length as output `rects`. See `LevelWeights` option. % % ## Options % * __Thresh__ Minimum possible number of rectangles in a group minus 1. The -% threshold is used in a group of rectangles to decide whether to retain -% it or not. If less than or equal to zero, no grouping is performed. -% default 1 (i.e only groups with two or more rectangles are kept). -% * __EPS__ Relative difference between sides of the rectangles to merge -% them into a group. default 0.2 +% threshold is used in a group of rectangles to decide whether to retain it +% or not. If less than or equal to zero, no grouping is performed. default 1 +% (i.e only groups with two or more rectangles are kept). +% * __EPS__ Relative difference between sides of the rectangles to merge them +% into a group. default 0.2 % * __Weights__ optional vector of associated weights of same length as input -% `rects`. Not set by default +% `rects`. Not set by default % * __LevelWeights__ optional vector of doubles of same length as input -% `rects`. Not set by default +% `rects`. Not set by default % % The function is a wrapper for the generic partition function. It clusters % all the input rectangles using the rectangle equivalence criteria that diff --git a/+cv/groupRectangles_meanshift.m b/+cv/groupRectangles_meanshift.m index 89457b8e2..570a8f7f8 100644 --- a/+cv/groupRectangles_meanshift.m +++ b/+cv/groupRectangles_meanshift.m @@ -1,23 +1,23 @@ %GROUPRECTANGLES_MEANSHIFT Groups the object candidate rectangles using meanshift % -% [rects, weights] = cv.groupRectangles_meanshift(rects, weights, scales) -% [...] = cv.groupRectangles_meanshift(..., 'OptionName', optionValue, ...) +% [rects, weights] = cv.groupRectangles_meanshift(rects, weights, scales) +% [...] = cv.groupRectangles_meanshift(..., 'OptionName', optionValue, ...) % % ## Input % * __rects__ Input cell array of rectangles, where each rectangle is -% represented as a 4-element vector `{[x,y,w,h], ...}`, or a numeric -% Nx4/Nx1x4/1xNx4 array. +% represented as a 4-element vector `{[x,y,w,h], ...}`, or a numeric +% Nx4/Nx1x4/1xNx4 array. % * __weights__ Input vector of associated weights. % * __scales__ Input vector of corresponding rectangles scales. % % ## Output % * __rects__ Output array of retained and grouped rectangles. Same format as -% input `rects` (either Nx4 numeric array or cell array). +% input `rects` (either Nx4 numeric array or cell array). % * __weights__ Output updated weights. % % ## Options % * __DetectThreshold__ detection threshold (weight) above which resulting -% modes are kept. default 0.0 +% modes are kept. default 0.0 % * __WinDetSize__ window size `[w,h]`. 
default [64,128] % % See also: cv.groupRectangles, cv.SimilarRects diff --git a/+cv/illuminationChange.m b/+cv/illuminationChange.m index 9ab53f9db..17b26495e 100644 --- a/+cv/illuminationChange.m +++ b/+cv/illuminationChange.m @@ -1,7 +1,7 @@ %ILLUMINATIONCHANGE Illumination Change % -% dst = cv.illuminationChange(src, mask) -% dst = cv.illuminationChange(src, mask, 'OptionName',optionValue, ...) +% dst = cv.illuminationChange(src, mask) +% dst = cv.illuminationChange(src, mask, 'OptionName',optionValue, ...) % % ## Input % * __src__ Input 8-bit 3-channel image. @@ -14,8 +14,8 @@ % * __Alpha__ Value ranges between 0-2. default 0.2 % * __Beta__ Value ranges between 0-2. default 0.4 % * __FlipChannels__ whether to flip the order of color channels in inputs -% `src` and `mask` and output `dst`, between MATLAB's RGB order and -% OpenCV's BGR (input: RGB->BGR, output: BGR->RGB). default false +% `src` and `mask` and output `dst`, between MATLAB's RGB order and OpenCV's +% BGR (input: RGB->BGR, output: BGR->RGB). default false % % Applying an appropriate non-linear transformation to the gradient field % inside the selection and then integrating back with a Poisson solver, diff --git a/+cv/imdecode.m b/+cv/imdecode.m index 430c4d095..d5662aa7e 100644 --- a/+cv/imdecode.m +++ b/+cv/imdecode.m @@ -1,7 +1,7 @@ %IMDECODE Reads an image from a buffer in memory % -% img = cv.imdecode(buf) -% img = cv.imdecode(buf, 'OptionName',optionValue, ...) +% img = cv.imdecode(buf) +% img = cv.imdecode(buf, 'OptionName',optionValue, ...) % % ## Input % * __buf__ Input byte array of an encoded image (`uint8` vector). @@ -11,36 +11,36 @@ % % ## Options % * __Unchanged__ If set, return the loaded image as is (with alpha channel, -% otherwise it gets cropped). Both the depth and number of channels are -% unchanged as determined by the decoder. default false +% otherwise it gets cropped). Both the depth and number of channels are +% unchanged as determined by the decoder. default false % * __AnyDepth__ If set, return 16-bit/32-bit image when the input has the -% corresponding depth, otherwise convert it to 8-bit. default false +% corresponding depth, otherwise convert it to 8-bit. default false % * __AnyColor__ If set, the image is read in any possible color format. -% default false +% default false % * __Color__ If set, always convert image to the 3 channel BGR color image. -% default true +% default true % * __Grayscale__ If set, always convert image to the single channel grayscale -% image. default false +% image. default false % * __IgnoreOrientation__ If set, do not rotate the image according to EXIF's -% orientation flag. default false +% orientation flag. default false % * __Flags__ Advanced option to directly set the flag specifying the depth -% and color type of a loaded image. Note that setting this integer flag -% overrides all the other flag options. Not set by default: -% * `>0`: Return a 3-channel color image. Note that in the current -% implementation the alpha channel, if any, is stripped from the -% output image. For example, a 4-channel RGBA image is loaded as -% RGB if `Flags >= 0`. -% * `=0`: Return a grayscale image -% * `<0`: Return the loaded image as is (with alpha channel if present). +% and color type of a loaded image. Note that setting this integer flag +% overrides all the other flag options. Not set by default: +% * `>0`: Return a 3-channel color image. Note that in the current +% implementation the alpha channel, if any, is stripped from the +% output image. 
For example, a 4-channel RGBA image is loaded as +% RGB if `Flags >= 0`. +% * `=0`: Return a grayscale image +% * `<0`: Return the loaded image as is (with alpha channel if present). % * __FlipChannels__ in case the output is color image, flips the color order -% from OpenCV's BGR/BGRA to MATLAB's RGB/RGBA order. default true +% from OpenCV's BGR/BGRA to MATLAB's RGB/RGBA order. default true % % The function reads an image from the specified buffer in the memory. If % the buffer is too short or contains invalid data, an error is thrown. % % See cv.imread for the list of supported formats and flags description. % -% ## Note +% ### Note % In the case of color images, the decoded images will have the channels % stored in BGR order. If `FlipChannels` is true, the order is flipped to % RGB. diff --git a/+cv/imencode.m b/+cv/imencode.m index ccbaaf1f2..ceb0bacd7 100644 --- a/+cv/imencode.m +++ b/+cv/imencode.m @@ -1,81 +1,80 @@ %IMENCODE Encodes an image into a memory buffer % -% buf = cv.imencode(ext, img) -% buf = cv.imencode(ext, img, 'OptionName', optionValue, ...) -% [buf, success] = cv.imencode(...) +% buf = cv.imencode(ext, img) +% buf = cv.imencode(ext, img, 'OptionName', optionValue, ...) +% [buf, success] = cv.imencode(...) % % ## Input % * __ext__ File extension that defines the output format. For example: -% '.bmp', '.jpg', '.png', '.tif', etc. +% '.bmp', '.jpg', '.png', '.tif', etc. % * __img__ Image to be encoded. % % ## Output % * __buf__ Output buffer of the compressed image. A row vector of type -% `uint8` that contains encoded image as an array of bytes. -% If the image cannot be encoded, the function throws an error. +% `uint8` that contains encoded image as an array of bytes. +% If the image cannot be encoded, the function throws an error. % * __success__ optional output flag, true on success, false otherwise. If not -% requested, the function throws an error on fail. +% requested, the function throws an error on fail. % % ## Options % * __FlipChannels__ in case the input is color image, flips the color order -% from MATLAB's RGB/RGBA to OpenCV's BGR/BGRA order. default true +% from MATLAB's RGB/RGBA to OpenCV's BGR/BGRA order. default true % % The following format-specific save parameters are currently supported: % % * __JpegQuality__ For JPEG, it can be a quality from 0 to 100 (the higher is -% the better). Default value is 95. +% the better). Default value is 95. % * __JpegProgressive__ Enable JPEG features, 0 or 1, default is false. % * __JpegOptimize__ Enable JPEG features, 0 or 1, default is false. -% * __JpegResetInterval__ JPEG restart interval, 0 - 65535, -% default is 0 (no restart). -% * __JpegLumaQuality__ Separate luma quality level, 0 - 100, -% default is 0 (don't use). -% * __JpegChromaQuality__ Separate chroma quality level, 0 - 100, -% default is 0 (don't use). +% * __JpegResetInterval__ JPEG restart interval, 0 - 65535, default is 0 +% (no restart). +% * __JpegLumaQuality__ Separate luma quality level, 0 - 100, default is 0 +% (don't use). +% * __JpegChromaQuality__ Separate chroma quality level, 0 - 100, default is 0 +% (don't use). % * __PngCompression__ For PNG, it can be the compression level from 0 to 9. -% A higher value means a smaller size and longer compression time. -% Default value is 3. +% A higher value means a smaller size and longer compression time. +% Default value is 3. % * __PngStrategy__ For PNG; used to tune the compression algorithm. 
These -% flags will be modify the way of PNG image compression and will be -% passed to the underlying zlib processing stage. The strategy parameter -% only affects the compression ratio but not the correctness of the -% compressed output even if it is not set appropriately. One of: -% * __Default__ (default) Use this value for normal data. -% * __Filtered__ Use this value for data produced by a filter (or -% predictor). Filtered data consists mostly of small values with a -% somewhat random distribution. In this case, the compression -% algorithm is tuned to compress them better. The effect of -% `Filtered` is to force more Huffman coding and less string -% matching; it is somewhat intermediate between `Default` and -% `HuffmanOnly`. -% * __HuffmanOnly__ Use this value to force Huffman encoding only -% (no string match). -% * __RLE__ Use this value to limit match distances to one (run-length -% encoding). `RLE` is designed to be almost as fast as -% `HuffmanOnly`, but give better compression for PNG image data. -% * __Fixed__ Using this value prevents the use of dynamic Huffman -% codes, allowing for a simpler decoder for special applications. +% flags modify the way the PNG image is compressed and are passed +% to the underlying zlib processing stage. The strategy parameter only +% affects the compression ratio but not the correctness of the compressed +% output even if it is not set appropriately. One of: +% * __Default__ (default) Use this value for normal data. +% * __Filtered__ Use this value for data produced by a filter (or predictor). +% Filtered data consists mostly of small values with a somewhat random +% distribution. In this case, the compression algorithm is tuned to +% compress them better. The effect of `Filtered` is to force more Huffman +% coding and less string matching; it is somewhat intermediate between +% `Default` and `HuffmanOnly`. +% * __HuffmanOnly__ Use this value to force Huffman encoding only +% (no string match). +% * __RLE__ Use this value to limit match distances to one (run-length +% encoding). `RLE` is designed to be almost as fast as `HuffmanOnly`, but +% gives better compression for PNG image data. +% * __Fixed__ Using this value prevents the use of dynamic Huffman codes, +% allowing for a simpler decoder for special applications. % * __PngBilevel__ Binary level PNG, 0 or 1, controls packing of pixels per -% bytes. If false, PNG files pack pixels of bit-depths 1, 2, and 4 into -% bytes as small as possible. default is false. +% bytes. If false, PNG files pack pixels of bit-depths 1, 2, and 4 into +% bytes as small as possible. default is false. % * __PxmBinary__ For PPM, PGM, or PBM, it can be a binary format flag, 0 or 1, -% to specify ASCII or binary encoding. default is true. +% to specify ASCII or binary encoding. default is true. % * __WebpQuality__ For WEBP, it can be a quality from 1 to 100 (the higher is -% the better). By default (without any parameter) and for quality above -% 100 the lossless compression is used. +% the better). By default (without any parameter) and for quality above 100 +% the lossless compression is used. % * __PamTupleType__ For PAM, sets the TUPLETYPE field to the corresponding -% string value that is defined for the format. One of: -% * __Null__ -% * __BlackWhite__ -% * __Grayscale__ -% * __GrayscaleAlpha__ -% * __RGB__ -% * __RGBA__ +% string value that is defined for the format. One of: +% * __Null__ +% * __BlackWhite__ +% * __Grayscale__ +% * __GrayscaleAlpha__ +% * __RGB__ +% * __RGBA__ % % For advanced uses, you can directly pass a vector of parameters: % % * __Params__ Format-specific save parameters encoded as pairs: -% `[paramId_1, paramValue_1, paramId_2, paramValue_2, ...]`. +% `[paramId_1, paramValue_1, paramId_2, paramValue_2, ...]`. % % The function compresses the image and stores it in the memory buffer. See % cv.imwrite for the list of supported formats and flags description.
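Since cv.imdecode reverses cv.imencode, a round trip makes a quick sanity check. A sketch (`peppers.png` is a standard MATLAB demo image):

    img = imread('peppers.png');                       % RGB uint8 image
    buf = cv.imencode('.jpg', img, 'JpegQuality',90);  % compress to a byte vector
    img2 = cv.imdecode(buf);                           % decode back (JPEG is lossy)
    isequal(size(img), size(img2))                     % dimensions are preserved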
diff --git a/+cv/imread.m b/+cv/imread.m index a26078327..329e46b45 100644 --- a/+cv/imread.m +++ b/+cv/imread.m @@ -1,7 +1,7 @@ %IMREAD Loads an image from a file % -% img = cv.imread(filename) -% img = cv.imread(filename, 'OptionName',optionValue, ...) +% img = cv.imread(filename) +% img = cv.imread(filename, 'OptionName',optionValue, ...) % % ## Input % * __filename__ Name of a file to be loaded. @@ -11,37 +11,37 @@ % % ## Options % * __Unchanged__ If set, return the loaded image as is (with alpha channel, -% otherwise it gets cropped). Both the depth and number of channels are -% unchanged as determined by the decoder. default false +% otherwise it gets cropped). Both the depth and number of channels are +% unchanged as determined by the decoder. default false % * __AnyDepth__ If set, return 16-bit/32-bit image when the input has the -% corresponding depth, otherwise convert it to 8-bit. default false +% corresponding depth, otherwise convert it to 8-bit. default false % * __AnyColor__ If set, the image is read in any possible color format. -% default false +% default false % * __Color__ If set, always convert image to the 3 channel BGR color image. -% default true +% default true % * __Grayscale__ If set, always convert image to the single channel grayscale -% image. default false +% image. default false % * __GDAL__ If set, use the gdal driver for loading the image. default false % * __ReduceScale__ Loads the image reduced by a scale factor (JPEG library -% natively supports direct image scaling, other formats are resized -% after loading). One of: -% * `1`: no scaling (default). -% * `2`: image scaled by 1/2 factor. -% * `4`: image scaled by 1/4 factor. -% * `8`: image scaled by 1/8 factor. +% natively supports direct image scaling, other formats are resized after +% loading). One of: +% * `1`: no scaling (default). +% * `2`: image scaled by 1/2 factor. +% * `4`: image scaled by 1/4 factor. +% * `8`: image scaled by 1/8 factor. % * __IgnoreOrientation__ If set, do not rotate the image according to EXIF's -% orientation flag. default false +% orientation flag. default false % * __Flags__ Advanced option to directly set the flag specifying the depth -% and color type of a loaded image. Note that setting this integer flag -% overrides all the other flag options. Not set by default: -% * `>0`: Return a 3-channel color image. Note that in the current -% implementation the alpha channel, if any, is stripped from the -% output image. For example, a 4-channel RGBA image is loaded as -% RGB if `Flags >= 0`. -% * `=0`: Return a grayscale image -% * `<0`: Return the loaded image as is (with alpha channel if present). +% and color type of a loaded image. Note that setting this integer flag +% overrides all the other flag options. Not set by default: +% * `>0`: Return a 3-channel color image. Note that in the current +% implementation the alpha channel, if any, is stripped from the +% output image. For example, a 4-channel RGBA image is loaded as +% RGB if `Flags >= 0`.
+% * `=0`: Return a grayscale image +% * `<0`: Return the loaded image as is (with alpha channel if present). % * __FlipChannels__ in case the output is color image, flips the color order -% from OpenCV's BGR/BGRA to MATLAB's RGB/RGBA order. default true +% from OpenCV's BGR/BGRA to MATLAB's RGB/RGBA order. default true % % The function cv.imread loads an image from the specified file and returns % it. If the image cannot be read (because of missing file, improper @@ -63,37 +63,31 @@ % * Raster and Vector geospatial data supported by Gdal (see the Notes section) % * DICOM medical images - `*.dcm` (see the Notes section) % -% ## Notes -% The function determines the type of an image by the content, not by the file -% extension. -% -% In the case of color images, the decoded images will have the channels -% stored in BGR order. If `FlipChannels` is set, the channels are flipped to -% RGB order. -% -% On Microsoft Windows OS and MacOSX, the codecs shipped with an OpenCV image -% (libjpeg, libpng, libtiff, and libjasper) are used by default. So, OpenCV can -% always read JPEGs, PNGs, and TIFFs. On MacOSX, there is also an option to use -% native MacOSX image readers. But beware that currently these native image -% loaders give images with different pixel values because of the color -% management embedded into MacOSX. -% -% On Linux, BSD flavors and other Unix-like open-source operating systems, -% OpenCV looks for codecs supplied with an OS image. Install the relevant -% packages (do not forget the development files, for example, "libjpeg-dev", -% in Debian and Ubuntu) to get the codec support or turn on the -% `OPENCV_BUILD_3RDPARTY_LIBS` flag in CMake. -% -% In the case you set `WITH_GDAL` flag to true in CMake and `GDAL` option to -% load the image, then [GDAL driver](http://www.gdal.org) will be used in -% order to decode the image by supporting the following formats: -% -% * [Raster](http://www.gdal.org/formats_list.html), -% * [Vector](http://www.gdal.org/ogr_formats.html). -% -% If EXIF information are embedded in the image file, the EXIF orientation -% will be taken into account and thus the image will be rotated accordingly -% except if the option `IgnoreOrientation` is passed. +% ### Notes +% * The function determines the type of an image by the content, not by the +% file extension. +% * In the case of color images, the decoded images will have the channels +% stored in BGR order. If `FlipChannels` is set, the channels are flipped to +% RGB order. +% * On Microsoft Windows OS and MacOSX, the codecs shipped with an OpenCV +% image (libjpeg, libpng, libtiff, and libjasper) are used by default. So, +% OpenCV can always read JPEGs, PNGs, and TIFFs. On MacOSX, there is also an +% option to use native MacOSX image readers. But beware that currently these +% native image loaders give images with different pixel values because of +% the color management embedded into MacOSX. +% * On Linux, BSD flavors and other Unix-like open-source operating systems, +% OpenCV looks for codecs supplied with an OS image. Install the relevant +% packages (do not forget the development files, for example, "libjpeg-dev", +% in Debian and Ubuntu) to get the codec support or turn on the +% `OPENCV_BUILD_3RDPARTY_LIBS` flag in CMake. 
+% * If you set the `WITH_GDAL` flag to true in CMake and pass the `GDAL` option +% to load the image, the [GDAL driver](http://www.gdal.org) will be used to +% decode the image, supporting the following formats: +% * [Raster](http://www.gdal.org/formats_list.html), +% * [Vector](http://www.gdal.org/ogr_formats.html). +% * If EXIF information is embedded in the image file, the EXIF orientation +% will be taken into account and the image will be rotated accordingly, +% unless the option `IgnoreOrientation` is passed. % % See also: cv.imwrite, cv.imdecode, imread, imfinfo, imformats, dicomread, % hdrread, Tiff diff --git a/+cv/imreadmulti.m b/+cv/imreadmulti.m index 5b2c45419..21d8ceac6 100644 --- a/+cv/imreadmulti.m +++ b/+cv/imreadmulti.m @@ -1,43 +1,42 @@ %IMREADMULTI Loads a multi-page image from a file % -% imgs = cv.imreadmulti(filename) -% imgs = cv.imreadmulti(filename, 'OptionName',optionValue, ...) +% imgs = cv.imreadmulti(filename) +% imgs = cv.imreadmulti(filename, 'OptionName',optionValue, ...) % % ## Input % * __filename__ Name of file to be loaded. % % ## Output % * __imgs__ A cell-array of images holding each page, if more than one. If -% If the image cannot be read (because of IO errors, improper -% permissions, unsupported or invalid format), the function throws an -% error. +% the image cannot be read (because of IO errors, improper permissions, +% unsupported or invalid format), the function throws an error. % % ## Options % * __Unchanged__ If set, return the loaded image as is (with alpha channel, -% otherwise it gets cropped). Both the depth and number of channels are -% unchanged as determined by the decoder. default false +% otherwise it gets cropped). Both the depth and number of channels are +% unchanged as determined by the decoder. default false % * __AnyDepth__ If set, return 16-bit/32-bit image when the input has the -% corresponding depth, otherwise convert it to 8-bit. default false +% corresponding depth, otherwise convert it to 8-bit. default false % * __AnyColor__ If set, the image is read in any possible color format. -% default true +% default true % * __Color__ If set, always convert image to the 3 channel BGR color image. -% default false +% default false % * __Grayscale__ If set, always convert image to the single channel grayscale -% image. default false +% image. default false % * __GDAL__ If set, use the gdal driver for loading the image. default false % * __IgnoreOrientation__ If set, do not rotate the image according to EXIF's -% orientation flag. default false +% orientation flag. default false % * __Flags__ Advanced option to directly set the flag specifying the depth -% and color type of a loaded image. Note that setting this integer flag -% overrides all the other flag options. Not set by default: -% * `>0`: Return a 3-channel color image. Note that in the current -% implementation the alpha channel, if any, is stripped from the -% output image. For example, a 4-channel RGBA image is loaded as -% RGB if `Flags >= 0`. -% * `=0`: Return a grayscale image -% * `<0`: Return the loaded image as is (with alpha channel if present). +% and color type of a loaded image. Note that setting this integer flag +% overrides all the other flag options. Not set by default: +% * `>0`: Return a 3-channel color image. Note that in the current +% implementation the alpha channel, if any, is stripped from the +% output image. For example, a 4-channel RGBA image is loaded as +% RGB if `Flags >= 0`.
+% * `=0`: Return a grayscale image +% * `<0`: Return the loaded image as is (with alpha channel if present). % * __FlipChannels__ in case the output is color image, flips the color order -% from OpenCV's BGR/BGRA to MATLAB's RGB/RGBA order. default true +% from OpenCV's BGR/BGRA to MATLAB's RGB/RGBA order. default true % % The function cv.imreadmulti loads a multi-page image from the specified file % into a cell-array of matrices. diff --git a/+cv/imwrite.m b/+cv/imwrite.m index 889c5152c..281360131 100644 --- a/+cv/imwrite.m +++ b/+cv/imwrite.m @@ -1,8 +1,8 @@ %IMWRITE Saves an image to a specified file % -% cv.imwrite(filename, img) -% cv.imwrite(filename, img, 'OptionName', optionValue, ...) -% success = cv.imwrite(...) +% cv.imwrite(filename, img) +% cv.imwrite(filename, img, 'OptionName', optionValue, ...) +% success = cv.imwrite(...) % % ## Input % * __filename__ Name of the file. @@ -10,69 +10,68 @@ % % ## Output % * __success__ optional output flag, true on success, false otherwise. If not -% requested, the function throws an error on fail. +% requested, the function throws an error on fail. % % ## Options % * __FlipChannels__ in case the input is color image, flips the color order -% from MATLAB's RGB/RGBA to OpenCV's BGR/BGRA order. default true +% from MATLAB's RGB/RGBA to OpenCV's BGR/BGRA order. default true % % The following format-specific save parameters are currently supported: % % * __JpegQuality__ For JPEG, it can be a quality from 0 to 100 (the higher is -% the better). Default value is 95. +% the better). Default value is 95. % * __JpegProgressive__ Enable JPEG features, 0 or 1, default is false. % * __JpegOptimize__ Enable JPEG features, 0 or 1, default is false. -% * __JpegResetInterval__ JPEG restart interval, 0 - 65535, -% default is 0 (no restart). -% * __JpegLumaQuality__ Separate luma quality level, 0 - 100, -% default is 0 (don't use). -% * __JpegChromaQuality__ Separate chroma quality level, 0 - 100, -% default is 0 (don't use). +% * __JpegResetInterval__ JPEG restart interval, 0 - 65535, default is 0 +% (no restart). +% * __JpegLumaQuality__ Separate luma quality level, 0 - 100, default is 0 +% (don't use). +% * __JpegChromaQuality__ Separate chroma quality level, 0 - 100, default is 0 +% (don't use). % * __PngCompression__ For PNG, it can be the compression level from 0 to 9. -% A higher value means a smaller size and longer compression time. If -% specified, `PngStrategy` is changed to `Default`. Default value is 1 -% (best speed setting). +% A higher value means a smaller size and longer compression time. If +% specified, `PngStrategy` is changed to `Default`. Default value is 1 +% (best speed setting). % * __PngStrategy__ For PNG; used to tune the compression algorithm. These -% flags will be modify the way of PNG image compression and will be -% passed to the underlying zlib processing stage. The strategy parameter -% only affects the compression ratio but not the correctness of the -% compressed output even if it is not set appropriately. One of: -% * __Default__ Use this value for normal data. -% * __Filtered__ Use this value for data produced by a filter (or -% predictor). Filtered data consists mostly of small values with a -% somewhat random distribution. In this case, the compression -% algorithm is tuned to compress them better. The effect of -% `Filtered` is to force more Huffman coding and less string -% matching; it is somewhat intermediate between `Default` and -% `HuffmanOnly`. 
-% * __HuffmanOnly__ Use this value to force Huffman encoding only -% (no string match). -% * __RLE__ (default) Use this value to limit match distances to one -% (run-length encoding). `RLE` is designed to be almost as fast as -% `HuffmanOnly`, but give better compression for PNG image data. -% * __Fixed__ Using this value prevents the use of dynamic Huffman -% codes, allowing for a simpler decoder for special applications. +% flags modify the way the PNG image is compressed and are passed +% to the underlying zlib processing stage. The strategy parameter only +% affects the compression ratio but not the correctness of the compressed +% output even if it is not set appropriately. One of: +% * __Default__ Use this value for normal data. +% * __Filtered__ Use this value for data produced by a filter (or predictor). +% Filtered data consists mostly of small values with a somewhat random +% distribution. In this case, the compression algorithm is tuned to +% compress them better. The effect of `Filtered` is to force more Huffman +% coding and less string matching; it is somewhat intermediate between +% `Default` and `HuffmanOnly`. +% * __HuffmanOnly__ Use this value to force Huffman encoding only +% (no string match). +% * __RLE__ (default) Use this value to limit match distances to one +% (run-length encoding). `RLE` is designed to be almost as fast as +% `HuffmanOnly`, but gives better compression for PNG image data. +% * __Fixed__ Using this value prevents the use of dynamic Huffman codes, +% allowing for a simpler decoder for special applications. % * __PngBilevel__ Binary level PNG, 0 or 1, controls packing of pixels per -% bytes. If false, PNG files pack pixels of bit-depths 1, 2, and 4 into -% bytes as small as possible. default is false. +% bytes. If false, PNG files pack pixels of bit-depths 1, 2, and 4 into +% bytes as small as possible. default is false. % * __PxmBinary__ For PPM, PGM, or PBM, it can be a binary format flag, 0 or 1, -% to specify ASCII or binary encoding. default is true. +% to specify ASCII or binary encoding. default is true. % * __WebpQuality__ For WEBP, it can be a quality from 1 to 100 (the higher is -% the better). By default (without any parameter) and for quality above -% 100 the lossless compression is used. +% the better). By default (without any parameter) and for quality above 100 +% the lossless compression is used. % * __PamTupleType__ For PAM, sets the TUPLETYPE field to the corresponding -% string value that is defined for the format. One of: -% * __Null__ -% * __BlackWhite__ -% * __Grayscale__ -% * __GrayscaleAlpha__ -% * __RGB__ -% * __RGBA__ +% string value that is defined for the format. One of: +% * __Null__ +% * __BlackWhite__ +% * __Grayscale__ +% * __GrayscaleAlpha__ +% * __RGB__ +% * __RGBA__ % % For advanced uses, you can directly pass a vector of parameters: % % * __Params__ Format-specific save parameters encoded as pairs: -% `[paramId_1, paramValue_1, paramId_2, paramValue_2, ...]`. +% `[paramId_1, paramValue_1, paramId_2, paramValue_2, ...]`. % % The function cv.imwrite saves the image to the specified file. The image % format is chosen based on the filename extension (see cv.imread for the list @@ -99,24 +98,24 @@ % The sample below shows how to create such an RGBA image and store it to a PNG file.
% It also demonstrates how to set custom compression parameters: % -% % Create mat with alpha channel -% nrows = 480; ncols = 640; -% [I,J] = ndgrid(1:nrows, 1:ncols); -% img = zeros(nrows, ncols, 4, 'uint8'); -% img(:,:,1) = uint8(255 * (nrows-I+1)/nrows); % red -% img(:,:,2) = uint8(255 * (ncols-J+1)/ncols); % green -% img(:,:,3) = uint8(255); % blue -% img(:,:,4) = uint8(0.5 * sum(img(:,:,[1 2]),3)); % alpha -% -% % save PNG file with alpha data -% %imwrite(img(:,:,1:3), 'alpha.png', 'Alpha',img(:,:,4)) -% cv.imwrite('alpha.png', img, 'PngCompression',9, 'PngStrategy','RLE'); -% imfinfo('alpha.png') -% -% % show image with alpha transparency -% figure('Menubar','none', 'Color','k') -% image(img(:,:,1:3), 'AlphaData',img(:,:,4)) -% axis image off +% % Create mat with alpha channel +% nrows = 480; ncols = 640; +% [I,J] = ndgrid(1:nrows, 1:ncols); +% img = zeros(nrows, ncols, 4, 'uint8'); +% img(:,:,1) = uint8(255 * (nrows-I+1)/nrows); % red +% img(:,:,2) = uint8(255 * (ncols-J+1)/ncols); % green +% img(:,:,3) = uint8(255); % blue +% img(:,:,4) = uint8(0.5 * sum(img(:,:,[1 2]),3)); % alpha +% +% % save PNG file with alpha data +% %imwrite(img(:,:,1:3), 'alpha.png', 'Alpha',img(:,:,4)) +% cv.imwrite('alpha.png', img, 'PngCompression',9, 'PngStrategy','RLE'); +% imfinfo('alpha.png') +% +% % show image with alpha transparency +% figure('Menubar','none', 'Color','k') +% image(img(:,:,1:3), 'AlphaData',img(:,:,4)) +% axis image off % % See also: cv.imread, cv.imencode, imwrite, dicomwrite, hdrwrite % diff --git a/+cv/inRange.m b/+cv/inRange.m index 23b1b7be7..9f875fe3b 100644 --- a/+cv/inRange.m +++ b/+cv/inRange.m @@ -1,6 +1,6 @@ %INRANGE Checks if array elements lie between the elements of two other arrays % -% dst = cv.inRange(src, lowerb, upperb) +% dst = cv.inRange(src, lowerb, upperb) % % ## Input % * __src__ first input array. @@ -14,12 +14,12 @@ % % - For every element of a single-channel input array: % -% dst(i,j,..) = lowerb(i,j,..,1) <= src(i,j,..,1) <= upperb(i,j,..,1) +% dst(i,j,..) = lowerb(i,j,..,1) <= src(i,j,..,1) <= upperb(i,j,..,1) % % - For two-channel arrays: % -% dst(i,j,..) = lowerb(i,j,..,1) <= src(i,j,..,1) <= upperb(i,j,..,1) and -% lowerb(i,j,..,2) <= src(i,j,..,2) <= upperb(i,j,..,2) +% dst(i,j,..) = lowerb(i,j,..,1) <= src(i,j,..,1) <= upperb(i,j,..,1) and +% lowerb(i,j,..,2) <= src(i,j,..,2) <= upperb(i,j,..,2) % % - and so forth. % diff --git a/+cv/initCameraMatrix2D.m b/+cv/initCameraMatrix2D.m index 01fc3a2a5..d48e8de45 100644 --- a/+cv/initCameraMatrix2D.m +++ b/+cv/initCameraMatrix2D.m @@ -1,25 +1,24 @@ %INITCAMERAMATRIX2D Finds an initial camera matrix from 3D-2D point correspondences % -% cameraMatrix = cv.initCameraMatrix2D(objectPoints, imagePoints, imageSize) -% [...] = cv.initCameraMatrix2D(..., 'OptionName', optionValue, ...) +% cameraMatrix = cv.initCameraMatrix2D(objectPoints, imagePoints, imageSize) +% [...] = cv.initCameraMatrix2D(..., 'OptionName', optionValue, ...) % % ## Input % * __objectPoints__ Vector of vectors of the calibration pattern points in -% the calibration pattern coordinate space. Cell array of cell array -% of 3-element vectors are accepted `{{[x,y,z],...}, ...}`. +% the calibration pattern coordinate space. Cell array of cell array of +% 3-element vectors are accepted `{{[x,y,z],...}, ...}`. % * __imagePoints__ Vector of vectors of the projections of the calibration -% pattern points. Cell array of cell array of 2-element vectors are -% accepted `{{[x,y],...}, ...}`. 
-% * __imageSize__ Image size in pixels used to initialize the principal -% point `[w,h]`. +% pattern points. Cell array of cell array of 2-element vectors are accepted +% `{{[x,y],...}, ...}`. +% * __imageSize__ Image size in pixels used to initialize the principal point +% `[w,h]`. % % ## Output % * __cameraMatrix__ Camera matrix 3x3, `A = [fx 0 cx; 0 fy cy; 0 0 1]` % % ## Options % * __AspectRatio__ If it is zero or negative, both `fx` and `fy` are -% estimated independently. Otherwise, `fx = fy * AspectRatio`. -% default 1.0 +% estimated independently. Otherwise, `fx = fy * AspectRatio`. default 1.0 % % The function estimates and returns an initial camera matrix for the % camera calibration process. Currently, the function only supports planar diff --git a/+cv/initUndistortRectifyMap.m b/+cv/initUndistortRectifyMap.m index 32b1f5497..bac49ce03 100644 --- a/+cv/initUndistortRectifyMap.m +++ b/+cv/initUndistortRectifyMap.m @@ -1,14 +1,14 @@ %INITUNDISTORTRECTIFYMAP Computes the undistortion and rectification transformation map % -% [map1, map2] = cv.initUndistortRectifyMap(cameraMatrix, distCoeffs, siz) -% [...] = cv.initUndistortRectifyMap(..., 'OptionName', optionValue, ...) +% [map1, map2] = cv.initUndistortRectifyMap(cameraMatrix, distCoeffs, siz) +% [...] = cv.initUndistortRectifyMap(..., 'OptionName', optionValue, ...) % % ## Input % * __cameraMatrix__ Input camera matrix `A = [f_x 0 c_x; 0 f_y c_y; 0 0 1]` % * __distCoeffs__ Input vector of distortion coefficients -% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy]` of 4, 5, 8, 12 or 14 -% elements. If the vector is empty, the zero distortion coefficients are -% assumed. +% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy]` of 4, 5, 8, 12 or 14 +% elements. If the vector is empty, the zero distortion coefficients are +% assumed. % * __siz__ Undistorted image size `[w,h]`. % % ## Output @@ -17,23 +17,21 @@ % % ## Options % * __R__ Optional rectification transformation in the object space (3x3 -% matrix). `R1` or `R2`, computed by cv.stereoRectify can be passed -% here. If the matrix is empty, the identity transformation is assumed. -% default empty +% matrix). `R1` or `R2`, computed by cv.stereoRectify can be passed here. If +% the matrix is empty, the identity transformation is assumed. default empty % * __NewCameraMatrix__, __P__ New camera matrix (3x3) -% `Ap = [fp_x 0 cp_x; 0 fp_y cp_y; 0 0 1]` or new projection matrix `P` -% (3x4). If empty, uses the default new camera matrix from -% cv.getDefaultNewCameraMatrix (with `CenterPrincipalPoint=true`). -% default empty +% `Ap = [fp_x 0 cp_x; 0 fp_y cp_y; 0 0 1]` or new projection matrix `P` +% (3x4). If empty, uses the default new camera matrix from +% cv.getDefaultNewCameraMatrix (with `CenterPrincipalPoint=true`). +% default empty % * __M1Type__ Type of the first output map, default -1 (equivalent to -% `int16`). See cv.convertMaps. Accepted types are: -% * __int16__ first output map is a MxNx2 `int16` array, second output -% map is MxNx1 `uint16` (fixed-point representation). -% * __single1__ first output map is a MxNx1 `single` matrix, second -% output map is MxNx1 `single` (separate floating-point -% representation). -% * __single2__ first output map is a MxNx2 `single` matrix, second -% output map is empty (combined floating-point representation). +% `int16`). See cv.convertMaps. Accepted types are: +% * __int16__ first output map is a MxNx2 `int16` array, second output map +% MxNx1 `uint16` (fixed-point representation). 
+% * __single1__ first output map is a MxNx1 `single` matrix, second output +% map is MxNx1 `single` (separate floating-point representation). +% * __single2__ first output map is a MxNx2 `single` matrix, second output +% map is empty (combined floating-point representation). % % The function computes the joint undistortion and rectification % transformation and represents the result in the form of maps for @@ -56,21 +54,21 @@ % corresponding coordinates in the source image (that is, in the original % image from camera). The following process is applied: % -% x = (u - cp_x) / fp_x -% y = (v - cp_y) / fp_y -% [X Y Z]' = inv(R) * [x y 1]' -% xp = X / W -% yp = Y / W -% r^2 = xp^2 + yp^2 -% xpp = xp*(1 + k1*r^2 + k2*r^4 + k3*r^6)/(1 + k4*r^2 + k5*r^4 + k6*r^6) + -% 2*p1*xp*yp + p2*(r^2 + 2*xp^2) + s1*r^2 + s2*r^4 -% ypp = yp*(1 + k1*r^2 + k2*r^4 + k3*r^6)/(1 + k4*r^2 + k5*r^4 + k6*r^6) + -% p1*(r^2 + 2*yp^2) + 2*p2*xp*yp + s3*r^2 + s4*r^4 -% [xppp] [R33(taux,tauy) 0 -R13(taux,tauy)] [xpp] -% s*[yppp] = [ 0 R33(taux,tauy) -R23(taux,tauy)] * R(taux,tauy) * [ypp] -% [ 1] [ 0 0 1] [ 1] -% map_x(u,v) = xppp * f_x + c_x -% map_y(u,v) = yppp * f_y + c_y +% x = (u - cp_x) / fp_x +% y = (v - cp_y) / fp_y +% [X Y Z]' = inv(R) * [x y 1]' +% xp = X / W +% yp = Y / W +% r^2 = xp^2 + yp^2 +% xpp = xp*(1 + k1*r^2 + k2*r^4 + k3*r^6)/(1 + k4*r^2 + k5*r^4 + k6*r^6) + +% 2*p1*xp*yp + p2*(r^2 + 2*xp^2) + s1*r^2 + s2*r^4 +% ypp = yp*(1 + k1*r^2 + k2*r^4 + k3*r^6)/(1 + k4*r^2 + k5*r^4 + k6*r^6) + +% p1*(r^2 + 2*yp^2) + 2*p2*xp*yp + s3*r^2 + s4*r^4 +% [xppp] [R33(taux,tauy) 0 -R13(taux,tauy)] [xpp] +% s*[yppp] = [ 0 R33(taux,tauy) -R23(taux,tauy)] * R(taux,tauy) * [ypp] +% [ 1] [ 0 0 1] [ 1] +% map_x(u,v) = xppp * f_x + c_x +% map_y(u,v) = yppp * f_y + c_y % % where `k1`, `k2`, `p1`, `p2`, `k3`, `k4`, `k5`, `k6`, `s1`, `s2`, `s3`, % `s4`, `taux`, `tauy` are the distortion coefficients. @@ -84,7 +82,7 @@ % pixel domain, not a rotation matrix `R` in 3D space. `R` can be computed % from `H` as: % -% R = inv(cameraMatrix) * H * cameraMatrix +% R = inv(cameraMatrix) * H * cameraMatrix % % where `cameraMatrix` can be chosen arbitrarily. % diff --git a/+cv/initWideAngleProjMap.m b/+cv/initWideAngleProjMap.m index b67fdb135..bed3ee5f1 100644 --- a/+cv/initWideAngleProjMap.m +++ b/+cv/initWideAngleProjMap.m @@ -1,15 +1,15 @@ %INITWIDEANGLEPROJMAP Initializes maps for cv.remap for wide-angle % -% [map1, map2] = cv.initWideAngleProjMap(cameraMatrix, distCoeffs, imageSize, destImageWidth) -% [map1, map2, scale] = cv.initWideAngleProjMap(cameraMatrix, distCoeffs, imageSize, destImageWidth) -% [...] = cv.initWideAngleProjMap(..., 'OptionName',optionValue, ...) +% [map1, map2] = cv.initWideAngleProjMap(cameraMatrix, distCoeffs, imageSize, destImageWidth) +% [map1, map2, scale] = cv.initWideAngleProjMap(cameraMatrix, distCoeffs, imageSize, destImageWidth) +% [...] = cv.initWideAngleProjMap(..., 'OptionName',optionValue, ...) % % ## Input % * __cameraMatrix__ Input camera matrix `A = [f_x 0 c_x; 0 f_y c_y; 0 0 1]` % * __distCoeffs__ Input vector of distortion coefficients -% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy]` of 4, 5, 8, 12 or 14 -% elements. If the vector is empty, the zero distortion coefficients are -% assumed. +% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy]` of 4, 5, 8, 12 or 14 +% elements. If the vector is empty, the zero distortion coefficients are +% assumed. % * __imageSize__ image size `[w,h]`. 
% * __destImageWidth__ % @@ -20,17 +20,16 @@ % % ## Options % * __M1Type__ Type of the first output map, default `single2`. See -% cv.convertMaps. Accepted types are: -% * __int16__ first output map is a MxNx2 `int16` array, second output -% map is MxNx1 `uint16` (fixed-point representation). -% * __single1__ first output map is a MxNx1 `single` matrix, second -% output map is MxNx1 `single` (separate floating-point -% representation). -% * __single2__ first output map is a MxNx2 `single` matrix, second -% output map is empty (combined floating-point representation). +% cv.convertMaps. Accepted types are: +% * __int16__ first output map is a MxNx2 `int16` array, second output map +% is MxNx1 `uint16` (fixed-point representation). +% * __single1__ first output map is a MxNx1 `single` matrix, second output +% map is MxNx1 `single` (separate floating-point representation). +% * __single2__ first output map is a MxNx2 `single` matrix, second output +% map is empty (combined floating-point representation). % * __ProjType__ projection type, default 'EqRect'. One of: -% * __Ortho__ -% * __EqRect__ +% * __Ortho__ +% * __EqRect__ % * __Alpha__ default 0 % % See also: cv.initUndistortRectifyMap, cv.remap, cv.convertMaps diff --git a/+cv/inpaint.m b/+cv/inpaint.m index 0bed226b2..8ead86737 100644 --- a/+cv/inpaint.m +++ b/+cv/inpaint.m @@ -1,28 +1,28 @@ %INPAINT Restores the selected region in an image using the region neighborhood % -% dst = cv.inpaint(src, mask) -% dst = cv.inpaint(src, mask, 'OptionName', optionValue, ...) +% dst = cv.inpaint(src, mask) +% dst = cv.inpaint(src, mask, 'OptionName', optionValue, ...) % % ## Input % * __src__ Input 8-bit, 16-bit unsigned or 32-bit float 1-channel, or 8-bit -% 3-channel image. +% 3-channel image. % * __mask__ Inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate -% the area that needs to be inpainted. +% the area that needs to be inpainted. % % ## Output % * __dst__ Output image with the same size and type as `src`. % % ## Options % * __Radius__ Radius of a circlular neighborhood of each point inpainted that -% is considered by the algorithm. default 3.0 +% is considered by the algorithm. default 3.0 % * __Method__ Inpainting method that could be one of the following: -% * __NS__ (default) Navier-Stokes based method [Navier01]. -% * __Telea__ Method by Alexandru Telea [Telea04]. +% * __NS__ (default) Navier-Stokes based method [Navier01]. +% * __Telea__ Method by Alexandru Telea [Telea04]. % % The function reconstructs the selected image area from the pixel near the % area boundary. The function may be used to remove dust and scratches from a % scanned photo, or to remove undesirable objects from still images or video. -% See [Inpainting](http://en.wikipedia.org/wiki/Inpainting) for more details. +% See [Inpainting](https://en.wikipedia.org/wiki/Inpainting) for more details. % % ## References % [Navier01]: diff --git a/+cv/integral.m b/+cv/integral.m index 7ed12e95f..0e62998df 100644 --- a/+cv/integral.m +++ b/+cv/integral.m @@ -1,40 +1,40 @@ %INTEGRAL Calculates the integral of an image % -% s = cv.integral(src) -% [s, sqsum, tilted] = cv.integral(src) -% [...] = cv.integral(src, 'OptionName',optionValue, ...) +% s = cv.integral(src) +% [s, sqsum, tilted] = cv.integral(src) +% [...] = cv.integral(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ Source image as `W x H`, 8-bit, 16-bit or floating-point -% (`single` or `double`). +% (`single` or `double`). 
% % ## Output % * __s__ Integral image as `(W+1) x (H+1)`, 32-bit integer or floating-point -% (`single` or `double`). +% (`single` or `double`). % * __sqsum__ Integral image for squared pixel values. It is `(W+1) x (H+1)`, -% double-precision floating-point array. +% double-precision floating-point array. % * __tilted__ Integral for the image rotated by 45 degrees. It is -% `(W+1) x (H+1)` array with the same data type as `s`. +% `(W+1) x (H+1)` array with the same data type as `s`. % % ## Options % * __SDepth__ desired depth of the integral and the tilted integral images, -% `int32`, `single`, or `double`. default -1 +% `int32`, `single`, or `double`. default -1 % * __SQDepth__ desired depth of the integral image of squared pixel values, -% `single` or `double`. default -1 +% `single` or `double`. default -1 % % The function calculates one or more integral images for the source image as % follows: % -% s(X,Y) = sum_{xBGR, output: BGR->RGB). default false +% `src` and output `dst2`, between MATLAB's RGB order and OpenCV's BGR +% (input: RGB->BGR, output: BGR->RGB). default false % % See also: cv.stylization % diff --git a/+cv/perspectiveTransform.m b/+cv/perspectiveTransform.m index 269077b99..bdc0d4770 100644 --- a/+cv/perspectiveTransform.m +++ b/+cv/perspectiveTransform.m @@ -1,38 +1,38 @@ %PERSPECTIVETRANSFORM Performs the perspective matrix transformation of vectors % -% dst = cv.perspectiveTransform(src, mtx) +% dst = cv.perspectiveTransform(src, mtx) % % ## Input % * __src__ Source two-channel or three-channel floating-point array. Each -% element is a 2D/3D vector to be transformed specified as a numeric -% Nx2/Nx1x2/1xNx2 or Nx3/Nx1x3/1xNx3 array. Also can be specified as a -% cell-array of 2D/3D points `{[x,y],...}` or `{[x,y,z],...}`. +% element is a 2D/3D vector to be transformed specified as a numeric +% Nx2/Nx1x2/1xNx2 or Nx3/Nx1x3/1xNx3 array. Also can be specified as a +% cell-array of 2D/3D points `{[x,y],...}` or `{[x,y,z],...}`. % * __mtx__ 3x3 or 4x4 floating-point transformation matrix. In case `src` has -% 2D points, `mtx` must be a 3x3 matrix, otherwise 4x4 matrix in case of -% 3D points. +% 2D points, `mtx` must be a 3x3 matrix, otherwise 4x4 matrix in case of 3D +% points. % % ## Output -% * __dst__ Destination array of the same size and type as `src` -% (either numeric or cell array). +% * __dst__ Destination array of the same size and type as `src` (either +% numeric or cell array). % % The function cv.perspectiveTransform transforms every element of `src` by % treating it as a 2D or 3D vector, in the following way: % -% [x,y,z] -> [X/w, Y/w, Z/w] +% [x,y,z] -> [X/w, Y/w, Z/w] % % where % -% [X,Y,Z,W] = mtx * [x,y,z,1] +% [X,Y,Z,W] = mtx * [x,y,z,1] % % and -% | W if W ~=0 -% w = | -% | inf otherwise +% | W if W ~=0 +% w = | +% | inf otherwise % % Here a 3D vector transformation is shown. In case of a 2D vector % transformation, the `z` component is omitted. % -% ## Note +% ### Note % The function transforms a sparse set of 2D or 3D vectors. If you want to % transform an image using perspective transformation, use cv.warpPerspective. % If you have an inverse problem, that is, you want to compute the most diff --git a/+cv/phase.m b/+cv/phase.m index 089d2e187..b07df612c 100644 --- a/+cv/phase.m +++ b/+cv/phase.m @@ -1,25 +1,25 @@ %PHASE Calculates the rotation angle of 2D vectors % -% ang = cv.phase(x, y) -% ang = cv.phase(..., 'OptionName',optionValue, ...) +% ang = cv.phase(x, y) +% ang = cv.phase(..., 'OptionName',optionValue, ...) 
% % ## Input % * __x__ floating-point array of x-coordinates of 2D vectors. % * __y__ floating-point array of y-coordinates of 2D vectors. It must have -% the same size and type as `x`. +% the same size and type as `x`. % % ## Output % * __ang__ output array of vector angles; it has the same size and type as -% `x`. +% `x`. % % ## Options % * __Degrees__ when true, the function calculates the angle in degrees, -% otherwise, they are measured in radians. default false +% otherwise, they are measured in radians. default false % % The function cv.phase calculates the rotation angle of each 2D vector that % is formed from the corresponding elements of `x` and `y`: % -% angle(I) = atan2(y(I), x(I)) +% angle(I) = atan2(y(I), x(I)) % % The angle estimation accuracy is about 0.3 degrees. When `x(I)=y(I)=0`, the % corresponding `angle(I)` is set to 0. diff --git a/+cv/phaseCorrelate.m b/+cv/phaseCorrelate.m index 1d69350dc..9c00a3903 100644 --- a/+cv/phaseCorrelate.m +++ b/+cv/phaseCorrelate.m @@ -1,23 +1,23 @@ %PHASECORRELATE Detect translational shifts that occur between two images % -% pshift = cv.phaseCorrelate(src1, src2) -% [pshift,response] = cv.phaseCorrelate(src1, src2) -% [...] = cv.phaseCorrelate(..., 'OptionName',optionValue, ...) +% pshift = cv.phaseCorrelate(src1, src2) +% [pshift,response] = cv.phaseCorrelate(src1, src2) +% [...] = cv.phaseCorrelate(..., 'OptionName',optionValue, ...) % % ## Input % * __src1__ First source floating-point array (single-channel `single` or -% `double`). +% `double`). % * __src2__ Second source floating-point array (single-channel `single` or -% `double`), of same size and type as `src1`. +% `double`), of same size and type as `src1`. % % ## Output % * __pshift__ detected phase shift (sub-pixel) between the two arrays `[x,y]` % * __response__ Signal power within the 5x5 centroid around the peak, between -% 0 and 1 (optional). +% 0 and 1 (optional). % % ## Options % * __Window__ Floating-point array with windowing coefficients to reduce edge -% effects (optional). Not set by default. +% effects (optional). Not set by default. % % The function is used to detect translational shifts that occur between two % images. @@ -25,7 +25,7 @@ % The operation takes advantage of the Fourier shift theorem for detecting the % translational shift in the frequency domain. It can be used for fast image % registration as well as motion estimation. For more information please see -% [Phase correlation](http://en.wikipedia.org/wiki/Phase_correlation). +% [Phase correlation](https://en.wikipedia.org/wiki/Phase_correlation). % % Calculates the cross-power spectrum of two supplied source arrays. The % arrays are padded if needed with cv.getOptimalDFTSize. @@ -33,29 +33,29 @@ % The function performs the following equations: % % * First it applies a -% [Hanning window](http://en.wikipedia.org/wiki/Hann_function) to each image +% [Hanning window](https://en.wikipedia.org/wiki/Hann_function) to each image % to remove possible edge effects. This window is cached until the array % size changes to speed up processing time. % % * Next it computes the forward DFTs of each source array: % -% G_a = F{src1}, G_b = F{src2} +% G_a = F{src1}, G_b = F{src2} % -% where `F` is the forward DFT. +% where `F` is the forward DFT. 
% % * It then computes the cross-power spectrum of each frequency domain array: % -% R = G_a * G_b^(*) / |G_a * G_b^(*)| +% R = G_a * G_b^(*) / |G_a * G_b^(*)| % % * Next the cross-correlation is converted back into the time domain via the % inverse DFT: % -% r = F^(-1){R} +% r = F^(-1){R} % % * Finally, it computes the peak location and computes a 5x5 weighted % centroid around the peak to achieve sub-pixel accuracy. % -% (\Delta{x}, \Delta{y}) = weightedCentroid{argmax_(x,y){r}} +% (\Delta{x}, \Delta{y}) = weightedCentroid{argmax_(x,y){r}} % % * If non-zero, the response parameter is computed as the sum of the elements % of `r` within the 5x5 centroid around the peak location. It is normalized diff --git a/+cv/pointPolygonTest.m b/+cv/pointPolygonTest.m index 9f13f98ef..978e3e64d 100644 --- a/+cv/pointPolygonTest.m +++ b/+cv/pointPolygonTest.m @@ -1,11 +1,11 @@ %POINTPOLYGONTEST Performs a point-in-contour test % -% d = cv.pointPolygonTest(contour, pt) -% d = cv.pointPolygonTest(..., 'OptionName',optionValue, ...) +% d = cv.pointPolygonTest(contour, pt) +% d = cv.pointPolygonTest(..., 'OptionName',optionValue, ...) % % ## Input % * __contour__ Input contour, stored in numeric array (Nx2/Nx1x2/1xNx2) or -% cell array of 2-element vectors (`{[x,y], ...}`). +% cell array of 2-element vectors (`{[x,y], ...}`). % * __pt__ Point tested against the contour `[x,y]`. % % ## Output @@ -13,8 +13,8 @@ % % ## Options % * __MeasureDist__ If true, the function estimates the signed distance from -% the point to the nearest contour edge. Otherwise, the function only -% checks if the point is inside a contour or not. default false +% the point to the nearest contour edge. Otherwise, the function only checks +% if the point is inside a contour or not. default false % % The function determines whether the point is inside a contour, outside, or % lies on an edge (or coincides with a vertex). It returns positive (inside), diff --git a/+cv/polarToCart.m b/+cv/polarToCart.m index 420d2dff4..e0c65a735 100644 --- a/+cv/polarToCart.m +++ b/+cv/polarToCart.m @@ -1,28 +1,28 @@ %POLARTOCART Calculates x and y coordinates of 2D vectors from their magnitude and angle % -% [x, y] = cv.polarToCart(mag, ang) -% [...] = cv.polarToCart(..., 'OptionName',optionValue, ...) +% [x, y] = cv.polarToCart(mag, ang) +% [...] = cv.polarToCart(..., 'OptionName',optionValue, ...) % % ## Input % * __mag__ floating-point array of magnitudes of 2D vectors; it must have the -% same size and type as `ang`. +% same size and type as `ang`. % * __ang__ floating-point array of angles of 2D vectors. % % ## Output % * __x__ output array of x-coordinates of 2D vectors; it has the same size -% and type as `ang`. +% and type as `ang`. % * __y__ output array of y-coordinates of 2D vectors; it has the same size -% and type as `ang`. +% and type as `ang`. % % ## Options % * __Degrees__ when true, the input angles are measured in degrees, -% otherwise, they are measured in radians. default false +% otherwise, they are measured in radians. default false % % The function cv.polarToCart calculates the Cartesian coordinates of each 2D % vector represented by the corresponding elements of magnitude and angle: % -% x(I) = magnitude(I) * cos(angle(I)) -% y(I) = magnitude(I) * sin(angle(I)) +% x(I) = magnitude(I) * cos(angle(I)) +% y(I) = magnitude(I) * sin(angle(I)) % % The relative accuracy of the estimated coordinates is about 1e-6. 
% diff --git a/+cv/polylines.m b/+cv/polylines.m index 7225a63ef..03fe703ff 100644 --- a/+cv/polylines.m +++ b/+cv/polylines.m @@ -1,28 +1,28 @@ %POLYLINES Draws several polygonal curves % -% img = cv.polylines(img, pts) -% [...] = cv.polylines(..., 'OptionName', optionValue, ...) +% img = cv.polylines(img, pts) +% [...] = cv.polylines(..., 'OptionName', optionValue, ...) % % ## Input % * __img__ Image. % * __pts__ Array of polygonal curves, where each polygon is represented as an -% array of points. A cell array of cell arrays of 2-element vectors, in -% the form `{{[x,y], [x,y], ...}, ...}`, or a cell array of Nx2 matries. +% array of points. A cell array of cell arrays of 2-element vectors, in the +% form `{{[x,y], [x,y], ...}, ...}`, or a cell array of Nx2 matries. % % ## Output % * __img__ Output image, same size and type as input `img`. % % ## Options % * __Closed__ Flag indicating whether the drawn polylines are closed or not. -% If they are closed, the function draws a line from the last vertex of -% each curve to its first vertex. default true. +% If they are closed, the function draws a line from the last vertex of each +% curve to its first vertex. default true. % * __Color__ 3-element floating-point vector specifying polyline color. -% default zeros +% default zeros % * __Thickness__ Thickness of the polyline edges. default 1. % * __LineType__ Type of the line segments. One of: -% * __4__ 4-connected line -% * __8__ 8-connected line (default) -% * __AA__ anti-aliased line +% * __4__ 4-connected line +% * __8__ 8-connected line (default) +% * __AA__ anti-aliased line % * __Shift__ Number of fractional bits in the vertex coordinates. default 0. % % The function cv.polylines draws one or more polygonal curves. diff --git a/+cv/preCornerDetect.m b/+cv/preCornerDetect.m index 77d1ca74a..1a4354621 100644 --- a/+cv/preCornerDetect.m +++ b/+cv/preCornerDetect.m @@ -1,7 +1,7 @@ %PRECORNERDETECT Calculates a feature map for corner detection % -% dst = cv.preCornerDetect(src) -% dst = cv.preCornerDetect(src, 'OptionName', optionValue, ...) +% dst = cv.preCornerDetect(src) +% dst = cv.preCornerDetect(src, 'OptionName', optionValue, ...) % % ## Input % * __src__ Source single-channel 8-bit of floating-point image. @@ -12,23 +12,23 @@ % ## Options % * __KSize__ Aperture size of the cv.Sobel operator. default 3. % * __BorderType__ Pixel extrapolation method. See cv.copyMakeBorder. -% default 'Default' +% default 'Default' % % The function calculates the complex spatial derivative-based function of the % source image. % -% dst = (Dx*src)^2*Dyy*src + (Dy*src)^2*Dxx*src - 2*Dx*src*Dy*src*Dxy*src +% dst = (Dx*src)^2*Dyy*src + (Dy*src)^2*Dxx*src - 2*Dx*src*Dy*src*Dxy*src % % where `Dx`, `Dy` are the first image derivatives, `Dxx`, `Dyy` are the % second image derivatives, and `Dxy` is the mixed derivative. 
% % The corners can be found as local maximums of the functions, as shown below: % -% corners = cv.preCornerDetect(image, 'KSize',3); +% corners = cv.preCornerDetect(image, 'KSize',3); % -% % dilation with 3x3 rectangular structuring element -% dilated_corners = cv.dilate(corners); -% corner_mask = (corners == dilated_corners); +% % dilation with 3x3 rectangular structuring element +% dilated_corners = cv.dilate(corners); +% corner_mask = (corners == dilated_corners); % % See also: cv.cornerMinEigenVal % diff --git a/+cv/private/.gitignore b/+cv/private/.gitkeep similarity index 100% rename from +cv/private/.gitignore rename to +cv/private/.gitkeep diff --git a/+cv/projectPoints.m b/+cv/projectPoints.m index 08829942e..f00ec942e 100644 --- a/+cv/projectPoints.m +++ b/+cv/projectPoints.m @@ -1,36 +1,35 @@ %PROJECTPOINTS Projects 3D points to an image plane % -% imagePoints = cv.projectPoints(objectPoints, rvec, tvec, cameraMatrix) -% [imagePoints, jacobian] = cv.projectPoints(...) -% [...] = cv.projectPoints(..., 'OptionName', optionValue, ...) +% imagePoints = cv.projectPoints(objectPoints, rvec, tvec, cameraMatrix) +% [imagePoints, jacobian] = cv.projectPoints(...) +% [...] = cv.projectPoints(..., 'OptionName', optionValue, ...) % % ## Input % * __objectPoints__ Array of object points, Nx3/Nx1x3/1xNx3 array or cell -% array of 3-element vectors `{[x,y,z],...}`, where `N` is the number of -% points in the view. +% array of 3-element vectors `{[x,y,z],...}`, where `N` is the number of +% points in the view. % * __rvec__ Rotation vector or matrix (3x1/1x3 or 3x3). See cv.Rodrigues for -% details. +% details. % * __tvec__ Translation vector (3x1/1x3). % * __cameraMatrix__ Camera matrix 3x3, `A = [fx 0 cx; 0 fy cy; 0 0 1]`. % % ## Output % * __imagePoints__ Output array of image points, Nx2/Nx1x2/1xNx2 array or -% cell array of 2-element vectors `{[x,y], ...}`. +% cell array of 2-element vectors `{[x,y], ...}`. % * __jacobian__ Optional output `(2N)x(3+3+2+2+numel(DistCoeffs))` jacobian -% matrix of derivatives of image points with respect to components of -% the rotation vector (3), translation vector (3), focal lengths (2), -% coordinates of the principal point (2), and the distortion -% coefficients (`numel(DistCoeffs)`). +% matrix of derivatives of image points with respect to components of the +% rotation vector (3), translation vector (3), focal lengths (2), +% coordinates of the principal point (2), and the distortion coefficients +% (`numel(DistCoeffs)`). % % ## Options % * __DistCoeffs__ Input vector of distortion coefficients -% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy]` of 4, 5, 8, 12 or 14 -% elements. If the vector is empty, the zero distortion coefficients are -% assumed. default empty -% * __AspectRatio__ Optional "fixed aspect ratio" parameter. If the -% parameter is not 0, the function assumes that the aspect ratio -% (`fx/fy`) is fixed and correspondingly adjusts the jacobian matrix. -% default 0. +% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy]` of 4, 5, 8, 12 or 14 +% elements. If the vector is empty, the zero distortion coefficients are +% assumed. default empty +% * __AspectRatio__ Optional "fixed aspect ratio" parameter. If the parameter +% is not 0, the function assumes that the aspect ratio (`fx/fy`) is fixed +% and correspondingly adjusts the jacobian matrix. default 0. % % The function computes projections of 3D points to the image plane given % intrinsic and extrinsic camera parameters. Optionally, the function @@ -41,7 +40,7 @@ % and cv.stereoCalibrate. 
The function itself can also be used to compute a % re-projection error given the current intrinsic and extrinsic parameters. % -% ## Note +% ### Note % By setting `rvec=tvec=[0,0,0]` or by setting `cameraMatrix` to a 3x3 % identity matrix, or by passing zero distortion coefficients, you can get % various useful partial cases of the function. This means that you can diff --git a/+cv/putText.m b/+cv/putText.m index d05c42d23..44c5dea0b 100644 --- a/+cv/putText.m +++ b/+cv/putText.m @@ -1,7 +1,7 @@ %PUTTEXT Draws a text string % -% img = cv.putText(img, txt, org) -% [...] = cv.putText(..., 'OptionName', optionValue, ...) +% img = cv.putText(img, txt, org) +% [...] = cv.putText(..., 'OptionName', optionValue, ...) % % ## Input % * __img__ Image. @@ -13,32 +13,28 @@ % % ## Options % * __FontFace__ Font type to use. One of the following: -% * __HersheySimplex__ Normal size sans-serif (default) -% * __HersheyPlain__ Small size sans-serif -% * __HersheyDuplex__ Normal size sans-serif; more complex than -% `HersheySimplex` -% * __HersheyComplex__ Normal size serif; more complex than -% `HersheyDuplex` -% * __HersheyTriplex__ Normal size serif; more complex than -% `HersheyComplex` -% * __HersheyComplexSmall__ Smaller version of `HersheyComplex` -% * __HersheyScriptSimplex__ Handwriting style -% * __HersheyScriptComplex__ More complex variant of -% `HersheyScriptSimplex` +% * __HersheySimplex__ Normal size sans-serif (default) +% * __HersheyPlain__ Small size sans-serif +% * __HersheyDuplex__ Normal size sans-serif; more complex than +% `HersheySimplex` +% * __HersheyComplex__ Normal size serif; more complex than `HersheyDuplex` +% * __HersheyTriplex__ Normal size serif; more complex than `HersheyComplex` +% * __HersheyComplexSmall__ Smaller version of `HersheyComplex` +% * __HersheyScriptSimplex__ Handwriting style +% * __HersheyScriptComplex__ More complex variant of `HersheyScriptSimplex` % * __FontStyle__ Font style. One of: -% * __Regular__ (default) -% * __Italic__ +% * __Regular__ (default) +% * __Italic__ % * __FontScale__ Font scale factor that is multiplied by the font-specific -% base size. default 1.0 +% base size. default 1.0 % * __Color__ 3-element floating-point vector specifying text color. % * __Thickness__ Thickness of the lines used to draw a text. default 1. % * __LineType__ Line type (see cv.line). One of: -% * __4__ 4-connected line -% * __8__ 8-connected line (default) -% * __AA__ anti-aliased line +% * __4__ 4-connected line +% * __8__ 8-connected line (default) +% * __AA__ anti-aliased line % * __BottomLeftOrigin__ When true, the image data origin is at the -% bottom-left corner. Otherwise, it is at the top-left corner. -% default false. +% bottom-left corner. Otherwise, it is at the top-left corner. default false % % The function cv.putText renders the specified text string in the image. % Symbols that cannot be rendered using the specified font are replaced by diff --git a/+cv/pyrDown.m b/+cv/pyrDown.m index 8f255b6fe..1f7b23316 100644 --- a/+cv/pyrDown.m +++ b/+cv/pyrDown.m @@ -1,35 +1,35 @@ %PYRDOWN Blurs an image and downsamples it % -% dst = cv.pyrDown(src) -% dst = cv.pyrDown(src, 'OptionName',optionValue, ...) +% dst = cv.pyrDown(src) +% dst = cv.pyrDown(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ Input image; `uint8`, `uint16`, `int16`, `single`, or `double`. % % ## Output -% * __dst__ Destination image. It has the specified size and the same type -% as `src`. +% * __dst__ Destination image. It has the specified size and the same type as +% `src`. 
% % ## Options % * __DstSize__ Size of the output image `[w,h]`. default [0,0], see below. % * __BorderType__ Pixel extrapolation method, see cv.copyMakeBorder -% ('Constant' isn't supported). Default 'Default' +% ('Constant' isn't supported). Default 'Default' % % By default, size of the output image is computed as % `[(size(src,2)+1)/2 (size(src,1)+1)/2]`, but in any case, the following % conditions should be satisfied: % -% abs(DstSize(2)*2 - size(src,2)) <= 2 -% abs(DstSize(1)*2 - size(src,1)) <= 2 +% abs(DstSize(2)*2 - size(src,2)) <= 2 +% abs(DstSize(1)*2 - size(src,1)) <= 2 % % The function performs the downsampling step of the Gaussian pyramid % construction. First, it convolves the source image with the kernel: % -% 1/256 * [1 4 6 4 1; -% 4 16 24 16 4; -% 6 24 34 24 6; -% 4 16 24 16 4; -% 1 4 6 4 1] +% 1/256 * [1 4 6 4 1; +% 4 16 24 16 4; +% 6 24 34 24 6; +% 4 16 24 16 4; +% 1 4 6 4 1] % % Then, it downsamples the image by rejecting even rows and columns. % diff --git a/+cv/pyrMeanShiftFiltering.m b/+cv/pyrMeanShiftFiltering.m index 9efbb488e..7471df83c 100644 --- a/+cv/pyrMeanShiftFiltering.m +++ b/+cv/pyrMeanShiftFiltering.m @@ -1,26 +1,26 @@ %PYRMEANSHIFTFILTERING Performs initial step of meanshift segmentation of an image % -% dst = cv.pyrMeanShiftFiltering(src) -% dst = cv.pyrMeanShiftFiltering(src, 'OptionName',optionValue, ...) +% dst = cv.pyrMeanShiftFiltering(src) +% dst = cv.pyrMeanShiftFiltering(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ The source 8-bit, 3-channel image. % % ## Output % * __dst__ The destination image of the same format and the same size as the -% source `src`. +% source `src`. % % ## Options % * __SP__ The spatial window radius. default 5 % * __SR__ The color window radius. default 10 % * __MaxLevel__ Maximum level of the pyramid for the segmentation. default 1 % * __Criteria__ Termination criteria: when to stop meanshift iterations. -% default `struct('type','Count+EPS', 'maxCount',5, 'epsilon',1.0)`. -% Struct with the following fields is accepted: -% * __type__ one of 'Count', 'EPS', or 'Count+EPS' to indicate which -% criteria to use. -% * __maxCount__ maximum number of iterations -% * __epsilon__ +% default `struct('type','Count+EPS', 'maxCount',5, 'epsilon',1.0)`. +% Struct with the following fields is accepted: +% * __type__ one of 'Count', 'EPS', or 'Count+EPS' to indicate which +% criteria to use. +% * __maxCount__ maximum number of iterations +% * __epsilon__ % % The function implements the filtering stage of meanshift segmentation, that % is, the output of the function is the filtered "posterized" image with color @@ -29,7 +29,7 @@ % meanshift iterations, that is, the pixel `(X,Y)` neighborhood in the joint % space-color hyperspace is considered: % -% (x,y): X-sp <= x <= X+sp, Y-sp <= y <= Y+sp, ||(R,G,B) - (r,g,b)|| <= sr +% (x,y): X-sp <= x <= X+sp, Y-sp <= y <= Y+sp, ||(R,G,B) - (r,g,b)|| <= sr % % where `(R,G,B)` and `(r,g,b)` are the vectors of color components at `(X,Y)` % and `(x,y)`, respectively (though, the algorithm does not depend on the @@ -38,13 +38,13 @@ % vector `(R',G',B')` are found and they act as the neighborhood center on the % next iteration: % -% (X,Y) (X',Y'), (R,G,B) (R',G',B'). +% (X,Y) (X',Y'), (R,G,B) (R',G',B'). 
% % After the iterations over, the color components of the initial pixel (that % is, the pixel from where the iterations started) are set to the final value % (average color at the last iteration): % -% I(X,Y) = (R*,G*,B*) +% I(X,Y) = (R*,G*,B*) % % When `MaxLevel > 0`, the gaussian pyramid of `MaxLevel+1` levels is built, % and the above procedure is run on the smallest layer first. After that, the diff --git a/+cv/pyrUp.m b/+cv/pyrUp.m index 5ad071829..3c6375593 100644 --- a/+cv/pyrUp.m +++ b/+cv/pyrUp.m @@ -1,26 +1,26 @@ %PYRUP Upsamples an image and then blurs it % -% dst = cv.pyrUp(src) -% dst = cv.pyrUp(src, 'OptionName',optionValue, ...) +% dst = cv.pyrUp(src) +% dst = cv.pyrUp(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ Input image; `uint8`, `uint16`, `int16`, `single`, or `double`. % % ## Output -% * __dst__ Destination image. It has the specified size and the same type -% as `src`. +% * __dst__ Destination image. It has the specified size and the same type as +% `src`. % % ## Options % * __DstSize__ Size of the output image `[w,h]`. default [0,0], see below. % * __BorderType__ Pixel extrapolation method, see cv.copyMakeBorder -% ('Constant' isn't supported). Default 'Default' +% ('Constant' isn't supported). Default 'Default' % % By default, size of the output image is computed as % `[size(src,2)*2 size(src,1)*2]`, but in any case, the following conditions % should be satisfied: % -% abs(DstSize(2) - size(src,2)*2) <= mod(DstSize(2),2) -% abs(DstSize(1) - size(src,1)*2) <= mod(DstSize(1),2) +% abs(DstSize(2) - size(src,2)*2) <= mod(DstSize(2),2) +% abs(DstSize(1) - size(src,1)*2) <= mod(DstSize(1),2) % % The function performs the upsampling step of the Gaussian pyramid % construction, though it can actually be used to construct the Laplacian diff --git a/+cv/randGaussMixture.m_ b/+cv/randGaussMixture.m_ index 4a98105ea..797105505 100644 --- a/+cv/randGaussMixture.m_ +++ b/+cv/randGaussMixture.m_ @@ -1,6 +1,6 @@ %RANDGAUSSMIXTURE Generates sample from gaussian mixture distribution % -% [samples, sampClasses] = cv.randGaussMixture(means, covs, weights, nsamples) +% [samples, sampClasses] = cv.randGaussMixture(means, covs, weights, nsamples) % % ## Input % * __means__ matrix of mean row vectors `Kxd` @@ -11,7 +11,7 @@ % ## Output % * __samples__ returned samples array `nsamples-by-d`. % * __sampClasses__ indices of corresponding components from which samples -% were generated `nsamples-by-1`. +% were generated `nsamples-by-1`. % % See also: cv.randMVNormal, gmdistribution.random % diff --git a/+cv/randMVNormal.m b/+cv/randMVNormal.m index 932551535..37192dfd8 100644 --- a/+cv/randMVNormal.m +++ b/+cv/randMVNormal.m @@ -1,6 +1,6 @@ %RANDMVNORMAL Generates sample from multivariate normal distribution % -% samples = cv.randMVNormal(mu, sigma, nsamples) +% samples = cv.randMVNormal(mu, sigma, nsamples) % % ## Input % * __mu__ mean, an average row vector `1xd`. diff --git a/+cv/recoverPose.m b/+cv/recoverPose.m index 1cc3bae4f..0c7283cb2 100644 --- a/+cv/recoverPose.m +++ b/+cv/recoverPose.m @@ -1,37 +1,37 @@ %RECOVERPOSE Recover relative camera rotation and translation from an estimated essential matrix and the corresponding points in two images, using cheirality check % -% [R, t, good] = cv.recoverPose(E, points1, points2) -% [R, t, good, mask, triangulatedPoints] = cv.recoverPose(...) -% [...] = cv.recoverPose(..., 'OptionName', optionValue, ...) +% [R, t, good] = cv.recoverPose(E, points1, points2) +% [R, t, good, mask, triangulatedPoints] = cv.recoverPose(...) 
+% [...] = cv.recoverPose(..., 'OptionName', optionValue, ...) % % ## Input % * __E__ The input essential matrix, 3x3. -% * __points1__ Cell array of N 2D points from the first image, or numeric array -% Nx2/Nx1x2/1xNx2. The point coordinates should be floating-point -% (single or double precision). +% * __points1__ Cell array of N 2D points from the first image, or numeric +% array Nx2/Nx1x2/1xNx2. The point coordinates should be floating-point +% (single or double precision). % * __points2__ Cell array or numeric array of the second image points of the -% same size and format as `points1`. +% same size and format as `points1`. % % ## Output % * __R__ Recovered relative rotation, 3x3 matrix. % * __t__ Recovered relative translation, 3x1 vector. % * __good__ the number of inliers which pass the cheirality check. % * __mask__ Output mask for inliers in `points1` and `points2`. In the output -% mask only inliers which pass the cheirality check. Vector of length N, -% see the `Mask` input option. +% mask only inliers which pass the cheirality check. Vector of length N, see +% the `Mask` input option. % * __triangulatedPoints__ 3D points which were reconstructed by triangulation. % % ## Options % * __CameraMatrix__ Camera matrix `K = [fx 0 cx; 0 fy cy; 0 0 1]`. Note that -% this function assumes that `points1` and `points2` are feature points -% from cameras with the same camera matrix. default `eye(3)`. +% this function assumes that `points1` and `points2` are feature points from +% cameras with the same camera matrix. default `eye(3)`. % * __DistanceThreshold__ threshold distance which is used to filter out far -% away points (i.e. infinite points). default 50.0 +% away points (i.e. infinite points). default 50.0 % * __Mask__ Input mask of length N for inliers in `points1` and `points2` -% (0 for outliers and to 1 for the other points (inliers). If it is not -% empty, then it marks inliers in `points1` and `points2` for then given -% essential matrix `E`. Only these inliers will be used to recover pose. -% Not set by default. +% (0 for outliers and to 1 for the other points (inliers). If it is not +% empty, then it marks inliers in `points1` and `points2` for then given +% essential matrix `E`. Only these inliers will be used to recover pose. +% Not set by default. % % This function decomposes an essential matrix using cv.decomposeEssentialMat % and then verifies possible pose hypotheses by doing cheirality check. The @@ -44,23 +44,23 @@ % % ## Example % -% % Estimation of fundamental matrix using the RANSAC algorithm -% point_count = 100; -% points1 = cell(1, point_count); -% points2 = cell(1, point_count); -% % initialize the points here ... -% for i=1:point_count -% points1{i} = ...; % [x,y] -% points2{i} = ...; % [x,y] -% end +% % Estimation of fundamental matrix using the RANSAC algorithm +% point_count = 100; +% points1 = cell(1, point_count); +% points2 = cell(1, point_count); +% % initialize the points here ... +% for i=1:point_count +% points1{i} = ...; % [x,y] +% points2{i} = ...; % [x,y] +% end % -% % cametra matrix with both focal lengths = 1, and principal point = [0 0] -% cameraMatrix = eye(3,3); +% % cametra matrix with both focal lengths = 1, and principal point = [0 0] +% cameraMatrix = eye(3,3); % -% [E, mask] = cv.findEssentialMat(points1, points2, ... -% 'CameraMatrix',cameraMatrix, 'Method','Ransac'); -% [R, t, ~, mask] = cv.recoverPose(E, points1, points2, ... -% 'CameraMatrix',cameraMatrix, 'Mask',mask); +% [E, mask] = cv.findEssentialMat(points1, points2, ... 
+% 'CameraMatrix',cameraMatrix, 'Method','Ransac'); +% [R, t, ~, mask] = cv.recoverPose(E, points1, points2, ... +% 'CameraMatrix',cameraMatrix, 'Mask',mask); % % ## References % [Nister03]: diff --git a/+cv/rectangle.m b/+cv/rectangle.m index 5b025dba2..102d96325 100644 --- a/+cv/rectangle.m +++ b/+cv/rectangle.m @@ -1,8 +1,8 @@ %RECTANGLE Draws a simple, thick, or filled up-right rectangle % -% img = cv.rectangle(img, pt1, pt2) -% img = cv.rectangle(img, rect) -% [...] = cv.rectangle(..., 'OptionName', optionValue, ...) +% img = cv.rectangle(img, pt1, pt2) +% img = cv.rectangle(img, rect) +% [...] = cv.rectangle(..., 'OptionName', optionValue, ...) % % ## Input % * __img__ Image. @@ -16,12 +16,12 @@ % ## Options % * __Color__ Rectangle color or brightness (grayscale image). default zeros % * __Thickness__ Thickness of lines that make up the rectangle. Negative -% values (like -1) or the string 'Filled', mean that the function has -% to draw a filled rectangle. default 1. +% values (like -1) or the string 'Filled', mean that the function has to +% draw a filled rectangle. default 1. % * __LineType__ Type of the line boundary. One of: -% * __4__ 4-connected line -% * __8__ 8-connected line (default) -% * __AA__ anti-aliased line +% * __4__ 4-connected line +% * __8__ 8-connected line (default) +% * __AA__ anti-aliased line % * __Shift__ Number of fractional bits in the point coordinates. default 0 % % The function cv.rectangle draws a rectangle outline or a filled rectangle diff --git a/+cv/rectify3Collinear.m b/+cv/rectify3Collinear.m index 278406906..223b8b8f9 100644 --- a/+cv/rectify3Collinear.m +++ b/+cv/rectify3Collinear.m @@ -1,74 +1,70 @@ %RECTIFY3COLLINEAR Computes the rectification transformations for 3-head camera, where all the heads are on the same line % -% S = cv.rectify3Collinear(cameraMatrix1, distCoeffs1, cameraMatrix2, -% distCoeffs2, cameraMatrix3, distCoeffs3, imageSize, R12, T12, R13, T13) -% [...] = cv.rectify3Collinear(..., 'OptionName', optionValue, ...) +% S = cv.rectify3Collinear(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, cameraMatrix3, distCoeffs3, imageSize, R12, T12, R13, T13) +% [...] = cv.rectify3Collinear(..., 'OptionName', optionValue, ...) % % ## Input % * __cameraMatrix1__ First camera matrix 3x3. % * __distCoeffs1__ First camera distortion parameters of 4, 5, 8, 12 or 14 -% elements. +% elements. % * __cameraMatrix2__ Second camera matrix 3x3. % * __distCoeffs2__ Second camera distortion parameters of 4, 5, 8, 12 or 14 -% elements. +% elements. % * __cameraMatrix3__ Third camera matrix 3x3. % * __distCoeffs3__ Third camera distortion parameters of 4, 5, 8, 12 or 14 -% elements. +% elements. % * __imageSize__ Size of the image used for stereo calibration `[w,h]`. -% * __R12__ Rotation matrix between the coordinate systems of the first and the -% second cameras, 3x3/3x1 (see cv.Rodrigues) +% * __R12__ Rotation matrix between the coordinate systems of the first and +% the second cameras, 3x3/3x1 (see cv.Rodrigues) % * __T12__ Translation vector between coordinate systems of the first and the -% second cameras, 3x1. -% * __R13__ Rotation matrix between the coordinate systems of the first and the -% third cameras, 3x3/3x1 (see cv.Rodrigues) +% second cameras, 3x1. +% * __R13__ Rotation matrix between the coordinate systems of the first and +% the third cameras, 3x3/3x1 (see cv.Rodrigues) % * __T13__ Translation vector between coordinate systems of the first and the -% third cameras, 3x1. +% third cameras, 3x1. 
% % ## Output % * __S__ scalar struct having the following fields: -% * __R1__ 3x3 rectification transform (rotation matrix) for the first -% camera. -% * __R2__ 3x3 rectification transform (rotation matrix) for the second -% camera. -% * __R3__ 3x3 rectification transform (rotation matrix) for the third -% camera. -% * __P1__ 3x4 projection matrix in the new (rectified) coordinate -% systems for the first camera. -% * __P2__ 3x4 projection matrix in the new (rectified) coordinate -% systems for the second camera. -% * __P3__ 3x4 projection matrix in the new (rectified) coordinate -% systems for the third camera. -% * __Q__ 4x4 disparity-to-depth mapping matrix (see -% cv.reprojectImageTo3D). -% * __roi1__, __roi2__ rectangles inside the rectified images where all -% the pixels are valid `[x,y,w,h]`. If `Alpha=0`, the ROIs cover -% the whole images. Otherwise, they are likely to be smaller. -% * __ratio__ disparity ratio, floating-point numeric scalar. +% * __R1__ 3x3 rectification transform (rotation matrix) for the first +% camera. +% * __R2__ 3x3 rectification transform (rotation matrix) for the second +% camera. +% * __R3__ 3x3 rectification transform (rotation matrix) for the third +% camera. +% * __P1__ 3x4 projection matrix in the new (rectified) coordinate systems +% for the first camera. +% * __P2__ 3x4 projection matrix in the new (rectified) coordinate systems +% for the second camera. +% * __P3__ 3x4 projection matrix in the new (rectified) coordinate systems +% for the third camera. +% * __Q__ 4x4 disparity-to-depth mapping matrix (see cv.reprojectImageTo3D). +% * __roi1__, __roi2__ rectangles inside the rectified images where all the +% pixels are valid `[x,y,w,h]`. If `Alpha=0`, the ROIs cover the whole +% images. Otherwise, they are likely to be smaller. +% * __ratio__ disparity ratio, floating-point numeric scalar. % % ## Options % * __ImgPoints1__, __ImgPoints3__ Optional cell arrays of 2D points -% `{[x,y], ...}`, used to adjust the third matrix `P3`. -% Not set by default. +% `{[x,y], ...}`, used to adjust the third matrix `P3`. Not set by default. % * __Alpha__ Free scaling parameter. If it is -1 or absent, the function -% performs the default scaling. Otherwise, the parameter should be -% between 0 and 1. `Alpha=0` means that the rectified images are zoomed -% and shifted so that only valid pixels are visible (no black areas -% after rectification). `Alpha=1` means that the rectified image is -% decimated and shifted so that all the pixels from the original images -% from the cameras are retained in the rectified images (no source image -% pixels are lost). Obviously, any intermediate value yields an -% intermediate result between those two extreme cases. default -1 +% performs the default scaling. Otherwise, the parameter should be between 0 +% and 1. `Alpha=0` means that the rectified images are zoomed and shifted so +% that only valid pixels are visible (no black areas after rectification). +% `Alpha=1` means that the rectified image is decimated and shifted so that +% all the pixels from the original images from the cameras are retained in +% the rectified images (no source image pixels are lost). Obviously, any +% intermediate value yields an intermediate result between those two extreme +% cases. default -1 % * __NewImageSize__ New image resolution after rectification. The same size -% should be passed to cv.initUndistortRectifyMap. When [0,0] is passed -% (default), it is set to the original `imageSize`. 
Setting it to larger -% value can help you preserve details in the original image, especially -% when there is a big radial distortion. +% should be passed to cv.initUndistortRectifyMap. When [0,0] is passed +% (default), it is set to the original `imageSize`. Setting it to larger +% value can help you preserve details in the original image, especially +% when there is a big radial distortion. % * __ZeroDisparity__ If the flag is set, the function makes the principal -% points of each camera have the same pixel coordinates in the rectified -% views. And if the flag is not set, the function may still shift the -% images in the horizontal or vertical direction (depending on the -% orientation of epipolar lines) to maximize the useful image area. -% default true. +% points of each camera have the same pixel coordinates in the rectified +% views. And if the flag is not set, the function may still shift the images +% in the horizontal or vertical direction (depending on the orientation of +% epipolar lines) to maximize the useful image area. default true. % % See also: cv.stereoRectify % diff --git a/+cv/remap.m b/+cv/remap.m index 4ad755ba8..ca0f46530 100644 --- a/+cv/remap.m +++ b/+cv/remap.m @@ -1,18 +1,18 @@ %REMAP Applies a generic geometrical transformation to an image % -% dst = cv.remap(src, map1, map2) -% dst = cv.remap(src, map1) -% dst = cv.remap(..., 'OptionName',optionValue, ...) +% dst = cv.remap(src, map1, map2) +% dst = cv.remap(src, map1) +% dst = cv.remap(..., 'OptionName',optionValue, ...) % % ## Input % * __src__ Source image. % * __map1__ The first map of either (x,y) points or just x values of the -% transformation having the type `int16` (2-channels), `single` -% (1-channels), or `single` (2-channels). See cv.convertMaps for details -% on converting a floating-point representation to fixed-point for speed. +% transformation having the type `int16` (2-channels), `single` +% (1-channels), or `single` (2-channels). See cv.convertMaps for details on +% converting a floating-point representation to fixed-point for speed. % * __map2__ The second map of y values of the transformation having the type -% `uint16` (1-channel) or `single` (1-channel), or none (empty map if -% `map1` is (x,y) points), respectively. +% `uint16` (1-channel) or `single` (1-channel), or none (empty map if `map1` +% is (x,y) points), respectively. % % In other words, the following map combinations are valid: % @@ -27,37 +27,35 @@ % % ## Output % * __dst__ Destination image. It has the same row/column size as `map1` and -% the same type as `src`. +% the same type as `src`. % % ## Options % * __Interpolation__ interpolation method, default 'Linear'. One of: -% * __Nearest__ a nearest-neighbor interpolation -% * __Linear__ a bilinear interpolation (used by default) -% * __Cubic__ a bicubic interpolation over 4x4 pixel neighborhood -% * __Lanczos4__ a Lanczos interpolation over 8x8 pixel neighborhood +% * __Nearest__ a nearest-neighbor interpolation +% * __Linear__ a bilinear interpolation (used by default) +% * __Cubic__ a bicubic interpolation over 4x4 pixel neighborhood +% * __Lanczos4__ a Lanczos interpolation over 8x8 pixel neighborhood % * __BorderType__ Pixel extrapolation method. When 'Transparent', it means -% that the pixels in the destination image corresponding to the -% "outliers" in the source image are not modified by the function. 
-% default 'Constant' -% * __Constant__ `iiiiii|abcdefgh|iiiiiii` with some specified `i` -% * __Replicate__ `aaaaaa|abcdefgh|hhhhhhh` -% * __Reflect__ `fedcba|abcdefgh|hgfedcb` -% * __Reflect101__ `gfedcb|abcdefgh|gfedcba` -% * __Wrap__ `cdefgh|abcdefgh|abcdefg` -% * __Transparent__ `uvwxyz|absdefgh|ijklmno` -% * __Default__ same as 'Reflect101' +% that the pixels in the destination image corresponding to the "outliers" +% in the source image are not modified by the function. default 'Constant' +% * __Constant__ `iiiiii|abcdefgh|iiiiiii` with some specified `i` +% * __Replicate__ `aaaaaa|abcdefgh|hhhhhhh` +% * __Reflect__ `fedcba|abcdefgh|hgfedcb` +% * __Reflect101__ `gfedcb|abcdefgh|gfedcba` +% * __Wrap__ `cdefgh|abcdefgh|abcdefg` +% * __Transparent__ `uvwxyz|absdefgh|ijklmno` +% * __Default__ same as 'Reflect101' % * __BorderValue__ Value used in case of a constant border. default 0 % * __Dst__ Optional initial image for the output. If not set, it is -% automatically created by the function. Note that it must match the -% row/column size of `map1` and the type of `src`, otherwise it is -% ignored and recreated by the function. This option is only useful when -% `BorderType=Transparent`, in which case the transformed image is drawn -% onto the existing `Dst` without extrapolating pixels. Not set by -% default. +% automatically created by the function. Note that it must match the +% row/column size of `map1` and the type of `src`, otherwise it is ignored +% and recreated by the function. This option is only useful when +% `BorderType=Transparent`, in which case the transformed image is drawn +% onto the existing `Dst` without extrapolating pixels. Not set by default. % % The function cv.remap transforms the source image using the specified map: % -% dst(x,y) = src(mapX(x,y), mapY(x,y)) +% dst(x,y) = src(mapX(x,y), mapY(x,y)) % % where values of pixels with non-integer coordinates are computed using one % of available interpolation methods. `mapX` and `mapY` can be encoded as diff --git a/+cv/reprojectImageTo3D.m b/+cv/reprojectImageTo3D.m index 823eb380a..1995f6ff7 100644 --- a/+cv/reprojectImageTo3D.m +++ b/+cv/reprojectImageTo3D.m @@ -1,39 +1,38 @@ %REPROJECTIMAGETO3D Reprojects a disparity image to 3D space % -% im3d = cv.reprojectImageTo3D(disparity, Q) -% [...] = cv.reprojectImageTo3D(..., 'OptionName', optionValue, ...) +% im3d = cv.reprojectImageTo3D(disparity, Q) +% [...] = cv.reprojectImageTo3D(..., 'OptionName', optionValue, ...) % % ## Input % * __disparity__ Input single-channel 8-bit unsigned, 16-bit signed, 32-bit -% signed or 32-bit floating-point disparity image. If 16-bit signed -% format is used, the values are assumed to have no fractional bits. +% signed or 32-bit floating-point disparity image. If 16-bit signed format +% is used, the values are assumed to have no fractional bits. % * __Q__ 4x4 perspective transformation matrix that can be obtained with -% cv.stereoRectify. +% cv.stereoRectify. % % ## Output % * __im3d__ Output 3-channel floating-point image of the same size as -% disparity. Each element of `im3d(x,y,ch)` contains 3D coordinates of -% the point `(x,y)` computed from the disparity map. +% disparity. Each element of `im3d(x,y,ch)` contains 3D coordinates of the +% point `(x,y)` computed from the disparity map. % % ## Options % * __HandleMissingValues__ Indicates, whether the function should handle -% missing values (i.e. points where the disparity was not computed). 
-% If `HandleMissingValues=true`, then pixels with the minimal -% disparity that corresponds to the outliers (see cv.StereoBM.compute) -% are transformed to 3D points with a very large Z value (currently set -% to 10000). default false. +% missing values (i.e. points where the disparity was not computed). If +% `HandleMissingValues=true`, then pixels with the minimal disparity that +% corresponds to the outliers (see cv.StereoBM.compute) are transformed to +% 3D points with a very large Z value (currently set to 10000). default false % * __Depth__ The optional output array depth. If it is -1 (default), the -% output image will have `single` depth. `Depth` can be set to one of: -% * __int16__ -% * __int32__ -% * __single__ +% output image will have `single` depth. `Depth` can be set to one of: +% * __int16__ +% * __int32__ +% * __single__ % % The function transforms a single-channel disparity map to a 3-channel image % representing a 3D surface. That is, for each pixel `(x,y)` and the % corresponding disparity `d=disparity(x,y)`, it computes: % -% [X, Y, Z, W]^T = Q * [x, y, disparity(x,y), 1]^T -% im3d(x,y,:) = [X/W, Y/W, Z/W] +% [X, Y, Z, W]^T = Q * [x, y, disparity(x,y), 1]^T +% im3d(x,y,:) = [X/W, Y/W, Z/W] % % The matrix `Q` can be an arbitrary 4x4 matrix (for example, the one computed % by cv.stereoRectify). To reproject a sparse set of points `{(x,y,d),...}` @@ -42,16 +41,16 @@ % ## Example % The function is similar to the following simplified MATLAB code: % -% function XYZ = my_reprojectImageTo3D(D, Q) -% [h,w] = size(D); -% XYZ = zeros([h,w,3], 'single'); -% for x=1:h -% for y=1:w -% v = Q * [x; y; double(D(x,y)); 1]; -% XYZ(x,y,:) = v(1:3) ./ v(4); -% end -% end -% end +% function XYZ = my_reprojectImageTo3D(D, Q) +% [h,w] = size(D); +% XYZ = zeros([h,w,3], 'single'); +% for x=1:h +% for y=1:w +% v = Q * [x; y; double(D(x,y)); 1]; +% XYZ(x,y,:) = v(1:3) ./ v(4); +% end +% end +% end % % See also: cv.StereoBM, cv.StereoSGBM, cv.stereoRectify, % cv.perspectiveTransform, reconstructScene diff --git a/+cv/resize.m b/+cv/resize.m index e96561ef2..4a85d0ae4 100644 --- a/+cv/resize.m +++ b/+cv/resize.m @@ -1,8 +1,8 @@ %RESIZE Resizes an image % -% dst = cv.resize(src, dsize) -% dst = cv.resize(src, fx, fy) -% dst = cv.resize(..., 'OptionName',optionValue, ...) +% dst = cv.resize(src, dsize) +% dst = cv.resize(src, fx, fy) +% dst = cv.resize(..., 'OptionName',optionValue, ...) % % ## Input % * __src__ input image. @@ -15,30 +15,29 @@ % % ## Output % * __dst__ output image. It has the size `dsize` or the size computed from -% `size(src)` and `fx` and `fy`. The type of `dst` is the same as of -% `src`. +% `size(src)` and `fx` and `fy`. The type of `dst` is the same as of `src`. % % ## Options % * __Interpolation__ interpolation method, default 'Linear'. One of: -% * __Nearest__ a nearest-neighbor interpolation -% * __Linear__ a bilinear interpolation (used by default) -% * __Cubic__ a bicubic interpolation over 4x4 pixel neighborhood -% * __Area__ resampling using pixel area relation. It may be a preferred -% method for image decimation, as it gives moire-free results. But -% when the image is zoomed, it is similar to the 'Nearest' method. -% * __Lanczos4__ a Lanczos interpolation over 8x8 pixel neighborhood +% * __Nearest__ a nearest-neighbor interpolation +% * __Linear__ a bilinear interpolation (used by default) +% * __Cubic__ a bicubic interpolation over 4x4 pixel neighborhood +% * __Area__ resampling using pixel area relation. 
It may be a preferred
+% method for image decimation, as it gives moire-free results. But when
+% the image is zoomed, it is similar to the 'Nearest' method.
+% * __Lanczos4__ a Lanczos interpolation over 8x8 pixel neighborhood
%
% The function cv.resize resizes the image `src` down to or up to the
% specified size. The size and type of `dst` are derived from `src`, `dsize`,
% `fx` and `fy`. If you want to resize `src` so that it fits a specified size,
% you may call the function as follows:
%
-% dst = cv.resize(src, [w,h], 'Interpolation',interp)
+% dst = cv.resize(src, [w,h], 'Interpolation',interp)
%
% If you want to decimate the image by a factor of 2 in each direction, you can
% call the function this way:
%
-% dst = cv.resize(src, 0.5, 0.5, 'Interpolation',interp)
+% dst = cv.resize(src, 0.5, 0.5, 'Interpolation',interp)
%
% To shrink an image, it will generally look best with 'Area' interpolation,
% whereas to enlarge an image, it will generally look best with 'Cubic' (slow)
diff --git a/+cv/rotate.m b/+cv/rotate.m
index 613cba548..7e954c752 100644
--- a/+cv/rotate.m
+++ b/+cv/rotate.m
@@ -1,17 +1,17 @@
%ROTATE Rotates a 2D array in multiples of 90 degrees
%
-% dst = cv.rotate(src, rotateCode)
+% dst = cv.rotate(src, rotateCode)
%
% ## Input
% * __src__ input array.
% * __rotateCode__ an enum to specify how to rotate the array:
-% * __90CW__ Rotate 90 degrees clockwise
-% * __180__ Rotate 180 degrees clockwise
-% * __90CCW__ Rotate 270 degrees clockwise
+% * __90CW__ Rotate 90 degrees clockwise
+% * __180__ Rotate 180 degrees clockwise
+% * __90CCW__ Rotate 270 degrees clockwise
%
% ## Output
% * __dst__ output array of the same type as `src`. The size is the same with
-% '180', and the rows and cols are switched for '90CW' and '90CCW'.
+% '180', and the rows and cols are switched for '90CW' and '90CCW'.
%
% The function cv.rotate rotates the array in one of three different ways:
%
diff --git a/+cv/rotatedRectangleIntersection.m b/+cv/rotatedRectangleIntersection.m
index bb8e39c80..62098bdae 100644
--- a/+cv/rotatedRectangleIntersection.m
+++ b/+cv/rotatedRectangleIntersection.m
@@ -1,28 +1,26 @@
%ROTATEDRECTANGLEINTERSECTION Finds out if there is any intersection between two rotated rectangles
%
-% [intersectingRegion,result] = cv.rotatedRectangleIntersection(rect1, rect2)
+% [intersectingRegion,result] = cv.rotatedRectangleIntersection(rect1, rect2)
%
% ## Input
% * __rect1__ First rectangle. Structure with the following fields:
-% * __center__ The rectangle mass center `[x,y]`.
-% * __size__ Width and height of the rectangle `[w,h]`.
-% * __angle__ The rotation angle in a clockwise direction.
-% When the angle is 0, 90, 180, 270 etc., the
-% rectangle becomes an up-right rectangle.
+% * __center__ The rectangle mass center `[x,y]`.
+% * __size__ Width and height of the rectangle `[w,h]`.
+% * __angle__ The rotation angle in a clockwise direction. When the angle is
+% 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle.
% * __rect2__ Second rectangle. Similar struct to first.
%
% ## Output
% * __intersectingRegion__ The output array of the vertices of the
-% intersecting region. It returns at most 8 vertices. A cell array of
-% 2D points `{[x,y], ...}`
+% intersecting region. It returns at most 8 vertices. A cell array of 2D
+% points `{[x,y], ...}`
% * __result__ types of intersection between rectangles. One of:
-% * __None__ No intersection.
-% * __Partial__ There is a partial intersection.
-% * __Full__ One of the rectangle is fully enclosed in the other.
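(An aside, not part of the patch: a minimal usage sketch of cv.rotatedRectangleIntersection as documented in this hunk; the rectangle values are invented for illustration.)

    rect1 = struct('center',[50 50], 'size',[40 30], 'angle',0);
    rect2 = struct('center',[60 55], 'size',[40 30], 'angle',30);
    [pts, res] = cv.rotatedRectangleIntersection(rect1, rect2);
    % res is 'None', 'Partial', or 'Full'; pts is a cell array of [x,y] vertices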
+% * __None__ No intersection.
+% * __Partial__ There is a partial intersection.
+% * __Full__ One of the rectangles is fully enclosed in the other.
%
-% Finds out if there is any intersection between two rotated rectangles.
-% If there is then the vertices of the intersecting region are returned as
-% well.
+% Finds out if there is any intersection between two rotated rectangles. If
+% there is, then the vertices of the intersecting region are returned as well.
%
% See also: cv.intersectConvexConvex
%
diff --git a/+cv/sampsonDistance.m b/+cv/sampsonDistance.m
index b2a092496..b562f31d7 100644
--- a/+cv/sampsonDistance.m
+++ b/+cv/sampsonDistance.m
@@ -1,6 +1,6 @@
%SAMPSONDISTANCE Calculates the Sampson Distance between two points
%
-% sd = cv.sampsonDistance(pt1, pt2, F)
+% sd = cv.sampsonDistance(pt1, pt2, F)
%
% ## Input
% * __pt1__ first homogeneous 2D point.
@@ -13,9 +13,9 @@
% The function cv.sampsonDistance calculates and returns the first order
% approximation of the geometric error as:
%
-% (pt2'*F*pt1)^2
-% sd(pt1,pt2) = -----------------------------------------------------
-% ((F*pt1)(0) + (F*pt1)(1) + (F'*pt2)(0) + (F'*pt2)(1))
+% (pt2'*F*pt1)^2
+% sd(pt1,pt2) = --------------------------------------------------------------
+% ((F*pt1)(0)^2 + (F*pt1)(1)^2 + (F'*pt2)(0)^2 + (F'*pt2)(1)^2)
%
% The fundamental matrix may be calculated using the cv.findFundamentalMat
% function. See HZ 11.4.3 for details.
diff --git a/+cv/seamlessClone.m b/+cv/seamlessClone.m
index ae33fd6cc..eb9a76f74 100644
--- a/+cv/seamlessClone.m
+++ b/+cv/seamlessClone.m
@@ -1,7 +1,7 @@
%SEAMLESSCLONE Seamless Cloning
%
-% blend = cv.seamlessClone(src, dst, mask, p)
-% blend = cv.seamlessClone(src, dst, mask, p, 'OptionName',optionValue, ...)
+% blend = cv.seamlessClone(src, dst, mask, p)
+% blend = cv.seamlessClone(src, dst, mask, p, 'OptionName',optionValue, ...)
%
% ## Input
% * __src__ Input 8-bit 3-channel image.
@@ -14,19 +14,18 @@
%
% ## Options
% * __Method__ Cloning method that could be one of the following:
-% * __NormalClone__ (default) The power of the method is fully expressed
-% when inserting objects with complex outlines into a new
-% background.
-% * __MixedClone__ The classic method, color-based selection and alpha
-% masking might be time consuming and often leaves an undesirable
-% halo. Seamless cloning, even averaged with the original image,
-% is not effective. Mixed seamless cloning based on a loose
-% selection proves effective.
-% * __MonochromeTransfer__ Monochrome transfer allows the user to easily
-% replace certain features of one object by alternative features.
+% * __NormalClone__ (default) The power of the method is fully expressed
+% when inserting objects with complex outlines into a new background.
+% * __MixedClone__ The classic method, color-based selection and alpha
+% masking might be time consuming and often leaves an undesirable halo.
+% Seamless cloning, even averaged with the original image, is not
+% effective. Mixed seamless cloning based on a loose selection proves
+% effective.
+% * __MonochromeTransfer__ Monochrome transfer allows the user to easily
+% replace certain features of one object by alternative features.
% * __FlipChannels__ whether to flip the order of color channels in inputs
-% `src` and `mask` and output `dst`, between MATLAB's RGB order and
-% OpenCV's BGR (input: RGB->BGR, output: BGR->RGB). default false
+% `src` and `mask` and output `dst`, between MATLAB's RGB order and OpenCV's
+% BGR (input: RGB->BGR, output: BGR->RGB).
default false
%
% Image editing tasks concern either global changes (color/intensity
% corrections, filters, deformations) or local changes concerned to a
diff --git a/+cv/sepFilter2D.m b/+cv/sepFilter2D.m
index f72442994..ec6a82ced 100644
--- a/+cv/sepFilter2D.m
+++ b/+cv/sepFilter2D.m
@@ -1,7 +1,7 @@
%SEPFILTER2D Applies a separable linear filter to an image
%
-% dst = cv.sepFilter2D(src, kernelX, kernelY)
-% dst = cv.sepFilter2D(..., 'OptionName',optionValue, ...)
+% dst = cv.sepFilter2D(src, kernelX, kernelY)
+% dst = cv.sepFilter2D(..., 'OptionName',optionValue, ...)
%
% ## Input
% * __src__ Source image.
@@ -10,15 +10,15 @@
%
% ## Output
% * __dst__ Destination image of the same size and the same number of channels
-% as `src`.
+% as `src`.
%
% ## Options
% * __Anchor__ Anchor position within the kernel. The default value (-1,-1)
-% means that the anchor is at the kernel center.
+% means that the anchor is at the kernel center.
% * __Delta__ Value added to the filtered results before storing them.
-% default 0
+% default 0
% * __BorderType__ Pixel extrapolation method. See cv.copyMakeBorder.
-% default 'Default'
+% default 'Default'
% * __DDepth__ Destination image depth, see cv.filter2D. default -1
%
% The function applies a separable linear filter to the image. That is, first,
diff --git a/+cv/setRNGSeed.m b/+cv/setRNGSeed.m
index 5f8be0f3f..670069900 100644
--- a/+cv/setRNGSeed.m
+++ b/+cv/setRNGSeed.m
@@ -1,6 +1,6 @@
%SETRNGSEED Sets state of default random number generator
%
-% cv.setRNGSeed(seed)
+% cv.setRNGSeed(seed)
%
% ## Input
% * __seed__ integer, new state for default random number generator.
diff --git a/+cv/solve.m b/+cv/solve.m
index a5eb91de9..46aa8dc0d 100644
--- a/+cv/solve.m
+++ b/+cv/solve.m
@@ -1,7 +1,7 @@
%SOLVE Solves one or more linear systems or least-squares problems
%
-% [dst,ret] = cv.solve(src1, src2)
-% [...] = cv.solve(..., 'OptionName', optionValue, ...)
+% [dst,ret] = cv.solve(src1, src2)
+% [...] = cv.solve(..., 'OptionName', optionValue, ...)
%
% ## Input
% * __src1__ input matrix on the left-hand side of the system.
@@ -13,33 +13,32 @@
%
% ## Options
% * __Method__ solution (matrix inversion) method, default 'LU'. One of the
-% following matrix decomposition types:
-% * __LU__ Gaussian elimination with the optimal pivot element chosen.
-% * __SVD__ singular value decomposition (SVD) method; the system can be
-% over-defined and/or the matrix `src1` can be singular.
-% * __EIG__ eigenvalue decomposition; the matrix `src1` must be
-% symmetrical.
-% * __Cholesky__ Cholesky LLT factorization; the matrix `src1` must be
-% symmetrical and positively defined.
-% * __QR__ QR factorization; the system can be over-defined and/or the
-% matrix `src1` can be singular.
+% following matrix decomposition types:
+% * __LU__ Gaussian elimination with the optimal pivot element chosen.
+% * __SVD__ singular value decomposition (SVD) method; the system can be
+% over-defined and/or the matrix `src1` can be singular.
+% * __EIG__ eigenvalue decomposition; the matrix `src1` must be symmetric.
+% * __Cholesky__ Cholesky LLT factorization; the matrix `src1` must be
+% symmetric and positive definite.
+% * __QR__ QR factorization; the system can be over-defined and/or the
+% matrix `src1` can be singular.
% * __IsNormal__ this flag can be used together with any of the previous
-% methods; it means that the normal equations
-% `src1' * src1 * dst = src1' * src2` are solved instead of the original
-% system `src1 * dst = src2`.
defaul false
+% methods; it means that the normal equations
+% `src1' * src1 * dst = src1' * src2` are solved instead of the original
+% system `src1 * dst = src2`. default false
%
% The function cv.solve solves a linear system or least-squares problem (the
% latter is possible with 'SVD' or 'QR' methods, or by specifying the flag
% 'IsNormal'):
%
-% dst = argmin_{X} ||src1*X - src2||
+% dst = argmin_{X} ||src1*X - src2||
%
% If 'LU' or 'Cholesky' method is used, the function returns 1 if `src1`
% (or `src1'*src1`) is non-singular. Otherwise, it returns 0. In the latter
% case, `dst` is not valid. Other methods find a pseudo-solution in case of a
% singular left-hand side part.
%
-% ## Note
+% ### Note
% If you want to find a unity-norm solution of an under-defined singular
% system `src1 * dst = 0`, the function cv.solve will not do the work.
% Use cv.SVD.SolveZ instead.
diff --git a/+cv/solveLP.m b/+cv/solveLP.m
index 52938a39e..c58e00931 100644
--- a/+cv/solveLP.m
+++ b/+cv/solveLP.m
@@ -1,36 +1,36 @@
%SOLVELP Solve given (non-integer) linear programming problem using the Simplex Algorithm
%
-% [z, res] = cv.solveLP(Func, Constr)
-% [...] = cv.solveLP(..., 'OptionName', optionValue, ...)
+% [z, res] = cv.solveLP(Func, Constr)
+% [...] = cv.solveLP(..., 'OptionName', optionValue, ...)
%
% ## Input
% * __Func__ This row-vector corresponds to `c` in the LP problem formulation
-% (see below). It should contain 32- or 64-bit floating-point numbers.
-% As a convenience, column-vector may be also submitted, in the latter
-% case it is understood to correspond to `c'`.
+% (see below). It should contain 32- or 64-bit floating-point numbers. As a
+% convenience, a column-vector may be also submitted, in the latter case it is
+% understood to correspond to `c'`.
% * __Constr__ m-by-n+1 matrix, whose rightmost column corresponds to `b` in
-% formulation above and the remaining to `A`. It should containt 32- or
-% 64-bit floating-point numbers.
+% formulation above and the remaining to `A`. It should contain 32- or
+% 64-bit floating-point numbers.
%
% ## Output
-% * __z__ The solution will be returned here as a column-vector - it
-% corresponds to `x` in the formulation above. It will contain 64-bit
-% floating-point numbers.
+% * __z__ The solution will be returned here as a column-vector; it
+% corresponds to `x` in the formulation above. It will contain 64-bit
+% floating-point numbers.
% * __res__ Return code.
One of: -% * __Unbounded__ problem is unbounded (target function can achieve -% arbitrary high values) -% * __Unfeasible__ problem is unfeasible (there are no points that -% satisfy all the constraints imposed) -% * __Single__ there is only one maximum for target function -% * __Multi__ there are multiple maxima for target function - the -% arbitrary one is returned +% * __Unbounded__ problem is unbounded (target function can achieve +% arbitrary high values) +% * __Unfeasible__ problem is unfeasible (there are no points that satisfy +% all the constraints imposed) +% * __Single__ there is only one maximum for target function +% * __Multi__ there are multiple maxima for target function, the arbitrary +% one is returned % % What we mean here by "linear programming problem" (or LP problem, for short) % can be formulated as: % -% Maximize c*x -% subject to A*x <= b -% x >= 0 +% Maximize c*x +% subject to A*x <= b +% x >= 0 % % Where `c` is fixed 1-by-n row-vector, `A` is fixed m-by-n matrix, `b` is % fixed m-by-1 column vector and `x` is an arbitrary n-by-1 column vector, @@ -48,7 +48,7 @@ % > Introduction to Algorithms, 3rd edition % > by T. H. Cormen, C. E. Leiserson, R. L. Rivest and Clifford Stein. % -% In particular, the [Bland's rule](http://en.wikipedia.org/wiki/Bland%27s_rule) +% In particular, the [Bland's rule](https://en.wikipedia.org/wiki/Bland%27s_rule) % is used to prevent cycling. % % See also: linprog diff --git a/+cv/solveP3P.m b/+cv/solveP3P.m index 8abc56bbc..95f140fcc 100644 --- a/+cv/solveP3P.m +++ b/+cv/solveP3P.m @@ -1,34 +1,33 @@ %SOLVEP3P Finds an object pose from 3 3D-2D point correspondences % -% [rvecs, tvecs, solutions] = cv.solveP3P(objectPoints, imagePoints, cameraMatrix) -% [...] = cv.solveP3P(..., 'OptionName', optionValue, ...) +% [rvecs, tvecs, solutions] = cv.solveP3P(objectPoints, imagePoints, cameraMatrix) +% [...] = cv.solveP3P(..., 'OptionName', optionValue, ...) % % ## Input % * __objectPoints__ Array of object points in the object coordinate space, -% 1xNx3/Nx1x3 or Nx3 array, where `N=3` is the number of points, or cell -% array of length `N=3` of 3-element vectors can be also passed here -% `{[x1,y1,z1], [x2,y2,z2], [x3,y3,z3]}`. -% * __imagePoints__ Array of corresponding image points, 1xNx2/Nx1x2 or -% Nx2 array, where `N=3` is the number of points, or cell array of -% length `N=3` of 2-element vectors can be also passed here -% `{[x1,y1], [x2,y2], [x3,y3]}`. +% 1xNx3/Nx1x3 or Nx3 array, where `N=3` is the number of points, or cell +% array of length `N=3` of 3-element vectors can be also passed here +% `{[x1,y1,z1], [x2,y2,z2], [x3,y3,z3]}`. +% * __imagePoints__ Array of corresponding image points, 1xNx2/Nx1x2 or Nx2 +% array, where `N=3` is the number of points, or cell array of length `N=3` +% of 2-element vectors can be also passed here `{[x1,y1], [x2,y2], [x3,y3]}`. % * __cameraMatrix__ Input camera matrix `A = [fx 0 cx; 0 fy cy; 0 0 1]`. % % ## Output % * __rvecs__ Output rotation vectors (see cv.Rodrigues) that, together with -% `tvecs`, brings points from the model coordinate system to the camera -% coordinate system. A P3P problem has up to 4 solutions. +% `tvecs`, brings points from the model coordinate system to the camera +% coordinate system. A P3P problem has up to 4 solutions. % * __tvecs__ Output translation vectors. % * __solutions__ number of solutions. % % ## Options % * __DistCoeffs__ Input vector of distortion coefficients -% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy]` of 4, 5, 8, 12 or 14 -% elements. 
If the vector is empty, the zero distortion coefficients are -% assumed. default empty. +% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy]` of 4, 5, 8, 12 or 14 +% elements. If the vector is empty, the zero distortion coefficients are +% assumed. default empty. % * __Method__ Method for solving the P3P problem. One of the following: -% * __P3P__ (default) Method is based on the paper [gao2003complete]. -% * __AP3P__ Method is based on the paper [Ke17]. +% * __P3P__ (default) Method is based on the paper [gao2003complete]. +% * __AP3P__ Method is based on the paper [Ke17]. % % The function estimates the object pose given 3 object points, their % corresponding image projections, as well as the camera matrix and the diff --git a/+cv/solvePnP.m b/+cv/solvePnP.m index 54f26c3ef..36d1779fc 100644 --- a/+cv/solvePnP.m +++ b/+cv/solvePnP.m @@ -1,55 +1,53 @@ %SOLVEPNP Finds an object pose from 3D-2D point correspondences % -% [rvec, tvec, success] = cv.solvePnP(objectPoints, imagePoints, cameraMatrix) -% [...] = cv.solvePnP(..., 'OptionName', optionValue, ...) +% [rvec, tvec, success] = cv.solvePnP(objectPoints, imagePoints, cameraMatrix) +% [...] = cv.solvePnP(..., 'OptionName', optionValue, ...) % % ## Input % * __objectPoints__ Array of object points in the object coordinate space, -% 1xNx3/Nx1x3 or Nx3 array, where `N` is the number of points, or cell -% array of length `N` of 3-element vectors can be also passed here -% `{[x,y,z], ...}`. -% * __imagePoints__ Array of corresponding image points, 1xNx2/Nx1x2 or -% Nx2 array, where `N` is the number of points, or cell array of -% length `N` of 2-element vectors can be also passed here -% `{[x,y], ...}`. +% 1xNx3/Nx1x3 or Nx3 array, where `N` is the number of points, or cell +% array of length `N` of 3-element vectors can be also passed here +% `{[x,y,z], ...}`. +% * __imagePoints__ Array of corresponding image points, 1xNx2/Nx1x2 or Nx2 +% array, where `N` is the number of points, or cell array of length `N` of +% 2-element vectors can be also passed here `{[x,y], ...}`. % * __cameraMatrix__ Input camera matrix `A = [fx 0 cx; 0 fy cy; 0 0 1]`. % % ## Output % * __rvec__ Output rotation vector (see cv.Rodrigues) that, together with -% `tvec`, brings points from the model coordinate system to the camera -% coordinate system. +% `tvec`, brings points from the model coordinate system to the camera +% coordinate system. % * __tvec__ Output translation vector. % * __success__ success logical flag. % % ## Options % * __DistCoeffs__ Input vector of distortion coefficients -% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy]` of 4, 5, 8, 12 or 14 -% elements. If the vector is empty, the zero distortion coefficients are -% assumed. default empty. +% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy]` of 4, 5, 8, 12 or 14 +% elements. If the vector is empty, the zero distortion coefficients are +% assumed. default empty. % * __Rvec__ Initial `rvec`. Not set by default. % * __Tvec__ Initial `tvec`. Not set by default. % * __UseExtrinsicGuess__ Parameter used for `Method='Iterative'`. If true, -% the function uses the provided `rvec` and `tvec` values as initial -% approximations of the rotation and translation vectors, respectively, -% and further optimizes them. default false. +% the function uses the provided `rvec` and `tvec` values as initial +% approximations of the rotation and translation vectors, respectively, and +% further optimizes them. default false. % * __Method__ Method for solving the PnP problem. 
One of the following: -% * __Iterative__ Iterative method is based on Levenberg-Marquardt -% optimization. In this case the function finds such a pose that -% minimizes reprojection error, that is the sum of squared -% distances between the observed projections `imagePoints` and the -% projected (using cv.projectPoints) `objectPoints`. This is the -% default. -% * __P3P__ Method is based on the paper [gao2003complete]. In this case -% the function requires exactly four object and image points. -% * __AP3P__ Method is based on the paper [Ke17]. In this case the -% function requires exactly four object and image points. -% * __EPnP__ Method has been introduced in the paper [morenoepnp] and -% [lepetit2009epnp]. -% * __DLS__ Method is based on the paper [hesch2011direct]. -% * __UPnP__ Method is based on the paper [penate2013exhaustive]. In -% this case the function also estimates the parameters `fx` and -% `fy` assuming that both have the same value. Then the -% `cameraMatrix` is updated with the estimated focal length. +% * __Iterative__ Iterative method is based on Levenberg-Marquardt +% optimization. In this case the function finds such a pose that minimizes +% reprojection error, that is the sum of squared distances between the +% observed projections `imagePoints` and the projected (using +% cv.projectPoints) `objectPoints`. This is the default. +% * __P3P__ Method is based on the paper [gao2003complete]. In this case the +% function requires exactly four object and image points. +% * __AP3P__ Method is based on the paper [Ke17]. In this case the function +% requires exactly four object and image points. +% * __EPnP__ Method has been introduced in the paper [morenoepnp] and +% [lepetit2009epnp]. +% * __DLS__ Method is based on the paper [hesch2011direct]. +% * __UPnP__ Method is based on the paper [penate2013exhaustive]. In this +% case the function also estimates the parameters `fx` and `fy` assuming +% that both have the same value. Then the `cameraMatrix` is updated with +% the estimated focal length. % % The function estimates the object pose given a set of object points, % their corresponding image projections, as well as the camera matrix and diff --git a/+cv/solvePnPRansac.m b/+cv/solvePnPRansac.m index 4c2ff71ee..e5d1fa041 100644 --- a/+cv/solvePnPRansac.m +++ b/+cv/solvePnPRansac.m @@ -1,56 +1,53 @@ %SOLVEPNPRANSAC Finds an object pose from 3D-2D point correspondences using the RANSAC scheme % -% [rvec, tvec, success, inliers] = cv.solvePnPRansac(objectPoints, imagePoints, cameraMatrix) -% [...] = cv.solvePnPRansac(..., 'OptionName', optionValue, ...) +% [rvec, tvec, success, inliers] = cv.solvePnPRansac(objectPoints, imagePoints, cameraMatrix) +% [...] = cv.solvePnPRansac(..., 'OptionName', optionValue, ...) % % ## Input % * __objectPoints__ Array of object points in the object coordinate space, -% 1xNx3/Nx1x3 or Nx3 array, where `N` is the number of points, or cell -% array of length `N` of 3-element vectors can be also passed here -% `{[x,y,z], ...}`. -% * __imagePoints__ Array of corresponding image points, 1xNx2/Nx1x2 or -% Nx2 array, where `N` is the number of points, or cell array of -% length `N` of 2-element vectors can be also passed here -% `{[x,y], ...}`. +% 1xNx3/Nx1x3 or Nx3 array, where `N` is the number of points, or cell +% array of length `N` of 3-element vectors can be also passed here +% `{[x,y,z], ...}`. 
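(A worked sketch of the cv.solvePnP call documented above may help here; the planar target, pixel measurements, and intrinsics below are all invented for illustration.)

    objPts = {[0 0 0], [0.1 0 0], [0.1 0.1 0], [0 0.1 0]};  % 10 cm square, meters
    imgPts = {[320 240], [420 245], [415 340], [318 335]};  % measured pixels
    A = [800 0 320; 0 800 240; 0 0 1];                      % assumed intrinsics
    [rvec, tvec, ok] = cv.solvePnP(objPts, imgPts, A, 'Method','Iterative');
    R = cv.Rodrigues(rvec);  % expand to a 3x3 rotation matrix if needed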
+% * __imagePoints__ Array of corresponding image points, 1xNx2/Nx1x2 or Nx2 +% array, where `N` is the number of points, or cell array of length `N` of +% 2-element vectors can be also passed here `{[x,y], ...}`. % * __cameraMatrix__ Input camera matrix `A = [fx 0 cx; 0 fy cy; 0 0 1]`. % % ## Output % * __rvec__ Output rotation vector (see cv.Rodrigues) that, together with -% `tvec`, brings points from the model coordinate system to the -% camera coordinate system. +% `tvec`, brings points from the model coordinate system to the camera +% coordinate system. % * __tvec__ Output translation vector. % * __success__ success logical flag. % * __inliers__ Output vector that contains indices (zero-based) of inliers in -% `objectPoints` and `imagePoints`. +% `objectPoints` and `imagePoints`. % % ## Options % * __DistCoeffs__ Input vector of distortion coefficients -% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy]` of 4, 5, 8, 12 or 14 -% elements. If the vector is empty, the zero distortion coefficients are -% assumed. default empty. +% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy]` of 4, 5, 8, 12 or 14 +% elements. If the vector is empty, the zero distortion coefficients are +% assumed. default empty. % * __Rvec__ Initial `rvec`. Not set by default. % * __Tvec__ Initial `tvec`. Not set by default. % * __UseExtrinsicGuess__ Parameter used for `Method='Iterative'`. If true, -% the function uses the provided `rvec` and `tvec` values as initial -% approximations of the rotation and translation vectors, respectively, -% and further optimizes them. default false. +% the function uses the provided `rvec` and `tvec` values as initial +% approximations of the rotation and translation vectors, respectively, and +% further optimizes them. default false. % * __IterationsCount__ Number of iterations. default 100. -% * __ReprojectionError__ Inlier threshold value used by the RANSAC -% procedure. The parameter value is the maximum allowed distance -% between the observed and computed point projections to consider -% it an inlier. default 8.0. -% * __Confidence__ The probability that the algorithm produces a useful -% result. default 0.99 +% * __ReprojectionError__ Inlier threshold value used by the RANSAC procedure. +% The parameter value is the maximum allowed distance between the observed +% and computed point projections to consider it an inlier. default 8.0. +% * __Confidence__ The probability that the algorithm produces a useful result. +% default 0.99 % * __Method__ Method for solving the PnP problem. See cv.solvePnP. -% default 'Iterative' +% default 'Iterative' % % The function estimates an object pose given a set of object points, their % corresponding image projections, as well as the camera matrix and the % distortion coefficients. This function finds such a pose that minimizes % reprojection error, that is, the sum of squared distances between the -% observed projections `imagePoints` and the projected (using -% cv.projectPoints) `objectPoints`. The use of RANSAC makes the function -% resistant to outliers. +% observed projections `imagePoints` and the projected (using cv.projectPoints) +% `objectPoints`. The use of RANSAC makes the function resistant to outliers. % % Note: The default method used to estimate the camera pose for the Minimal % Sample Sets step is `EPnP`. 
Exceptions: if you choose `P3P` or `AP3P`, these diff --git a/+cv/spatialGradient.m b/+cv/spatialGradient.m index 0391a90f4..0af3471dc 100644 --- a/+cv/spatialGradient.m +++ b/+cv/spatialGradient.m @@ -1,7 +1,7 @@ %SPATIALGRADIENT Calculates the first order image derivative in both x and y using a Sobel operator % -% [dx, dy] = cv.spatialGradient(src) -% [...] = cv.spatialGradient(..., 'OptionName', optionValue, ...) +% [dx, dy] = cv.spatialGradient(src) +% [...] = cv.spatialGradient(..., 'OptionName', optionValue, ...) % % ## Input % * __src__ input image, 8-bit single-channel. @@ -11,16 +11,15 @@ % * __dy__ output `int16` image with first-order derivative in y. % % ## Options -% * __KSize__ size of Sobel kernel. It must be 3 in the current -% implementation. default 3 -% * __BorderType__ Pixel extrapolation method, see cv.copyMakeBorder. -% Only 'Default', 'Reflect101', and 'Replicate' are supported. -% default 'Default' +% * __KSize__ size of Sobel kernel. It must be 3 in the current implementation. +% default 3 +% * __BorderType__ Pixel extrapolation method, see cv.copyMakeBorder. Only +% 'Default', 'Reflect101', and 'Replicate' are supported. default 'Default' % % Equivalent to calling: % -% dx = cv.Sobel(src, 'DDepth','int16', 'XOrder',1, 'YOrder',0, 'KSize',3); -% dy = cv.Sobel(src, 'DDepth','int16', 'XOrder',0, 'YOrder',1, 'KSize',3); +% dx = cv.Sobel(src, 'DDepth','int16', 'XOrder',1, 'YOrder',0, 'KSize',3); +% dy = cv.Sobel(src, 'DDepth','int16', 'XOrder',0, 'YOrder',1, 'KSize',3); % % See also: cv.Sobel, imgradientxy, imgradient % diff --git a/+cv/sqrBoxFilter.m b/+cv/sqrBoxFilter.m index f2deafce9..d6f9fc155 100644 --- a/+cv/sqrBoxFilter.m +++ b/+cv/sqrBoxFilter.m @@ -1,7 +1,7 @@ %SQRBOXFILTER Calculates the normalized sum of squares of the pixel values overlapping the filter % -% dst = cv.sqrBoxFilter(src) -% dst = cv.sqrBoxFilter(src, 'OptionName',optionValue, ...) +% dst = cv.sqrBoxFilter(src) +% dst = cv.sqrBoxFilter(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ input image. @@ -11,15 +11,15 @@ % % ## Options % * __DDepth__ the output image depth (-1 to use `class(src)`). Default -1, -% which chooses between `single` or `double` (`double` if input image is -% also `double`, `single` otherwise). See cv.filter2D for details. +% which chooses between `single` or `double` (`double` if input image is +% also `double`, `single` otherwise). See cv.filter2D for details. % * __KSize__ kernel size. Default [5,5] % * __Anchor__ kernel anchor point `[x,y]`. The default value [-1,-1] denotes -% that the anchor is at the kernel center +% that the anchor is at the kernel center % * __Normalize__ flag, specifying whether the kernel is normalized by its -% area or not. default true +% area or not. default true % * __BorderType__ border mode used to extrapolate pixels outside of the -% image. See cv.copyMakeBorder. Default 'Default' +% image. See cv.copyMakeBorder. Default 'Default' % % For every pixel (x,y) in the source image, the function calculates the sum % of squares of those neighboring pixel values which overlap the filter placed diff --git a/+cv/stereoCalibrate.m b/+cv/stereoCalibrate.m index 0de1e0404..e3a1c7fd5 100644 --- a/+cv/stereoCalibrate.m +++ b/+cv/stereoCalibrate.m @@ -1,98 +1,98 @@ %STEREOCALIBRATE Calibrates the stereo camera % -% S = cv.stereoCalibrate(objectPoints, imagePoints1, imagePoints2, imageSize) -% [...] = cv.stereoCalibrate(..., 'OptionName', optionValue, ...) 
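(Quick illustration of the cv.spatialGradient equivalence documented above; the input can be any 8-bit grayscale image, here the Image Processing Toolbox sample.)

    img = imread('cameraman.tif');         % 8-bit single-channel input
    [dx, dy] = cv.spatialGradient(img);    % int16 first-order Sobel derivatives
    mag = hypot(single(dx), single(dy));   % gradient magnitude, cf. imgradient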
+% S = cv.stereoCalibrate(objectPoints, imagePoints1, imagePoints2, imageSize) +% [...] = cv.stereoCalibrate(..., 'OptionName', optionValue, ...) % % ## Input % * __objectPoints__ A cell array of cells of calibration pattern points in -% the calibration pattern coordinate space `{{[x,y,z], ..}, ...}`. +% the calibration pattern coordinate space `{{[x,y,z], ..}, ...}`. % * __imagePoints1__ A cell array of cells of the projections of calibration -% pattern points `{{[x,y], ..}, ...}`, observed by the first camera. +% pattern points `{{[x,y], ..}, ...}`, observed by the first camera. % * __imagePoints2__ A cell array of cells of the projections of calibration -% pattern points `{{[x,y], ..}, ...}`, observed by the second camera. +% pattern points `{{[x,y], ..}, ...}`, observed by the second camera. % * __imageSize__ Size of the image used only to initialize the intrinsic -% camera matrix `[w,h]`. +% camera matrix `[w,h]`. % % ## Output % * __S__ scalar struct having the following fields: -% * __cameraMatrix1__ output first camera matrix -% `A = [fx1 0 cx1; 0 fy1 cy1; 0 0 1]`. -% * __distCoeffs1__ output vector of distortion coefficients -% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy]` of 4, 5, 8, 12 -% or 14 elements. The output vector length depends on the options. -% * __cameraMatrix2__ output second camera matrix -% `A = [fx2 0 cx2; 0 fy2 cy2; 0 0 1]`. The parameter is similar to -% `cameraMatrix1`. -% * __distCoeffs2__ output lens distortion coefficients for the second -% camera. The parameter is similar to `distCoeffs1`. -% * __R__ output 3x3 rotation matrix between the 1st and the 2nd camera -% coordinate systems. -% * __T__ output 3x1 translation vector between the coordinate systems -% of the cameras. -% * __E__ output 3x3 essential matrix. -% * __F__ output 3x3 fundamental matrix. -% * __reprojErr__ output final re-projection error (scalar). +% * __cameraMatrix1__ output first camera matrix +% `A = [fx1 0 cx1; 0 fy1 cy1; 0 0 1]`. +% * __distCoeffs1__ output vector of distortion coefficients +% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy]` of 4, 5, 8, 12 +% or 14 elements. The output vector length depends on the options. +% * __cameraMatrix2__ output second camera matrix +% `A = [fx2 0 cx2; 0 fy2 cy2; 0 0 1]`. The parameter is similar to +% `cameraMatrix1`. +% * __distCoeffs2__ output lens distortion coefficients for the second +% camera. The parameter is similar to `distCoeffs1`. +% * __R__ output 3x3 rotation matrix between the 1st and the 2nd camera +% coordinate systems. +% * __T__ output 3x1 translation vector between the coordinate systems of +% the cameras. +% * __E__ output 3x3 essential matrix. +% * __F__ output 3x3 fundamental matrix. +% * __reprojErr__ output final re-projection error (scalar). % % ## Options % * __CameraMatrix1__, __CameraMatrix2__ Initial camera matrices. If any of -% `UseIntrinsicGuess`, `FixAspectRatio`, `FixIntrinsic` (default), or -% `FixFocalLength` are specified, some or all of the matrix components -% must be initialized. See the flags description for details. +% `UseIntrinsicGuess`, `FixAspectRatio`, `FixIntrinsic` (default), or +% `FixFocalLength` are specified, some or all of the matrix components must +% be initialized. See the flags description for details. % * __DistCoeffs1__, __DistCoeffs2__ Initial lens distortion coefficients. % * __FixIntrinsic__ Fix `cameraMatrix1`,`cameraMatrix2` and `distCoeffs1`, -% `distCoeffs2` so that only `R`, `T`, `E`, and `F` matrices are -% estimated. default true. 
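(A minimal calling sketch for cv.stereoCalibrate, assuming objPts, imgPts1, and imgPts2 already hold per-view pattern corners in the cell-of-cells format described above, for 640x480 images.)

    S = cv.stereoCalibrate(objPts, imgPts1, imgPts2, [640 480], ...
        'FixIntrinsic',false);     % also estimate intrinsics of both cameras
    fprintf('RMS reprojection error: %g\n', S.reprojErr);
    % S.R and S.T relate the two cameras; S.E and S.F follow the formulas below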
+% `distCoeffs2` so that only `R`, `T`, `E`, and `F` matrices are estimated. +% default true. % * __UseIntrinsicGuess__ Optimize some or all of the intrinsic parameters -% according to the specified flags. Initial values are provided by -% the user. default false. +% according to the specified flags. Initial values are provided by the user. +% default false. % * __FixPrincipalPoint__ Fix the principal points during the optimization. -% default false. +% default false. % * __FixFocalLength__ Fix `fx1`,`fx2` and `fy1`,`fy2`. default false. % * __FixAspectRatio__ Optimize `fy1`,`fy2` and fix the ratio `fx1/fy1`, -% `fx2/fy2`. default false. +% `fx2/fy2`. default false. % * __SameFocalLength__ Enforce same `fx1=fx2` and `fy1=fy2`. default false. % * __ZeroTangentDist__ Tangential distortion coefficients for each camera are -% set to zeros and stay fixed. default false. +% set to zeros and stay fixed. default false. % * __FixTangentDist__ The tangential distortion coefficients are not -% changed during the optimization. If `UseIntrinsicGuess` is set, the -% coefficient from the supplied `DistCoeffs` matrix is used. Otherwise, -% it is set to 0. default false. +% changed during the optimization. If `UseIntrinsicGuess` is set, the +% coefficient from the supplied `DistCoeffs` matrix is used. Otherwise, it +% is set to 0. default false. % * __FixK1__, ..., __FixK6__ The corresponding radial distortion coefficient -% is not changed during the optimization. If `UseIntrinsicGuess` is set, -% the coefficient from the supplied `DistCoeffs` matrix is used. -% Otherwise, it is set to 0. default false. +% is not changed during the optimization. If `UseIntrinsicGuess` is set, the +% coefficient from the supplied `DistCoeffs` matrix is used. Otherwise, it +% is set to 0. default false. % * __RationalModel__ Coefficients `k4`, `k5`, and `k6` are enabled. To -% provide the backward compatibility, this extra flag should be -% explicitly specified to make the calibration function use the rational -% model and return 8 coefficients. If the flag is not set, the function -% computes and returns only 5 distortion coefficients. default false. -% (`RationalModel` as false implies `FixK4`,`FixK5`,`FixK6` as true). +% provide the backward compatibility, this extra flag should be explicitly +% specified to make the calibration function use the rational model and +% return 8 coefficients. If the flag is not set, the function computes and +% returns only 5 distortion coefficients. default false. +% (`RationalModel` as false implies `FixK4`,`FixK5`,`FixK6` as true). % * __ThinPrismModel__ Coefficients `s1`, `s2`, `s3` and `s4` are enabled. To -% provide the backward compatibility, this extra flag should be -% explicitly specified to make the calibration function use the thin -% prism model and return 12 coefficients. If the flag is not set, the -% function computes and returns only 5 distortion coefficients. default -% false. (`ThinPrismModel` as false implies `FixS1S2S3S4` as true). +% provide the backward compatibility, this extra flag should be explicitly +% specified to make the calibration function use the thin prism model and +% return 12 coefficients. If the flag is not set, the function computes and +% returns only 5 distortion coefficients. default false. +% (`ThinPrismModel` as false implies `FixS1S2S3S4` as true). % * __FixS1S2S3S4__ The thin prism distortion coefficients are not changed -% during the optimization. If `UseIntrinsicGuess` is set, the -% coefficient from the supplied `DistCoeffs` matrix is used. 
Otherwise, -% it is set to 0. default false. +% during the optimization. If `UseIntrinsicGuess` is set, the coefficient +% from the supplied `DistCoeffs` matrix is used. Otherwise, it is set to 0. +% default false. % * __TiltedModel__ Coefficients `tauX` and `tauY` are enabled. To provide the -% backward compatibility, this extra flag should be explicitly specified -% to make the calibration function use the tilted sensor model and -% return 14 coefficients. If the flag is not set, the function computes -% and returns only 5 distortion coefficients. default false. -% (`TiltedModel` as false implies `FixTauXTauY` as true). +% backward compatibility, this extra flag should be explicitly specified to +% make the calibration function use the tilted sensor model and return 14 +% coefficients. If the flag is not set, the function computes and returns +% only 5 distortion coefficients. default false. +% (`TiltedModel` as false implies `FixTauXTauY` as true). % * __FixTauXTauY__ The coefficients of the tilted sensor model are not -% changed during the optimization. If `UseIntrinsicGuess` is set, the -% coefficient from the supplied `DistCoeffs` matrix is used. Otherwise, -% it is set to 0. default false. +% changed during the optimization. If `UseIntrinsicGuess` is set, the +% coefficient from the supplied `DistCoeffs` matrix is used. Otherwise, it +% is set to 0. default false. % * __UseLU__ Use LU instead of SVD decomposition for solving. Much faster but -% potentially less precise. default false. +% potentially less precise. default false. % * __UseQR__ Use QR instead of SVD decomposition for solving. Faster but -% potentially less precise. default false. +% potentially less precise. default false. % * __Criteria__ Termination criteria for the iterative optimization algorithm. -% default `struct('type','Count+EPS', 'maxCount',30, 'epsilon',1e-6)` +% default `struct('type','Count+EPS', 'maxCount',30, 'epsilon',1e-6)` % % The function estimates transformation between two cameras making a stereo % pair. If you have a stereo camera where the relative position and @@ -104,19 +104,19 @@ % and orientation of the second camera relative to the first camera. This is % what the described function does. It computes `(R,T)` so that: % -% R2 = R * R1 -% T2 = R * T1 + T +% R2 = R * R1 +% T2 = R * T1 + T % % Optionally, it computes the essential matrix `E`: % -% E = [ 0 -T2 T1; -% T2 0 -T0; -% -T1 T0 0] * R +% E = [ 0 -T2 T1; +% T2 0 -T0; +% -T1 T0 0] * R % % where `Ti` are components of the translation vector `T`: `T = [T0,T1,T2]'`. % And the function can also compute the fundamental matrix `F`: % -% F = inv(cameraMatrix2)' * E * inv(cameraMatrix1) +% F = inv(cameraMatrix2)' * E * inv(cameraMatrix1) % % Besides the stereo-related information, the function can also perform a full % calibration of each of two cameras. However, due to the high dimensionality diff --git a/+cv/stereoRectify.m b/+cv/stereoRectify.m index 7156b4517..f874f451a 100644 --- a/+cv/stereoRectify.m +++ b/+cv/stereoRectify.m @@ -1,57 +1,55 @@ %STEREORECTIFY Computes rectification transforms for each head of a calibrated stereo camera % -% S = cv.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T) -% [...] = cv.stereoRectify(..., 'OptionName', optionValue, ...) +% S = cv.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T) +% [...] = cv.stereoRectify(..., 'OptionName', optionValue, ...) % % ## Input % * __cameraMatrix1__ First camera matrix 3x3. 
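(The `E` and `F` formulas above are easy to check numerically from a cv.stereoCalibrate result `S`; this sketch assumes the struct fields documented in that hunk.)

    T  = S.T(:);
    Tx = [0 -T(3) T(2); T(3) 0 -T(1); -T(2) T(1) 0];        % skew-symmetric [T]x
    E  = Tx * S.R;                                          % essential matrix
    F  = inv(S.cameraMatrix2)' * E * inv(S.cameraMatrix1);  % matches S.F up to scale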
% * __distCoeffs1__ First camera distortion parameters of 4, 5, 8, 12 or 14 -% elements. +% elements. % * __cameraMatrix2__ Second camera matrix 3x3. % * __distCoeffs2__ Second camera distortion parameters of 4, 5, 8, 12 or 14 -% elements. +% elements. % * __imageSize__ Size of the image used for stereo calibration `[w,h]`. % * __R__ Rotation matrix between the coordinate systems of the first and the -% second cameras, 3x3/3x1 (see cv.Rodrigues) +% second cameras, 3x3/3x1 (see cv.Rodrigues) % * __T__ Translation vector between coordinate systems of the cameras, 3x1. % % ## Output % * __S__ scalar struct having the following fields: -% * __R1__ 3x3 rectification transform (rotation matrix) for the first -% camera. -% * __R2__ 3x3 rectification transform (rotation matrix) for the second -% camera. -% * __P1__ 3x4 projection matrix in the new (rectified) coordinate -% systems for the first camera. -% * __P2__ 3x4 projection matrix in the new (rectified) coordinate -% systems for the second camera. -% * __Q__ 4x4 disparity-to-depth mapping matrix (see -% cv.reprojectImageTo3D). -% * __roi1__, __roi2__ rectangles inside the rectified images where all -% the pixels are valid `[x,y,w,h]`. If `Alpha=0`, the ROIs cover -% the whole images. Otherwise, they are likely to be smaller. +% * __R1__ 3x3 rectification transform (rotation matrix) for the first +% camera. +% * __R2__ 3x3 rectification transform (rotation matrix) for the second +% camera. +% * __P1__ 3x4 projection matrix in the new (rectified) coordinate systems +% for the first camera. +% * __P2__ 3x4 projection matrix in the new (rectified) coordinate systems +% for the second camera. +% * __Q__ 4x4 disparity-to-depth mapping matrix (see cv.reprojectImageTo3D). +% * __roi1__, __roi2__ rectangles inside the rectified images where all the +% pixels are valid `[x,y,w,h]`. If `Alpha=0`, the ROIs cover the whole +% images. Otherwise, they are likely to be smaller. % % ## Options % * __ZeroDisparity__ If the flag is set, the function makes the principal -% points of each camera have the same pixel coordinates in the rectified -% views. And if the flag is not set, the function may still shift the -% images in the horizontal or vertical direction (depending on the -% orientation of epipolar lines) to maximize the useful image area. -% default true. +% points of each camera have the same pixel coordinates in the rectified +% views. And if the flag is not set, the function may still shift the images +% in the horizontal or vertical direction (depending on the orientation of +% epipolar lines) to maximize the useful image area. default true. % * __Alpha__ Free scaling parameter. If it is -1 or absent, the function -% performs the default scaling. Otherwise, the parameter should be -% between 0 and 1. `Alpha=0` means that the rectified images are zoomed -% and shifted so that only valid pixels are visible (no black areas -% after rectification). `Alpha=1` means that the rectified image is -% decimated and shifted so that all the pixels from the original images -% from the cameras are retained in the rectified images (no source image -% pixels are lost). Obviously, any intermediate value yields an -% intermediate result between those two extreme cases. default -1 +% performs the default scaling. Otherwise, the parameter should be between 0 +% and 1. `Alpha=0` means that the rectified images are zoomed and shifted so +% that only valid pixels are visible (no black areas after rectification). 
+% `Alpha=1` means that the rectified image is decimated and shifted so that +% all the pixels from the original images from the cameras are retained in +% the rectified images (no source image pixels are lost). Obviously, any +% intermediate value yields an intermediate result between those two extreme +% cases. default -1 % * __NewImageSize__ New image resolution after rectification. The same size -% should be passed to cv.initUndistortRectifyMap. When [0,0] is passed -% (default), it is set to the original `imageSize`. Setting it to larger -% value can help you preserve details in the original image, especially -% when there is a big radial distortion. +% should be passed to cv.initUndistortRectifyMap. When [0,0] is passed +% (default), it is set to the original `imageSize`. Setting it to larger +% value can help you preserve details in the original image, especially when +% there is a big radial distortion. % % The function computes the rotation matrices for each camera that (virtually) % make both camera image planes the same plane. Consequently, this makes all @@ -62,10 +60,10 @@ % distinguishes the following two cases: % % 1. **Horizontal stereo**: the first and the second camera views are shifted -% relative to each other mainly along the x axis (with possible small vertical -% shift). In the rectified images, the corresponding epipolar lines in the -% left and right cameras are horizontal and have the same y-coordinate. `P1` -% and `P2` look like: +% relative to each other mainly along the x axis (with possible small +% vertical shift). In the rectified images, the corresponding epipolar +% lines in the left and right cameras are horizontal and have the same +% y-coordinate. `P1` and `P2` look like: % % P1 = [f 0 cx1 0; % 0 f cy 0; @@ -76,12 +74,13 @@ % 0 0 1 0] % % where `Tx` is a horizontal shift between the cameras and `cx1=cx2` if -% 'ZeroDisparity' is set. +% 'ZeroDisparity' is set. % % 2. **Vertical stereo**: the first and the second camera views are shifted -% relative to each other mainly in vertical direction (and probably a bit in -% the horizontal direction too). The epipolar lines in the rectified images -% are vertical and have the same x-coordinate. `P1` and `P2` look like: +% relative to each other mainly in vertical direction (and probably a bit +% in the horizontal direction too). The epipolar lines in the rectified +% images are vertical and have the same x-coordinate. `P1` and `P2` look +% like: % % P1 = [f 0 cx 0; % 0 f cy1 0; @@ -92,7 +91,7 @@ % 0 0 1 0] % % where `Ty` is a vertical shift between the cameras and `cy1=cy2` if -% 'ZeroDisparity' is set. +% 'ZeroDisparity' is set. % % As you can see, the first three columns of `P1` and `P2` will effectively be % the new "rectified" camera matrices. The matrices, together with `R1` and diff --git a/+cv/stereoRectifyUncalibrated.m b/+cv/stereoRectifyUncalibrated.m index 040b976f4..33eb0f20b 100644 --- a/+cv/stereoRectifyUncalibrated.m +++ b/+cv/stereoRectifyUncalibrated.m @@ -1,17 +1,16 @@ %STEREORECTIFYUNCALIBRATED Computes a rectification transform for an uncalibrated stereo camera % -% [H1,H2,success] = cv.stereoRectifyUncalibrated(points1, points2, F, imageSize) -% [...] = cv.stereoRectifyUncalibrated(..., 'OptionName', optionValue, ...) +% [H1,H2,success] = cv.stereoRectifyUncalibrated(points1, points2, F, imageSize) +% [...] = cv.stereoRectifyUncalibrated(..., 'OptionName', optionValue, ...) 
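(Since cv.stereoRectifyUncalibrated returns homographies rather than new camera matrices, a typical pipeline warps the images directly. A sketch, assuming matched points pts1/pts2, images img1/img2, and size w-by-h are given; the option names are the ones documented for cv.findFundamentalMat and cv.warpPerspective in mexopencv.)

    [F, mask] = cv.findFundamentalMat(pts1, pts2, 'Method','Ransac');
    [H1, H2, ok] = cv.stereoRectifyUncalibrated(pts1, pts2, F, [w h]);
    if ok
        img1r = cv.warpPerspective(img1, H1, 'DSize',[w h]);
        img2r = cv.warpPerspective(img2, H2, 'DSize',[w h]);
    end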
%
% ## Input
% * __points1__ Array of feature points in the first image as a cell array of
-% 2-element vectors: `{[x1, y1], [x2, y2], ...}` or an Nx2/Nx1x2/1xNx2
-% numeric array. The same formats as in cv.findFundamentalMat are
-% supported.
+% 2-element vectors: `{[x1, y1], [x2, y2], ...}` or an Nx2/Nx1x2/1xNx2
+% numeric array. The same formats as in cv.findFundamentalMat are supported.
% * __points2__ The corresponding points in the second image, same size and
-% type as `points1`.
+% type as `points1`.
% * __F__ Input 3x3 fundamental matrix. It can be computed from the same set
-% of point pairs using cv.findFundamentalMat.
+% of point pairs using cv.findFundamentalMat.
% * __imageSize__ Size of the image `[w,h]`.
%
% ## Output
@@ -21,11 +20,11 @@
%
% ## Options
% * __Threshold__ Optional threshold used to filter out the outliers. If the
-% parameter is greater than zero, all the point pairs that do not comply
-% with the epipolar geometry (that is, the points for which
-% `|points2{i}' * F * points1{i}| > Threshold`) are rejected prior to
-% computing the homographies. Otherwise,all the points are considered
-% inliers. default 5
+% parameter is greater than zero, all the point pairs that do not comply
+% with the epipolar geometry (that is, the points for which
+% `|points2{i}' * F * points1{i}| > Threshold`) are rejected prior to
+% computing the homographies. Otherwise, all the points are considered
+% inliers. default 5
%
% The function computes the rectification transformations without knowing
% intrinsic parameters of the cameras and their relative position in the
@@ -35,7 +34,7 @@
% transformations encoded by the homography matrices `H1` and `H2`. The
% function implements the algorithm [Hartley99].
%
-% ## Note
+% ### Note
% While the algorithm does not need to know the intrinsic parameters of the
% cameras, it heavily depends on the epipolar geometry. Therefore, if the
% camera lenses have a significant distortion, it would be better to correct
diff --git a/+cv/stylization.m b/+cv/stylization.m
index 92ee9c018..bdebbd897 100644
--- a/+cv/stylization.m
+++ b/+cv/stylization.m
@@ -1,7 +1,7 @@
%STYLIZATION Stylization filter
%
-% dst = cv.stylization(src)
-% dst = cv.stylization(src, 'OptionName',optionValue, ...)
+% dst = cv.stylization(src)
+% dst = cv.stylization(src, 'OptionName',optionValue, ...)
%
% ## Input
% * __src__ Input 8-bit 3-channel image.
@@ -13,8 +13,8 @@
% * __SigmaS__ Range between 0 and 200. default 60
% * __SigmaR__ Range between 0 and 1. default 0.45
% * __FlipChannels__ whether to flip the order of color channels in input
-% `src` and output `dst`, between MATLAB's RGB order and OpenCV's BGR
-% (input: RGB->BGR, output: BGR->RGB). default false
+% `src` and output `dst`, between MATLAB's RGB order and OpenCV's BGR
+% (input: RGB->BGR, output: BGR->RGB). default false
%
% Stylization aims to produce digital imagery with a wide variety of effects
% not focused on photorealism. Edge-aware filters are ideal for stylization,
diff --git a/+cv/subtract.m b/+cv/subtract.m
index 4204f4160..88999f4c7 100644
--- a/+cv/subtract.m
+++ b/+cv/subtract.m
@@ -1,7 +1,7 @@
%SUBTRACT Calculates the per-element difference between two arrays or array and a scalar
%
-% dst = cv.subtract(src1, src2)
-% dst = cv.subtract(src1, src2, 'OptionName',optionValue, ...)
+% dst = cv.subtract(src1, src2)
+% dst = cv.subtract(src1, src2, 'OptionName',optionValue, ...)
%
% ## Input
% * __src1__ first input array or a scalar.
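(Between the two hunks of this file, a note: the saturation rules spelled out below are the main gotcha of cv.subtract, e.g.)

    cv.subtract(uint8(100), uint8(150))                   % 0, saturated in uint8
    cv.subtract(uint8(100), uint8(150), 'DType','int16')  % -50, widened output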
@@ -9,46 +9,46 @@ % % ## Output % * __dst__ output array of the same size and number of channels as the input -% array(s). The depth is defined by `DType` or that of `src1`/`src2`. +% array(s). The depth is defined by `DType` or that of `src1`/`src2`. % % ## Options % * __Mask__ optional operation mask; this is an 8-bit single channel array -% that specifies elements of the output array to be changed. Not set by -% default. +% that specifies elements of the output array to be changed. Not set by +% default. % * __Dest__ Used to initialize the output `dst` when a mask is used. Not set -% by default. +% by default. % * __DType__ optional depth of the output array: `uint8`, `int16`, `double`, -% etc. (see the discussion below). Must be specified if input arrays are -% of different types. default -1 +% etc. (see the discussion below). Must be specified if input arrays are of +% different types. default -1 % % The function cv.subtract calculates: % % * Difference between two arrays, when both input arrays have the same size % and the same number of channels: % -% dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0 +% dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0 % % * Difference between an array and a scalar, when `src2` is constructed from % Scalar or has the same number of elements as `size(src1,3)`: % -% dst(I) = saturate(src1(I) - src2) if mask(I) != 0 +% dst(I) = saturate(src1(I) - src2) if mask(I) != 0 % % * Difference between a scalar and an array, when `src1` is constructed from % Scalar or has the same number of elements as `size(src2,3)`: % -% dst(I) = saturate(src1 - src2(I)) if mask(I) != 0 +% dst(I) = saturate(src1 - src2(I)) if mask(I) != 0 % % * The reverse difference between a scalar and an array in the case of % `SubRS`: % -% dst(I) = saturate(src2 - src1(I)) if mask(I) != 0 +% dst(I) = saturate(src2 - src1(I)) if mask(I) != 0 % % where `I` is a multi-dimensional index of array elements. In case of % multi-channel arrays, each channel is processed independently. % % The first function in the list above can be replaced with matrix expressions: % -% dst = src1 - src2; +% dst = src1 - src2; % % The input arrays and the output array can all have the same or different % depths. For example, you can subtract to 8-bit unsigned arrays and store the diff --git a/+cv/tempfile.m b/+cv/tempfile.m index 1df61e187..52c40ac9d 100644 --- a/+cv/tempfile.m +++ b/+cv/tempfile.m @@ -1,14 +1,14 @@ %TEMPFILE Return name of a temporary file % -% fname = cv.tempfile() -% fname = cv.tempfile('OptionName',optionValue, ...) +% fname = cv.tempfile() +% fname = cv.tempfile('OptionName',optionValue, ...) % % ## Output % * __fname__ Name of temporary file. % % ## Options % * __Suffix__ optional suffix (file extension). A '.' is prepended if needed. -% Not set by default. +% Not set by default. % % See also: tempname % diff --git a/+cv/textureFlattening.m b/+cv/textureFlattening.m index ce9541047..d1187838e 100644 --- a/+cv/textureFlattening.m +++ b/+cv/textureFlattening.m @@ -1,7 +1,7 @@ %TEXTUREFLATTENING Texture Flattening % -% dst = cv.textureFlattening(src, mask) -% dst = cv.textureFlattening(src, mask, 'OptionName',optionValue, ...) +% dst = cv.textureFlattening(src, mask) +% dst = cv.textureFlattening(src, mask, 'OptionName',optionValue, ...) % % ## Input % * __src__ Input 8-bit 3-channel image. @@ -15,14 +15,14 @@ % * __HighThreshold__ Value > 100. default 45 % * __KernelSize__ The size of the Sobel kernel to be used. 
default 3 % * __FlipChannels__ whether to flip the order of color channels in inputs -% `src` and `mask` and output `dst`, between MATLAB's RGB order and -% OpenCV's BGR (input: RGB->BGR, output: BGR->RGB). default false +% `src` and `mask` and output `dst`, between MATLAB's RGB order and OpenCV's +% BGR (input: RGB->BGR, output: BGR->RGB). default false % % By retaining only the gradients at edge locations, before integrating with % the Poisson solver, one washes out the texture of the selected region, % giving its contents a flat aspect. Here Canny Edge Detector is used. % -% ## Note +% ### Note % The algorithm assumes that the color of the source image is close to that of % the destination. This assumption means that when the colors don't match, the % source image color gets tinted toward the color of the destination image. diff --git a/+cv/threshold.m b/+cv/threshold.m index 517842405..15f8b6d63 100644 --- a/+cv/threshold.m +++ b/+cv/threshold.m @@ -1,16 +1,15 @@ %THRESHOLD Applies a fixed-level threshold to each array element % -% dst = cv.threshold(src, thresh) -% dst = cv.threshold(src, thresh, 'OptionName',optionValue, ...) -% [dst, thresh] = cv.threshold(src, 'auto', ...) +% dst = cv.threshold(src, thresh) +% dst = cv.threshold(src, thresh, 'OptionName',optionValue, ...) +% [dst, thresh] = cv.threshold(src, 'auto', ...) % % ## Input % * __src__ Input array (single or multiple channel, 8-bit, 16-bit, or -% floating point). +% floating-point). % * __thresh__ Threshold value. Scalar numeric value or one of the strings: -% * __Otsu__ use Otsu algorithm to choose the optimal threshold value -% * __Triangle__ use Triangle algorithm to choose the optimal threshold -% value +% * __Otsu__ use Otsu algorithm to choose the optimal threshold value +% * __Triangle__ use Triangle algorithm to choose the optimal threshold value % % ## Output % * __dst__ Output array of the same size and type as `src`. @@ -18,13 +17,13 @@ % % ## Options % * __MaxValue__ Maximum value to use with the 'Binary' and 'BinaryInv' -% thresholding types. default 255 +% thresholding types. default 255 % * __Type__ Thresholding type, default 'Binary'. One of: -% * __Binary__ `dst(x,y) = (src(x,y) > thresh) ? maxVal : 0` -% * __BinaryInv__ `dst(x,y) = (src(x,y) > thresh) ? 0 : maxVal` -% * __Trunc__ `dst(x,y) = (src(x,y) > thresh) ? thresh : src(x,y)` -% * __ToZero__ `dst(x,y) = (src(x,y) > thresh) ? src(x,y) : 0` -% * __ToZeroInv__ `dst(x,y) = (src(x,y) > thresh) ? 0 : src(x,y)` +% * __Binary__ `dst(x,y) = (src(x,y) > thresh) ? maxVal : 0` +% * __BinaryInv__ `dst(x,y) = (src(x,y) > thresh) ? 0 : maxVal` +% * __Trunc__ `dst(x,y) = (src(x,y) > thresh) ? thresh : src(x,y)` +% * __ToZero__ `dst(x,y) = (src(x,y) > thresh) ? src(x,y) : 0` +% * __ToZeroInv__ `dst(x,y) = (src(x,y) > thresh) ? 0 : src(x,y)` % % The function applies fixed-level thresholding to a multiple-channel array. % The function is typically used to get a bi-level (binary) image out of a diff --git a/+cv/transform.m b/+cv/transform.m index 419f7575a..54e80ef80 100644 --- a/+cv/transform.m +++ b/+cv/transform.m @@ -1,26 +1,26 @@ %TRANSFORM Performs the matrix transformation of every array element % -% dst = cv.transform(src, mtx) +% dst = cv.transform(src, mtx) % % ## Input % * __src__ Source array that must have as many channels (1 to 4) as columns -% of `mtx` or columns-1 of `mtx` +% of `mtx` or columns-1 of `mtx` % * __mtx__ floating-point transformation matrix. % % ## Output % * __dst__ Destination array of the same row/column size and depth as `src`. 
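(Looking back at cv.threshold above: the string form returns the automatically chosen threshold as a second output. The input image here is made up.)

    img = uint8(255*rand(128));   % hypothetical grayscale input
    [bw, t] = cv.threshold(img, 'Otsu', 'Type','Binary', 'MaxValue',255);
    % t holds the Otsu-selected threshold; bw is the binarized image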
-% It has as many channels as rows of `mtx` +% It has as many channels as rows of `mtx` % % The function cv.transform performs the matrix transformation of every % element of the array `src` and stores the results in `dst`: % % * when columns of `mtx` equal channels of `src`: % -% dst(I) = mtx * src(I) +% dst(I) = mtx * src(I) % % * when columns of `mtx` equal channels+1 of `src`: % -% dst(I) = mtx * [src(I); 1] +% dst(I) = mtx * [src(I); 1] % % Every element of the N-channel array `src` is interpreted as an N-element % vector that is transformed using the MxN or Mx(N+1) matrix `mtx` to @@ -35,22 +35,22 @@ % ## Example % This function is equivalent to the following MATLAB code: % -% function dst = my_transform(src, mtx) -% % check sizes -% [I,J,N] = size(src); -% [MM,NN] = size(mtx); -% assert(N==1 || N==2 || N==3 || N==4, '1 to 4 channels'); -% assert(N==NN || (N+1)==NN, 'Wrong dimensions'); -% if N ~= NN, src(:,:,end+1) = 1; end -% -% % transform -% dst = zeros([I,J,MM], class(src)); -% for i=1:I -% for j=1:J -% dst(i,j,:) = mtx * squeeze(src(i,j,:)); -% end -% end -% end +% function dst = my_transform(src, mtx) +% % check sizes +% [I,J,N] = size(src); +% [MM,NN] = size(mtx); +% assert(N==1 || N==2 || N==3 || N==4, '1 to 4 channels'); +% assert(N==NN || (N+1)==NN, 'Wrong dimensions'); +% if N ~= NN, src(:,:,end+1) = 1; end +% +% % transform +% dst = zeros([I,J,MM], class(src)); +% for i=1:I +% for j=1:J +% dst(i,j,:) = mtx * squeeze(src(i,j,:)); +% end +% end +% end % % See also: cv.perspectiveTransform, cv.getAffineTransform, % cv.estimateAffine2D, cv.estimateRigidTransform, cv.warpAffine, diff --git a/+cv/triangulatePoints.m b/+cv/triangulatePoints.m index 96a088c05..b706824ff 100644 --- a/+cv/triangulatePoints.m +++ b/+cv/triangulatePoints.m @@ -1,26 +1,26 @@ %TRIANGULATEPOINTS Reconstructs points by triangulation % -% points4D = cv.triangulatePoints(projMatr1, projMatr2, projPoints1, projPoints2) +% points4D = cv.triangulatePoints(projMatr1, projMatr2, projPoints1, projPoints2) % % ## Input % * __projMatr1__ 3x4 projection matrix of the first camera. % * __projMatr2__ 3x4 projection matrix of the second camera. % * __projPoints1__ 2xN array of feature points in the first image. It can be -% also a cell array of feature points `{[x,y], ...}` or two-channel -% array of size 1xNx2 or Nx1x2. +% also a cell array of feature points `{[x,y], ...}` or two-channel array of +% size 1xNx2 or Nx1x2. % * __projPoints2__ 2xN array of corresponding points in the second image. It -% can be also a cell array of feature points `{[x,y], ...}` or -% two-channel array of size 1xNx2 or Nx1x2. +% can be also a cell array of feature points `{[x,y], ...}` or two-channel +% array of size 1xNx2 or Nx1x2. % % ## Output % * __points4D__ 4xN array of reconstructed points in homogeneous coordinates -% `[[x;y;z;w], ...]` +% `[[x;y;z;w], ...]` % % The function reconstructs 3-dimensional points (in homogeneous coordinates) % by using their observations with a stereo camera. Projection matrices can % be obtained from cv.stereoRectify. % -% ## Note +% ### Note % Keep in mind that all input data should be of float type in order for % this function to work (`single` or `double`). %
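Since the output is homogeneous, the typical follow-up step is a division by the fourth coordinate; a minimal sketch (the projection matrices P1/P2 and the 2xN matched point sets pts1/pts2 are assumed to be available, e.g. from cv.stereoRectify and a feature matcher):

    pts4D = cv.triangulatePoints(P1, P2, pts1, pts2);    % 4xN homogeneous points
    pts3D = bsxfun(@rdivide, pts4D(1:3,:), pts4D(4,:));  % 3xN Euclidean points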
diff --git a/+cv/undistort.m b/+cv/undistort.m index 66ba4905d..4a1240688 100644 --- a/+cv/undistort.m +++ b/+cv/undistort.m @@ -1,23 +1,23 @@ %UNDISTORT Transforms an image to compensate for lens distortion % -% dst = cv.undistort(src, cameraMatrix, distCoeffs) -% dst = cv.undistort(..., 'OptionName', optionValue, ...) +% dst = cv.undistort(src, cameraMatrix, distCoeffs) +% dst = cv.undistort(..., 'OptionName', optionValue, ...) % % ## Input % * __src__ Input (distorted) image. % * __cameraMatrix__ Input camera matrix `A = [fx 0 cx; 0 fy cy; 0 0 1]`. % * __distCoeffs__ Input vector of distortion coefficients -% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy]` of 4, 5, 8, 12 or 14 -% elements. If the vector is empty, the zero distortion coefficients are -% assumed. +% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy]` of 4, 5, 8, 12 or 14 +% elements. If the vector is empty, the zero distortion coefficients are +% assumed. % % ## Output % * __dst__ Output (corrected) image that has the same size and type as `src`. % % ## Options % * __NewCameraMatrix__ Camera matrix of the distorted image. By default, it -% is the same as `cameraMatrix` but you may additionally scale and shift -% the result by using a different matrix. Not set by default +% is the same as `cameraMatrix` but you may additionally scale and shift the +% result by using a different matrix. Not set by default % % The function transforms an image to compensate for radial and tangential lens % distortion.
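A minimal sketch of the call (the intrinsics and distortion coefficients below are made-up example values; in practice they come from cv.calibrateCamera):

    A = [700 0 320; 0 700 240; 0 0 1];  % camera matrix [fx 0 cx; 0 fy cy; 0 0 1]
    d = [-0.2 0.1 0 0];                 % 4-element distortion vector [k1 k2 p1 p2]
    dst = cv.undistort(img, A, d);      % corrected image, same size/type as img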
diff --git a/+cv/undistortPoints.m b/+cv/undistortPoints.m index dadd38ac5..f97a95de9 100644 --- a/+cv/undistortPoints.m +++ b/+cv/undistortPoints.m @@ -1,31 +1,34 @@ %UNDISTORTPOINTS Computes the ideal point coordinates from the observed point coordinates % -% dst = cv.undistortPoints(src, cameraMatrix, distCoeffs) -% dst = cv.undistortPoints(..., 'OptionName', optionValue, ...) +% dst = cv.undistortPoints(src, cameraMatrix, distCoeffs) +% dst = cv.undistortPoints(..., 'OptionName', optionValue, ...) % % ## Input % * __src__ Observed point coordinates. An Nx2, 1xNx2, or Nx1x2 array (either -% `single` or `double`). +% `single` or `double`). % * __cameraMatrix__ Input camera matrix `A = [fx 0 cx; 0 fy cy; 0 0 1]`. % * __distCoeffs__ Input vector of distortion coefficients -% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy]` of 4, 5, 8, 12 or 14 -% elements. If the vector is empty, the zero distortion coefficients are -% assumed. +% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy]` of 4, 5, 8, 12 or 14 +% elements. If the vector is empty, the zero distortion coefficients are +% assumed. % % ## Output % * __dst__ Output ideal point coordinates after undistortion and reverse -% perspective transformation. If matrix `P` is identity or omitted, -% `dst` will contain normalized point coordinates. Same size and type as -% the input points `src`. +% perspective transformation. If matrix `P` is identity or omitted, `dst` +% will contain normalized point coordinates. Same size and type as the input +% points `src`. % % ## Options % * __R__ Rectification transformation in the object space (3x3 matrix). `R1` -% or `R2` computed by cv.stereoRectify can be passed here. If the matrix -% is empty, the identity transformation is used. default empty +% or `R2` computed by cv.stereoRectify can be passed here. If the matrix is +% empty, the identity transformation is used. default empty % * __P__ New camera matrix (3x3) or new projection matrix (3x4) -% `P = [fxp 0 cxp tx; 0 fyp cyp ty; 0 0 tz]`. `P1` or `P2` computed by -% cv.stereoRectify can be passed here. If the matrix is empty, the -% identity new camera matrix is used. +% `P = [fxp 0 cxp tx; 0 fyp cyp ty; 0 0 1 tz]`. `P1` or `P2` computed by +% cv.stereoRectify can be passed here. If the matrix is empty, the identity +% new camera matrix is used. default empty +% * __Criteria__ Termination criteria for the iterative distortion +% compensation. By default it does 5 iterations to compute undistorted points. +% default `struct('type','Count', 'maxCount',5, 'epsilon',0.01)` % % The function is similar to cv.undistort and cv.initUndistortRectifyMap but % it operates on a sparse set of points instead of a raster image. Also the @@ -35,16 +38,16 @@ % % For each observed point coordinate `(u,v)` the function computes: % -% % (u,v) is the input point, (up, vp) is the output point -% xpp = (u - cx)/fx -% ypp = (v - cy)/fy -% (xp,yp) = undistort(xpp, ypp, distCoeffs) -% [X,Y,W]' = R*[xp yp 1]' -% x = X/W -% y = Y/W -% % only performed if P is specified: -% up = x*fxp + cxp -% vp = y*fyp + cyp +% % (u,v) is the input point, (up, vp) is the output point +% xpp = (u - cx)/fx +% ypp = (v - cy)/fy +% (xp,yp) = undistort(xpp, ypp, distCoeffs) +% [X,Y,W]' = R*[xp yp 1]' +% x = X/W +% y = Y/W +% % only performed if P is specified: +% up = x*fxp + cxp +% vp = y*fyp + cyp % % where `undistort` is an approximate iterative algorithm that estimates the % normalized original point coordinates out of the normalized distorted point diff --git a/+cv/validateDisparity.m b/+cv/validateDisparity.m index 316ec5f9d..1771cdcaa 100644 --- a/+cv/validateDisparity.m +++ b/+cv/validateDisparity.m @@ -1,25 +1,24 @@ %VALIDATEDISPARITY Validates disparity using the left-right check % -% disparity = cv.validateDisparity(disparity, cost) -% disparity = cv.validateDisparity(disparity, cost, 'OptionName',optionValue, ...) +% disparity = cv.validateDisparity(disparity, cost) +% disparity = cv.validateDisparity(disparity, cost, 'OptionName',optionValue, ...) % % ## Input % * __disparity__ disparity map, 1-channel 16-bit signed integer array. % * __cost__ Cost matrix computed by the stereo correspondence algorithm. An -% array of same size as `disparity` and 16-bit or 32-bit signed integer -% type. +% array of the same size as `disparity` and 16-bit or 32-bit signed integer type. % % ## Output % * __disparity__ validated disparity map. % % ## Options % * __MinDisparity__ Minimum possible disparity value. Normally, it is zero -% but sometimes rectification algorithms can shift images, so this -% parameter needs to be adjusted accordingly. default 0 +% but sometimes rectification algorithms can shift images, so this parameter +% needs to be adjusted accordingly. default 0 % * __NumDisparities__ Maximum disparity minus minimum disparity. The value is -% always greater than zero. default 64 +% always greater than zero. default 64 % * __Disp12MaxDisp__ Maximum allowed difference (in integer pixel units) in -% the left-right disparity check. default 1 +% the left-right disparity check. default 1 % % See also: cv.StereoBM, cv.StereoSGBM, cv.filterSpeckles, % cv.getValidDisparityROI, diff --git a/+cv/warpAffine.m b/+cv/warpAffine.m index 53efd0da0..75696a803 100644 --- a/+cv/warpAffine.m +++ b/+cv/warpAffine.m @@ -1,7 +1,7 @@ %WARPAFFINE Applies an affine transformation to an image % -% dst = cv.warpAffine(src, M) -% dst = cv.warpAffine(src, M, 'OptionName',optionValue, ...) +% dst = cv.warpAffine(src, M) +% dst = cv.warpAffine(src, M, 'OptionName',optionValue, ...) % % ## Input % * __src__ Input image. @@ -9,42 +9,40 @@ % % ## Output % * __dst__ Output image that has the size `DSize` (with -% `size(dst,3) == size(src,3)`) and the same type as `src`. +% `size(dst,3) == size(src,3)`) and the same type as `src`. % % ## Options % * __DSize__ Size of the output image `[w,h]`.
Default `[0,0]` means using -% the same size as the input `[size(src,2) size(src,1)]`. +% the same size as the input `[size(src,2) size(src,1)]`. % * __Interpolation__ interpolation method, default 'Linear'. One of: -% * __Nearest__ nearest neighbor interpolation -% * __Linear__ bilinear interpolation -% * __Cubic__ bicubic interpolation -% * __Lanczos4__ Lanczos interpolation over 8x8 neighborhood +% * __Nearest__ nearest neighbor interpolation +% * __Linear__ bilinear interpolation +% * __Cubic__ bicubic interpolation +% * __Lanczos4__ Lanczos interpolation over 8x8 neighborhood % * __WarpInverse__ Logical flag to apply inverse affine transform, meaning -% that `M` is the inverse transformation (`dst -> src`). default false +% that `M` is the inverse transformation (`dst -> src`). default false % * __BorderType__ Pixel extrapolation method. When 'Transparent', it means -% that the pixels in the destination image corresponding to the -% "outliers" in the source image are not modified by the function. -% default 'Constant' -% * __Constant__ `iiiiii|abcdefgh|iiiiiii` with some specified `i` -% * __Replicate__ `aaaaaa|abcdefgh|hhhhhhh` -% * __Reflect__ `fedcba|abcdefgh|hgfedcb` -% * __Reflect101__ `gfedcb|abcdefgh|gfedcba` -% * __Wrap__ `cdefgh|abcdefgh|abcdefg` -% * __Transparent__ `uvwxyz|absdefgh|ijklmno` -% * __Default__ same as 'Reflect101' +% that the pixels in the destination image corresponding to the "outliers" +% in the source image are not modified by the function. default 'Constant' +% * __Constant__ `iiiiii|abcdefgh|iiiiiii` with some specified `i` +% * __Replicate__ `aaaaaa|abcdefgh|hhhhhhh` +% * __Reflect__ `fedcba|abcdefgh|hgfedcb` +% * __Reflect101__ `gfedcb|abcdefgh|gfedcba` +% * __Wrap__ `cdefgh|abcdefgh|abcdefg` +% * __Transparent__ `uvwxyz|abcdefgh|ijklmno` +% * __Default__ same as 'Reflect101' % * __BorderValue__ Value used in case of a constant border. default 0 % * __Dst__ Optional initial image for the output. If not set, it is -% automatically created by the function. Note that it must match the -% expected size `DSize` and the type of `src`, otherwise it is ignored -% and recreated by the function. This option is only useful when -% `BorderType=Transparent`, in which case the transformed image is drawn -% onto the existing `Dst` without extrapolating pixels. Not set by -% default. +% automatically created by the function. Note that it must match the +% expected size `DSize` and the type of `src`, otherwise it is ignored and +% recreated by the function. This option is only useful when +% `BorderType=Transparent`, in which case the transformed image is drawn +% onto the existing `Dst` without extrapolating pixels. Not set by default. % % The function cv.warpAffine transforms the source image using the specified % matrix: % -% dst(x,y) = src(M_11*x + M_12*y + M_13, M_21*x + M_22*y + M_23) +% dst(x,y) = src(M_11*x + M_12*y + M_13, M_21*x + M_22*y + M_23) % % when the `WarpInverse` option is true. Otherwise, the transformation is first % inverted with cv.invertAffineTransform and then put in the formula above diff --git a/+cv/warpPerspective.m b/+cv/warpPerspective.m index 9931c7a3a..ff9544658 100644 --- a/+cv/warpPerspective.m +++ b/+cv/warpPerspective.m @@ -1,7 +1,7 @@ %WARPPERSPECTIVE Applies a perspective transformation to an image % -% dst = cv.warpPerspective(src, M) -% dst = cv.warpPerspective(src, M, 'OptionName',optionValue, ...) +% dst = cv.warpPerspective(src, M) +% dst = cv.warpPerspective(src, M, 'OptionName',optionValue, ...)
% % ## Input % * __src__ Input image. @@ -9,48 +9,45 @@ % % ## Output % * __dst__ Output image that has the size `DSize` (with -% `size(dst,3) == size(src,3)`) and the same type as `src`. +% `size(dst,3) == size(src,3)`) and the same type as `src`. % % ## Options % * __DSize__ Size of the output image `[w,h]`. Default `[0,0]` means using -% the same size as the input `[size(src,2) size(src,1)]`. +% the same size as the input `[size(src,2) size(src,1)]`. % * __Interpolation__ interpolation method, default 'Linear'. One of: -% * __Nearest__ nearest neighbor interpolation -% * __Linear__ bilinear interpolation -% * __Cubic__ bicubic interpolation -% * __Lanczos4__ Lanczos interpolation over 8x8 neighborhood +% * __Nearest__ nearest neighbor interpolation +% * __Linear__ bilinear interpolation +% * __Cubic__ bicubic interpolation +% * __Lanczos4__ Lanczos interpolation over 8x8 neighborhood % * __WarpInverse__ Logical flag to apply inverse perspective transform, -% meaning that `M` is the inverse transformation (`dst -> src`). -% default false +% meaning that `M` is the inverse transformation (`dst -> src`). +% default false % * __BorderType__ Pixel extrapolation method. When 'Transparent', it means -% that the pixels in the destination image corresponding to the -% "outliers" in the source image are not modified by the function. -% default 'Constant' -% * __Constant__ `iiiiii|abcdefgh|iiiiiii` with some specified `i` -% * __Replicate__ `aaaaaa|abcdefgh|hhhhhhh` -% * __Reflect__ `fedcba|abcdefgh|hgfedcb` -% * __Reflect101__ `gfedcb|abcdefgh|gfedcba` -% * __Wrap__ `cdefgh|abcdefgh|abcdefg` -% * __Transparent__ `uvwxyz|absdefgh|ijklmno` -% * __Default__ same as 'Reflect101' +% that the pixels in the destination image corresponding to the "outliers" +% in the source image are not modified by the function. default 'Constant' +% * __Constant__ `iiiiii|abcdefgh|iiiiiii` with some specified `i` +% * __Replicate__ `aaaaaa|abcdefgh|hhhhhhh` +% * __Reflect__ `fedcba|abcdefgh|hgfedcb` +% * __Reflect101__ `gfedcb|abcdefgh|gfedcba` +% * __Wrap__ `cdefgh|abcdefgh|abcdefg` +% * __Transparent__ `uvwxyz|abcdefgh|ijklmno` +% * __Default__ same as 'Reflect101' % * __BorderValue__ Value used in case of a constant border. default 0 % * __Dst__ Optional initial image for the output. If not set, it is -% automatically created by the function. Note that it must match the -% expected size `DSize` and the type of `src`, otherwise it is ignored -% and recreated by the function. This option is only useful when -% `BorderType=Transparent`, in which case the transformed image is drawn -% onto the existing `Dst` without extrapolating pixels. Not set by -% default. +% automatically created by the function. Note that it must match the +% expected size `DSize` and the type of `src`, otherwise it is ignored and +% recreated by the function. This option is only useful when +% `BorderType=Transparent`, in which case the transformed image is drawn +% onto the existing `Dst` without extrapolating pixels. Not set by default. % % The function cv.warpPerspective transforms the source image using the % specified matrix: % -% dst(x,y) = src((M_11*x + M_12*y + M_13) / (M_31*x + M_32*y + M_33), -% (M_21*x + M_22*y + M_23) / (M_31*x + M_32*y + M_33)) +% dst(x,y) = src((M_11*x + M_12*y + M_13) / (M_31*x + M_32*y + M_33), +% (M_21*x + M_22*y + M_23) / (M_31*x + M_32*y + M_33)) % -% when the `WarpInverse` option is true. Otherwise, the transformation is -% first inverted with cv.invert and then put in the formula above instead of -% `M`.
+% when the `WarpInverse` option is true. Otherwise, the transformation is first +% inverted with cv.invert and then put in the formula above instead of `M`. % % See also: cv.warpAffine, cv.remap, cv.resize, cv.getRectSubPix, % cv.perspectiveTransform, imtransform, imwarp diff --git a/+cv/watershed.m b/+cv/watershed.m index 305bab4a8..9dda912b2 100644 --- a/+cv/watershed.m +++ b/+cv/watershed.m @@ -1,15 +1,15 @@ %WATERSHED Performs a marker-based image segmentation using the watershed algorithm % -% marker = cv.watershed(image, marker) +% marker = cv.watershed(image, marker) % % ## Input % * __image__ Input 8-bit 3-channel image. -% * __marker__ Input 32-bit single-channel image (map) of markers. It -% should have the same size as `image`. +% * __marker__ Input 32-bit single-channel image (map) of markers. It should +% have the same size as `image`. % % ## Output -% * __marker__ Output 32-bit single-channel image (map) of markers. It has -% the same size as `image`. +% * __marker__ Output 32-bit single-channel image (map) of markers. It has the +% same size as `image`. % % The function implements one of the variants of watershed, a non-parametric % marker-based segmentation algorithm, described in [Meyer92]. @@ -24,7 +24,7 @@ % be set to 0's. In the function output, each pixel in markers is set to a % value of the "seed" components or to -1 at boundaries between the regions. % -% ## Note +% ### Note % Any two neighboring connected components are not necessarily separated by % a watershed boundary (-1 pixels); for example, they can touch each other in % the initial marker image passed to the function.
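A minimal marker-preparation sketch for the function above (`img` is assumed to be the 8-bit 3-channel image described in the doc, and the seed coordinates are illustrative; real seeds usually come from user strokes or a prior segmentation):

    marker = zeros(size(img,1), size(img,2), 'int32');  % 0 = unknown region
    marker(50,100) = 1;                                 % seed pixel for region 1
    marker(200,300) = 2;                                % seed pixel for region 2
    marker = cv.watershed(img, marker);                 % -1 marks watershed boundaries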
diff --git a/+mexopencv/isOctave.m b/+mexopencv/isOctave.m index 68f1ac3d2..b34d63c68 100644 --- a/+mexopencv/isOctave.m +++ b/+mexopencv/isOctave.m @@ -1,7 +1,7 @@ function retval = isOctave() %ISOCTAVE Return true if the environment is Octave % - % retval = mexopencv.isOctave() + % retval = mexopencv.isOctave() % % ## Output % * __retval__ true if running in Octave, false otherwise (MATLAB). diff --git a/+mexopencv/make.m b/+mexopencv/make.m index 63db15926..b177bd052 100644 --- a/+mexopencv/make.m +++ b/+mexopencv/make.m @@ -1,48 +1,49 @@ function make(varargin) %MAKE Compile MEX-functions % -% mexopencv.make -% mexopencv.make('OptionName', optionValue, ...) +% mexopencv.make +% mexopencv.make('OptionName', optionValue, ...) % % Make builds the mexopencv library. In Unix, this function invokes Makefile % in the project root. In Windows, the function takes an optional argument % to specify the installed OpenCV path. % % ## Options -% * **opencv_path** string specifying the path to OpenCV installation -% default `'C:\OpenCV\build'` +% * **opencv_path** string specifying the path to OpenCV installation. +% default `'C:\OpenCV\build'` % * **opencv_contrib** flag to indicate whether optional opencv modules are -% available or not. These can only be selected in OpenCV at compile-time. -% default `false`. +% available or not. These can only be selected in OpenCV at compile-time. +% default `false`. % * __clean__ clean all compiled MEX files. default `false` % * __test__ run all unit-tests. default `false` % * __dryrun__ don't actually run commands, just print them. default `false` % * __force__ Unconditionally build all files. default `false` -% * __verbose__ output verbosity. The higher the number, the more output -% is shown. default 1 -% * __0__ no output at all -% * __1__ echo commands and information messages only -% * __2__ verbose output from mex -% * __3__ enables all compiler warnings -% * __4__ enables verbose compiler/linker output +% * __verbose__ output verbosity. The higher the number, the more output is +% shown. default 1 +% * __0__ no output at all +% * __1__ echo commands and information messages only +% * __2__ verbose output from mex +% * __3__ enables all compiler warnings +% * __4__ enables verbose compiler/linker output % * __progress__ show a progress bar GUI during compilation (Windows only). -% default `true` -% * __debug__ Produce binaries with debugging information, linked against -% the debug version of OpenCV libraries. default false -% * __extra__ extra arguments passed to Unix make command. default `''` +% default `true` +% * __debug__ Produce binaries with debugging information, linked against the +% debug version of OpenCV libraries. default false +% * __extra__ extra arguments passed to Unix make command. default empty +% string % % ## Example % -% mexopencv.make('opencv_path', pathname) % Windows only -% mexopencv.make(..., 'opencv_contrib', true) % build with contrib modules -% mexopencv.make('clean',true) % clean MEX files -% mexopencv.make('test',true) % run unittests -% mexopencv.make('dryrun',true, 'force',true) % print commands used to build -% mexopencv.make(..., 'verbose',2) % verbose compiler output -% mexopencv.make(..., 'progress',true) % show progress bar -% mexopencv.make(..., 'debug',true) % enalbe debugging symbols -% mexopencv.make('extra','--jobs=2') % instruct Make to execute N -% % jobs in parallel (Unix only) +% mexopencv.make('opencv_path', pathname) % Windows only +% mexopencv.make(..., 'opencv_contrib', true) % build with contrib modules +% mexopencv.make('clean',true) % clean MEX files +% mexopencv.make('test',true) % run unittests +% mexopencv.make('dryrun',true, 'force',true) % print commands used to build +% mexopencv.make(..., 'verbose',2) % verbose compiler output +% mexopencv.make(..., 'progress',true) % show progress bar +% mexopencv.make(..., 'debug',true) % enable debugging symbols +% mexopencv.make('extra','--jobs=2') % instruct Make to execute N +% % jobs in parallel (Unix only) % % See also: mex % diff --git a/+mexopencv/require.m b/+mexopencv/require.m index 5036fb956..2878446f2 100644 --- a/+mexopencv/require.m +++ b/+mexopencv/require.m @@ -1,7 +1,7 @@ function [status, v] = require(name) %REQUIRE Check if a toolbox/package is available % - % [status, v] = mexopencv.require(name) + % [status, v] = mexopencv.require(name) % % ## Input % * __name__ name of toolbox/package to check.
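The two setup helpers above are typically combined when preparing a working tree; a minimal sketch (the Windows path, the 'images' package name, and the test-image path resolved via mexopencv.root are illustrative assumptions, not built-in defaults):

    mexopencv.make('opencv_path','C:\OpenCV\build');  % compile the MEX functions once
    if mexopencv.require('images')                    % is the toolbox/package available?
        img = cv.imread(fullfile(mexopencv.root(),'test','img001.jpg'));
    end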
diff --git a/.ci/cvsetup.m b/.ci/cvsetup.m new file mode 100644 index 000000000..819a11c87 --- /dev/null +++ b/.ci/cvsetup.m @@ -0,0 +1,91 @@ +function cvsetup(isContrib, rootCV) + if nargin < 1, isContrib = false; end + if nargin < 2, rootCV = getenv('OPENCV_DIR'); end + rootMCV = fileparts(fileparts(mfilename('fullpath'))); + + % initializations for Octave + if isOctave() + crash_dumps_octave_core(false); + more('off'); + warning('off', 'Octave:shadowed-function'); + warning('off', 'Octave:GraphicsMagic-Quantum-Depth'); + try, pkg('load', 'statistics'); end + try, pkg('load', 'image'); end + end + + % add root dir of OpenCV to PATH env var + if ispc() && isdir(rootCV) + try + p = fullfile(rootCV, arch(), compiler(), 'bin'); + setenv('PATH', [p pathsep() getenv('PATH')]); + end + end + + % add root dir of mexopencv to MATLAB path + addpath(rootMCV); + if isContrib + addpath(fullfile(rootMCV, 'opencv_contrib')); + end + if isOctave() + % HACK: we have to also add private directories to path in Octave + % http://savannah.gnu.org/bugs/?45444 + addpath(fullfile(rootMCV, '+cv', 'private')); + if isContrib + addpath(fullfile(rootMCV, 'opencv_contrib', '+cv', 'private')); + end + end + + % tests + addpath(fullfile(rootMCV, 'test')); + addpath(fullfile(rootMCV, 'test', 'unit_tests')); + if isContrib + addpath(fullfile(rootMCV, 'opencv_contrib', 'test', 'unit_tests')); + end + + % samples + addpath(fullfile(rootMCV, 'samples')); + addpath(fullfile(rootMCV, 'samples', 'common')); + if isContrib + addpath(fullfile(rootMCV, 'opencv_contrib', 'samples')); + end + + % docs and utils + addpath(fullfile(rootMCV, 'utils')); + addpath(fullfile(rootMCV, 'doc')); +end + +function b = isOctave() + b = exist('OCTAVE_VERSION', 'builtin') == 5; +end + +function s = arch() + if isOctave() + pattern64 = 'x86_64'; + else + pattern64 = '64'; + end + if isempty(strfind(computer('arch'), pattern64)) + s = 'x86'; + else + s = 'x64'; + end +end + +function s = compiler() + if isOctave() + s = 'mingw'; + else + cc = mex.getCompilerConfigurations('C++', 'Selected'); + assert(~isempty(cc)); + switch cc.Manufacturer + case 'Microsoft' + assert(~isempty(strfind(cc.Name, 'Visual'))); + s = sprintf('vc%d', sscanf(cc.Version, '%d', 1)); % char string, e.g. 'vc14' + case 'GNU' + s = 'mingw'; + otherwise + s = ''; + end + assert(~isempty(s)); + end +end diff --git a/.gitignore b/.gitignore index f55e7e633..ae1be6458 100644 --- a/.gitignore +++ b/.gitignore @@ -23,13 +23,18 @@ octave-workspace # generated docs /doc/html/ -/doc/latex/ /doc/matlab/ +/doc/wiki/ +/doc/helpsearch*/ +/doc/helpfuncbycat.xml # published samples /samples/html/ /opencv_contrib/samples/html/ +# downloaded Java packages for generating docs +/utils/jars/*/*.jar + # downloaded files for samples and tests /test/768x576.avi /test/car.avi @@ -62,3 +67,7 @@ octave-workspace /test/imageText[NR].png /test/licenseplate_motion.jpg /test/aero[13].jpg +/test/peilin_*.png +/test/faceocc2.webm +/test/dudek.webm +/test/david.webm diff --git a/.travis.yml b/.travis.yml index 7ab139dd1..ab58cb319 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,6 +13,10 @@ dist: trusty # full Ubuntu VM (sudo enabled) vs.
container-based environment sudo: required +# shallow-ish clone (default 50) +git: + depth: 3 + # whitelisted branches to build branches: only: @@ -27,53 +31,55 @@ env: - INSTALL_PREFIX=/usr/local # Octave options + optional Octave-Forge packages to install - # (Note: latest image/stats packages requires a C++11 compiler to build, + # (Note: latest image/stats packages require a C++11 compiler to build, # Trusty has gcc-4.8 and must explicitly pass -std=c++11) - - OCTAVERC=$HOME/.octaverc - - OCTAVE_OPTS="--no-gui --no-window-system" - - OCT_STATS=yes + - OCTAVE_OPTS="--no-gui --no-window-system --no-init-file --quiet" + - OCT_IMAGE=yes + - OCT_STATS=yes - CXXFLAGS="-std=c++11" # mexopencv options # (Note: don't use too many Make parallel jobs on VM) - MCV_ROOT=$(pwd) - - MAKE_OPTS="WITH_OCTAVE=true WITH_CONTRIB=true NO_CV_PKGCONFIG_HACK=true" + - MAKE_OPTS="WITH_OCTAVE=true WITH_CONTRIB=true" - MAKE_EXTRA=-j2 - - DOXY=yes + - WITH_DOXY=yes install: #HACK: Travis adds a PPA for Redis, but it also contains GraphicsMagick lib # which conflicts with the one Octave was compiled against, so we remove it - sudo rm -f /etc/apt/sources.list.d/rwky-redis.list - # third-party PPA with Octave 4.0.2 + # third-party PPA with Octave 4.2.1 - sudo add-apt-repository -y ppa:octave/stable - sudo apt-get update -qq # install build tools: CMake, Ninja, pkg-config, Doxygen - sudo apt-get install -y cmake pkg-config - if [ "$CMAKE_GEN" = "Ninja" ]; then sudo apt-get install -y ninja-build ; fi - - if [ "$DOXY" = "yes" ]; then sudo apt-get install -y doxygen ; fi + - if [ "$WITH_DOXY" = "yes" ]; then sudo apt-get install -y doxygen ; fi # install Octave + optional statistics/image packages - sudo apt-get install -y octave liboctave-dev + - if [ "$OCT_IMAGE" = "yes" ]; then sudo apt-get install -y octave-image ; fi - if [ "$OCT_STATS" = "yes" ]; then octave-cli $OCTAVE_OPTS --eval "try, pkg install -forge -local io statistics, end" ; fi - - if [ "$OCT_IMAGE" = "yes" ]; then octave-cli $OCTAVE_OPTS --eval "try, pkg install -forge -local image, end" ; fi # install some OpenCV dependencies - - sudo apt-get install -y zlib1g-dev libjpeg8-dev libpng12-dev libtiff4-dev libjasper-dev libopenexr-dev + - sudo apt-get install -y zlib1g-dev libjpeg8-dev libpng12-dev libtiff5-dev libjasper-dev libopenexr-dev - sudo apt-get install -y libavcodec-dev libavformat-dev libswscale-dev - - sudo apt-get install -y libxine2-dev libv4l-dev libdc1394-22-dev libgstreamer0.10-dev libgstreamer-plugins-base0.10-dev + - sudo apt-get install -y libv4l-dev libdc1394-22-dev libxine2-dev libgphoto2-dev libgstreamer0.10-dev libgstreamer-plugins-base0.10-dev - sudo apt-get install -y libgtk2.0-dev libtbb-dev libeigen3-dev libblas-dev liblapack-dev liblapacke-dev libatlas-base-dev - sudo apt-get install -y libhdf5-dev libprotobuf-dev libgflags-dev libgoogle-glog-dev - - # build OpenCV 3.3.0 from source (opencv + opencv_contrib) - - mkdir $HOME/cv330 && pushd $HOME/cv330 - - wget -O opencv.zip https://github.com/opencv/opencv/archive/3.3.0.zip - - wget -O opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/3.3.0.zip - - unzip opencv.zip > /dev/null - - unzip opencv_contrib.zip > /dev/null + #- sudo apt-get install -y tesseract-ocr libtesseract-dev libleptonica-dev + #- sudo apt-get install -y libgdcm2-dev libgdal-dev + #- sudo apt-get install -y libgtk-3-dev libvtk6-dev libopenblas-dev + + # build OpenCV 3.3.1 from source (opencv + opencv_contrib) + - mkdir $HOME/cv && pushd $HOME/cv + - wget -O opencv-3.3.1.zip
https://github.com/opencv/opencv/archive/3.3.1.zip + - wget -O opencv_contrib-3.3.1.zip https://github.com/opencv/opencv_contrib/archive/3.3.1.zip + - unzip opencv-3.3.1.zip > /dev/null + - unzip opencv_contrib-3.3.1.zip > /dev/null - mkdir build && cd build - cmake -G "$CMAKE_GEN" -Wno-dev -DBUILD_DOCS:BOOL=OFF @@ -81,16 +87,33 @@ install: -DBUILD_PACKAGE:BOOL=OFF -DBUILD_PERF_TESTS:BOOL=OFF -DBUILD_TESTS:BOOL=OFF + -DBUILD_WITH_DEBUG_INFO:BOOL=OFF + -DBUILD_ITT:BOOL=OFF + -DCV_TRACE:BOOL=OFF + -DENABLE_PYLINT:BOOL=OFF -DWITH_CUDA:BOOL=OFF + -DWITH_CUBLAS:BOOL=OFF -DWITH_CUFFT:BOOL=OFF + -DWITH_NVCUVID:BOOL=OFF -DWITH_ITT:BOOL=OFF -DWITH_MATLAB:BOOL=OFF -DWITH_OPENCL:BOOL=OFF -DWITH_VTK:BOOL=OFF - -DCV_TRACE:BOOL=OFF -DBUILD_opencv_apps:BOOL=OFF - -DBUILD_opencv_cuda:BOOL=OFF + -DBUILD_opencv_cudaarithm:BOOL=OFF + -DBUILD_opencv_cudabgsegm:BOOL=OFF + -DBUILD_opencv_cudacodec:BOOL=OFF + -DBUILD_opencv_cudafeatures2d:BOOL=OFF + -DBUILD_opencv_cudafilters:BOOL=OFF + -DBUILD_opencv_cudaimgproc:BOOL=OFF + -DBUILD_opencv_cudalegacy:BOOL=OFF + -DBUILD_opencv_cudaobjdetect:BOOL=OFF + -DBUILD_opencv_cudaoptflow:BOOL=OFF + -DBUILD_opencv_cudastereo:BOOL=OFF + -DBUILD_opencv_cudawarping:BOOL=OFF + -DBUILD_opencv_cudev:BOOL=OFF -DBUILD_opencv_java:BOOL=OFF + -DBUILD_opencv_js:BOOL=OFF -DBUILD_opencv_python2:BOOL=OFF -DBUILD_opencv_python3:BOOL=OFF -DBUILD_opencv_ts:BOOL=OFF @@ -104,10 +127,10 @@ install: -DBUILD_opencv_sfm:BOOL=OFF -DBUILD_opencv_structured_light:BOOL=OFF -DBUILD_opencv_surface_matching:BOOL=OFF - -DCMAKE_BUILD_TYPE:STRING=RELEASE + -DCMAKE_BUILD_TYPE:STRING=Release -DCMAKE_INSTALL_PREFIX:PATH=$INSTALL_PREFIX -DOPENCV_ENABLE_NONFREE:BOOL=ON - -DOPENCV_EXTRA_MODULES_PATH:PATH=$HOME/cv330/opencv_contrib-3.3.0/modules $HOME/cv330/opencv-3.3.0 + -DOPENCV_EXTRA_MODULES_PATH:PATH=$HOME/cv/opencv_contrib-3.3.1/modules $HOME/cv/opencv-3.3.1 - cmake --build . - sudo cmake --build . 
--target install - popd @@ -118,29 +141,13 @@ install: - sudo sh -c 'echo "$INSTALL_PREFIX/lib" > /etc/ld.so.conf.d/opencv.conf' - sudo ldconfig -before_script: - # create .octaverc file (to setup path and load required packages on start) - #HACK: we also add private directories on path, http://savannah.gnu.org/bugs/?45444 - - touch $OCTAVERC - - echo "crash_dumps_octave_core(false);" >> $OCTAVERC - - echo "more off" >> $OCTAVERC - - if [ "$OCT_STATS" = "yes" ]; then echo "try, pkg load statistics, end" >> $OCTAVERC ; fi - - if [ "$OCT_IMAGE" = "yes" ]; then echo "try, pkg load image, end" >> $OCTAVERC ; fi - - echo "warning('off', 'Octave:GraphicsMagic-Quantum-Depth');" >> $OCTAVERC - - echo "warning('off', 'Octave:shadowed-function');" >> $OCTAVERC - - echo "cd('$MCV_ROOT');" >> $OCTAVERC - - echo "addpath('$MCV_ROOT');" >> $OCTAVERC - - echo "addpath(fullfile('$MCV_ROOT','+cv','private'));" >> $OCTAVERC - - echo "addpath(fullfile('$MCV_ROOT','opencv_contrib'));" >> $OCTAVERC - - echo "addpath(fullfile('$MCV_ROOT','opencv_contrib','+cv','private'));" >> $OCTAVERC - script: # compile mexopencv - cd $MCV_ROOT - make $MAKE_OPTS $MAKE_EXTRA all contrib # build docs - - if [ "$DOXY" = "yes" ]; then make $MAKE_OPTS doc ; fi + - if [ "$WITH_DOXY" = "yes" ]; then make $MAKE_OPTS doc ; fi # run test suite - make $MAKE_OPTS test diff --git a/Contents.m b/Contents.m new file mode 100644 index 000000000..f6d3d8776 --- /dev/null +++ b/Contents.m @@ -0,0 +1,493 @@ +% mexopencv +% Version 3.3.1 (R2017a) 26-November-2017 +% +%% opencv: Main Modules +% +% core: Core Functionality +% cv.borderInterpolate - Computes the source location of an extrapolated pixel +% cv.copyMakeBorder - Forms a border around an image +% cv.add - Calculates the per-element sum of two arrays or an array and a scalar +% cv.subtract - Calculates the per-element difference between two arrays or array and a scalar +% cv.multiply - Calculates the per-element scaled product of two arrays +% cv.divide - Performs per-element division of two arrays or a scalar by an array +% cv.addWeighted - Calculates the weighted sum of two arrays +% cv.convertScaleAbs - Scales, calculates absolute values, and converts the result to 8-bit +% cv.convertFp16 - Converts an array to half-precision floating-point numbers +% cv.LUT - Performs a look-up table transform of an array +% cv.norm - Calculates absolute array norm, absolute difference norm, or relative difference norm +% cv.PSNR - Computes the Peak Signal-to-Noise Ratio (PSNR) image quality metric +% cv.batchDistance - Naive nearest neighbor finder +% cv.normalize - Normalizes the norm or value range of an array +% cv.flip - Flips a 2D array around vertical, horizontal, or both axes +% cv.rotate - Rotates a 2D array in multiples of 90 degrees +% cv.bitwise_and - Calculates the per-element bit-wise conjunction of two arrays or an array and a scalar +% cv.bitwise_or - Calculates the per-element bit-wise disjunction of two arrays or an array and a scalar +% cv.bitwise_xor - Calculates the per-element bit-wise "exclusive or" operation on two arrays or an array and a scalar +% cv.bitwise_not - Inverts every bit of an array +% cv.absdiff - Calculates the per-element absolute difference between two arrays or between an array and a scalar +% cv.inRange - Checks if array elements lie between the elements of two other arrays +% cv.compare - Performs the per-element comparison of two arrays or an array and scalar value +% cv.polarToCart - Calculates x and y coordinates of 2D vectors from their magnitude and
angle +% cv.cartToPolar - Calculates the magnitude and angle of 2D vectors +% cv.phase - Calculates the rotation angle of 2D vectors +% cv.magnitude - Calculates the magnitude of 2D vectors +% cv.transform - Performs the matrix transformation of every array element +% cv.perspectiveTransform - Performs the perspective matrix transformation of vectors +% cv.invert - Finds the inverse or pseudo-inverse of a matrix +% cv.solve - Solves one or more linear systems or least-squares problems +% cv.eigen - Calculates eigenvalues and eigenvectors of a symmetric matrix +% cv.eigenNonSymmetric - Calculates eigenvalues and eigenvectors of a non-symmetric matrix (real eigenvalues only) +% cv.calcCovarMatrix - Calculates the covariance matrix of a set of vectors +% cv.Mahalanobis - Calculates the Mahalanobis distance between two vectors +% cv.dft - Performs a forward or inverse Discrete Fourier transform of a 1D or 2D floating-point array +% cv.dct - Performs a forward or inverse discrete Cosine transform of 1D or 2D array +% cv.mulSpectrums - Performs the per-element multiplication of two Fourier spectrums +% cv.getOptimalDFTSize - Returns the optimal DFT size for a given vector size +% cv.setRNGSeed - Sets state of default random number generator +% cv.PCA - Principal Component Analysis class +% cv.LDA - Linear Discriminant Analysis +% cv.SVD - Singular Value Decomposition +% cv.kmeans - Finds centers of clusters and groups input samples around the clusters +% cv.Rect - Class for 2D rectangles +% cv.RotatedRect - The class represents rotated (i.e. not up-right) rectangles on a plane +% cv.copyTo - Copies the matrix to another one +% cv.convertTo - Converts an array to another data type with optional scaling +% cv.FileStorage - Reading from or writing to an XML/YAML/JSON file storage +% cv.tempfile - Return name of a temporary file +% cv.glob - Find all pathnames matching a specified pattern +% cv.Utils - Utility and system information functions +% cv.getBuildInformation - Returns OpenCV build information +% cv.TickMeter - A class to measure passing time +% cv.DownhillSolver - Non-linear non-constrained minimization of a function +% cv.ConjGradSolver - Non-linear non-constrained minimization of a function with known gradient +% cv.solveLP - Solve given (non-integer) linear programming problem using the Simplex Algorithm +% +% imgproc: Image Processing +% cv.GeneralizedHoughBallard - Generalized Hough transform +% cv.GeneralizedHoughGuil - Generalized Hough transform +% cv.CLAHE - Contrast Limited Adaptive Histogram Equalization +% cv.Subdiv2D - Delaunay triangulation and Voronoi tessellation +% cv.LineSegmentDetector - Line segment detector class +% cv.getGaussianKernel - Returns Gaussian filter coefficients +% cv.getDerivKernels - Returns filter coefficients for computing spatial image derivatives +% cv.getGaborKernel - Returns Gabor filter coefficients +% cv.getStructuringElement - Returns a structuring element of the specified size and shape for morphological operations +% cv.medianBlur - Blurs an image using the median filter +% cv.GaussianBlur - Smoothes an image using a Gaussian filter +% cv.bilateralFilter - Applies the bilateral filter to an image +% cv.boxFilter - Blurs an image using the box filter +% cv.sqrBoxFilter - Calculates the normalized sum of squares of the pixel values overlapping the filter +% cv.blur - Smoothes an image using the normalized box filter +% cv.filter2D - Convolves an image with the kernel +% cv.sepFilter2D - Applies a separable linear filter to an image +% cv.Sobel -
Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator +% cv.spatialGradient - Calculates the first order image derivative in both x and y using a Sobel operator +% cv.Scharr - Calculates the first x- or y- image derivative using Scharr operator +% cv.Laplacian - Calculates the Laplacian of an image +% cv.Canny - Finds edges in an image using the Canny algorithm +% cv.Canny2 - Finds edges in an image using the Canny algorithm with custom image gradient +% cv.cornerMinEigenVal - Calculates the minimal eigenvalue of gradient matrices for corner detection +% cv.cornerHarris - Harris corner detector +% cv.cornerEigenValsAndVecs - Calculates eigenvalues and eigenvectors of image blocks for corner detection +% cv.preCornerDetect - Calculates a feature map for corner detection +% cv.cornerSubPix - Refines the corner locations +% cv.goodFeaturesToTrack - Determines strong corners on an image +% cv.HoughLines - Finds lines in a binary image using the standard Hough transform +% cv.HoughLinesP - Finds line segments in a binary image using the probabilistic Hough transform +% cv.HoughCircles - Finds circles in a grayscale image using the Hough transform +% cv.erode - Erodes an image by using a specific structuring element +% cv.dilate - Dilates an image by using a specific structuring element +% cv.morphologyEx - Performs advanced morphological transformations +% cv.resize - Resizes an image +% cv.warpAffine - Applies an affine transformation to an image +% cv.warpPerspective - Applies a perspective transformation to an image +% cv.remap - Applies a generic geometrical transformation to an image +% cv.convertMaps - Converts image transformation maps from one representation to another +% cv.getRotationMatrix2D - Calculates an affine matrix of 2D rotation +% cv.getAffineTransform - Calculates an affine transform from three pairs of corresponding points +% cv.invertAffineTransform - Inverts an affine transformation +% cv.getPerspectiveTransform - Calculates a perspective transform from four pairs of the corresponding points +% cv.getRectSubPix - Retrieves a pixel rectangle from an image with sub-pixel accuracy +% cv.logPolar - Remaps an image to semilog-polar coordinates space +% cv.linearPolar - Remaps an image to polar coordinates space +% cv.integral - Calculates the integral of an image +% cv.accumulate - Adds an image to the accumulator +% cv.accumulateSquare - Adds the square of a source image to the accumulator +% cv.accumulateProduct - Adds the per-element product of two input images to the accumulator +% cv.accumulateWeighted - Updates a running average +% cv.phaseCorrelate - Detect translational shifts that occur between two images +% cv.createHanningWindow - Computes Hanning window coefficients in two dimensions +% cv.threshold - Applies a fixed-level threshold to each array element +% cv.adaptiveThreshold - Applies an adaptive threshold to an array +% cv.pyrDown - Blurs an image and downsamples it +% cv.pyrUp - Upsamples an image and then blurs it +% cv.buildPyramid - Constructs the Gaussian pyramid for an image +% cv.undistort - Transforms an image to compensate for lens distortion +% cv.initUndistortRectifyMap - Computes the undistortion and rectification transformation map +% cv.initWideAngleProjMap - Initializes maps for cv.remap for wide-angle +% cv.getDefaultNewCameraMatrix - Returns the default new camera matrix +% cv.undistortPoints - Computes the ideal point coordinates from the observed point coordinates +% cv.calcHist - Calculates a
histogram of a set of arrays +% cv.calcBackProject - Calculates the back projection of a histogram +% cv.compareHist - Compares two histograms +% cv.equalizeHist - Equalizes the histogram of a grayscale image +% cv.EMD - Computes the "minimal work" distance between two weighted point configurations +% cv.watershed - Performs a marker-based image segmentation using the watershed algorithm +% cv.pyrMeanShiftFiltering - Performs initial step of meanshift segmentation of an image +% cv.grabCut - Runs the GrabCut algorithm +% cv.distanceTransform - Calculates the distance to the closest zero pixel for each pixel of the source image +% cv.floodFill - Fills a connected component with the given color +% cv.cvtColor - Converts an image from one color space to another +% cv.demosaicing - Demosaicing algorithm +% cv.moments - Calculates all of the moments up to the third order of a polygon or rasterized shape +% cv.HuMoments - Calculates seven Hu invariants +% cv.matchTemplate - Compares a template against overlapped image regions +% cv.connectedComponents - Computes the connected components labeled image of a boolean image +% cv.findContours - Finds contours in a binary image +% cv.approxPolyDP - Approximates a polygonal curve(s) with the specified precision +% cv.arcLength - Calculates a contour perimeter or a curve length +% cv.boundingRect - Calculates the up-right bounding rectangle of a point set +% cv.contourArea - Calculates a contour area +% cv.minAreaRect - Finds a rotated rectangle of the minimum area enclosing the input 2D point set +% cv.boxPoints - Finds the four vertices of a rotated rectangle +% cv.minEnclosingCircle - Finds a circle of the minimum area enclosing a 2D point set +% cv.minEnclosingTriangle - Finds a triangle of minimum area enclosing a 2D point set and returns its area +% cv.matchShapes - Compares two shapes +% cv.convexHull - Finds the convex hull of a point set +% cv.convexityDefects - Finds the convexity defects of a contour +% cv.isContourConvex - Tests a contour convexity +% cv.intersectConvexConvex - Finds intersection of two convex polygons +% cv.fitEllipse - Fits an ellipse around a set of 2D points +% cv.fitLine - Fits a line to a 2D or 3D point set +% cv.pointPolygonTest - Performs a point-in-contour test +% cv.rotatedRectangleIntersection - Finds out if there is any intersection between two rotated rectangles +% cv.blendLinear - Performs linear blending of two images +% cv.applyColorMap - Applies a GNU Octave/MATLAB equivalent colormap on a given image +% cv.line - Draws a line segment connecting two points +% cv.arrowedLine - Draws an arrow segment pointing from the first point to the second one +% cv.rectangle - Draws a simple, thick, or filled up-right rectangle +% cv.circle - Draws a circle +% cv.ellipse - Draws a simple or thick elliptic arc or fills an ellipse sector +% cv.drawMarker - Draws a marker on a predefined position in an image +% cv.fillConvexPoly - Fills a convex polygon +% cv.fillPoly - Fills the area bounded by one or more polygons +% cv.polylines - Draws several polygonal curves +% cv.drawContours - Draws contours outlines or filled contours +% cv.clipLine - Clips the line against the image rectangle +% cv.ellipse2Poly - Approximates an elliptic arc with a polyline +% cv.putText - Draws a text string +% cv.getTextSize - Calculates the width and height of a text string +% cv.LineIterator - Raster line iterator +% +% imgcodecs: Image File Reading and Writing +% cv.imread - Loads an image from a file +% cv.imreadmulti - Loads a multi-page
image from a file +% cv.imwrite - Saves an image to a specified file +% cv.imdecode - Reads an image from a buffer in memory +% cv.imencode - Encodes an image into a memory buffer +% +% videoio: Video I/O +% cv.VideoCapture - Class for video capturing from video files or cameras +% cv.VideoWriter - Video Writer class +% +% video: Video Analysis +% cv.CamShift - Finds an object center, size, and orientation +% cv.meanShift - Finds an object on a back projection image +% cv.buildOpticalFlowPyramid - Constructs the image pyramid which can be passed to cv.calcOpticalFlowPyrLK +% cv.calcOpticalFlowPyrLK - Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with pyramids +% cv.calcOpticalFlowFarneback - Computes a dense optical flow using the Gunnar Farneback's algorithm +% cv.estimateRigidTransform - Computes an optimal affine transformation between two 2D point sets +% cv.findTransformECC - Finds the geometric transform (warp) between two images in terms of the ECC criterion +% cv.KalmanFilter - Kalman filter class +% cv.DualTVL1OpticalFlow - "Dual TV L1" Optical Flow Algorithm +% cv.FarnebackOpticalFlow - Dense optical flow using the Gunnar Farneback's algorithm +% cv.SparsePyrLKOpticalFlow - Class used for calculating a sparse optical flow +% cv.BackgroundSubtractorMOG2 - Gaussian Mixture-based Background/Foreground Segmentation Algorithm +% cv.BackgroundSubtractorKNN - K-nearest neighbours based Background/Foreground Segmentation Algorithm +% +% calib3d: Camera Calibration and 3D Reconstruction +% cv.Rodrigues - Converts a rotation matrix to a rotation vector or vice versa +% cv.findHomography - Finds a perspective transformation between two planes +% cv.RQDecomp3x3 - Computes an RQ decomposition of 3x3 matrices +% cv.decomposeProjectionMatrix - Decomposes a projection matrix into a rotation matrix and a camera matrix +% cv.matMulDeriv - Computes partial derivatives of the matrix product for each multiplied matrix +% cv.composeRT - Combines two rotation-and-shift transformations +% cv.projectPoints - Projects 3D points to an image plane +% cv.solvePnP - Finds an object pose from 3D-2D point correspondences +% cv.solvePnPRansac - Finds an object pose from 3D-2D point correspondences using the RANSAC scheme +% cv.solveP3P - Finds an object pose from 3 3D-2D point correspondences +% cv.initCameraMatrix2D - Finds an initial camera matrix from 3D-2D point correspondences +% cv.findChessboardCorners - Finds the positions of internal corners of the chessboard +% cv.find4QuadCornerSubpix - Finds subpixel-accurate positions of the chessboard corners +% cv.drawChessboardCorners - Renders the detected chessboard corners +% cv.findCirclesGrid - Finds the centers in the grid of circles +% cv.calibrateCamera - Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern +% cv.calibrationMatrixValues - Computes useful camera characteristics from the camera matrix +% cv.stereoCalibrate - Calibrates the stereo camera +% cv.stereoRectify - Computes rectification transforms for each head of a calibrated stereo camera +% cv.stereoRectifyUncalibrated - Computes a rectification transform for an uncalibrated stereo camera +% cv.rectify3Collinear - Computes the rectification transformations for 3-head camera, where all the heads are on the same line +% cv.getOptimalNewCameraMatrix - Returns the new camera matrix based on the free scaling parameter +% cv.convertPointsToHomogeneous - Converts points from Euclidean to homogeneous space +%
cv.convertPointsFromHomogeneous - Converts points from homogeneous to Euclidean space +% cv.findFundamentalMat - Calculates a fundamental matrix from the corresponding points in two images +% cv.findEssentialMat - Calculates an essential matrix from the corresponding points in two images +% cv.decomposeEssentialMat - Decompose an essential matrix to possible rotations and translation +% cv.recoverPose - Recover relative camera rotation and translation from an estimated essential matrix and the corresponding points in two images, using cheirality check +% cv.computeCorrespondEpilines - For points in an image of a stereo pair, computes the corresponding epilines in the other image +% cv.triangulatePoints - Reconstructs points by triangulation +% cv.correctMatches - Refines coordinates of corresponding points +% cv.filterSpeckles - Filters off small noise blobs (speckles) in the disparity map +% cv.getValidDisparityROI - Computes valid disparity ROI from the valid ROIs of the rectified images +% cv.validateDisparity - Validates disparity using the left-right check +% cv.reprojectImageTo3D - Reprojects a disparity image to 3D space +% cv.sampsonDistance - Calculates the Sampson Distance between two points +% cv.estimateAffine3D - Computes an optimal affine transformation between two 3D point sets +% cv.estimateAffine2D - Computes an optimal affine transformation between two 2D point sets +% cv.estimateAffinePartial2D - Computes an optimal limited affine transformation with 4 degrees of freedom between two 2D point sets +% cv.decomposeHomographyMat - Decompose a homography matrix to rotation(s), translation(s) and plane normal(s) +% cv.StereoBM - Class for computing stereo correspondence using the block matching algorithm +% cv.StereoSGBM - Class for computing stereo correspondence using the semi-global block matching algorithm +% +% features2d: 2D Features Framework +% cv.KeyPointsFilter - Methods to filter a vector of keypoints +% cv.FeatureDetector - Common interface of 2D image Feature Detectors +% cv.DescriptorExtractor - Common interface of 2D image Descriptor Extractors +% cv.BRISK - Class implementing the BRISK keypoint detector and descriptor extractor +% cv.ORB - Class implementing the ORB (oriented BRIEF) keypoint detector and descriptor extractor +% cv.MSER - Maximally Stable Extremal Region extractor +% cv.FAST - Detects corners using the FAST algorithm +% cv.FastFeatureDetector - Wrapping class for feature detection using the FAST method +% cv.AGAST - Detects corners using the AGAST algorithm +% cv.AgastFeatureDetector - Wrapping class for feature detection using the AGAST method +% cv.GFTTDetector - Wrapping class for feature detection using the goodFeaturesToTrack function +% cv.SimpleBlobDetector - Class for extracting blobs from an image +% cv.KAZE - Class implementing the KAZE keypoint detector and descriptor extractor +% cv.AKAZE - Class implementing the AKAZE keypoint detector and descriptor extractor +% cv.DescriptorMatcher - Common interface for matching keypoint descriptors +% cv.drawKeypoints - Draws keypoints +% cv.drawMatches - Draws the found matches of keypoints from two images +% cv.evaluateFeatureDetector - Evaluates a feature detector +% cv.computeRecallPrecisionCurve - Evaluate a descriptor extractor by computing precision/recall curve +% cv.BOWKMeansTrainer - KMeans-based class to train visual vocabulary using the bag of visual words approach +% cv.BOWImgDescriptorExtractor - Class to compute an image descriptor using the bag of visual words +% +% objdetect: 
Object Detection +% cv.SimilarRects - Class for grouping object candidates, detected by Cascade Classifier, HOG etc. +% cv.groupRectangles - Groups the object candidate rectangles +% cv.groupRectangles_meanshift - Groups the object candidate rectangles using meanshift +% cv.CascadeClassifier - Haar Feature-based Cascade Classifier for Object Detection +% cv.HOGDescriptor - Histogram of Oriented Gradients (HOG) descriptor and object detector +% cv.DetectionBasedTracker - Detection-based tracker +% +% dnn: Deep Neural Network +% cv.Net - Create and manipulate comprehensive artificial neural networks +% +% ml: Machine Learning +% cv.NormalBayesClassifier - Bayes classifier for normally distributed data +% cv.KNearest - The class implements K-Nearest Neighbors model +% cv.SVM - Support Vector Machines +% cv.EM - Expectation Maximization Algorithm +% cv.DTrees - Decision Trees +% cv.RTrees - Random Trees +% cv.Boost - Boosted tree classifier derived from cv.DTrees +% cv.ANN_MLP - Artificial Neural Networks - Multi-Layer Perceptrons +% cv.LogisticRegression - Logistic Regression classifier +% cv.SVMSGD - Stochastic Gradient Descent SVM classifier +% cv.randMVNormal - Generates sample from multivariate normal distribution +% cv.createConcentricSpheresTestSet - Creates test set +% +% photo: Computational Photography +% cv.inpaint - Restores the selected region in an image using the region neighborhood +% cv.fastNlMeansDenoising - Image denoising using Non-local Means Denoising algorithm +% cv.fastNlMeansDenoisingColored - Modification of fastNlMeansDenoising function for colored images +% cv.fastNlMeansDenoisingMulti - Modification of fastNlMeansDenoising function for image sequences +% cv.fastNlMeansDenoisingColoredMulti - Modification of fastNlMeansDenoisingMulti function for colored image sequences +% cv.denoise_TVL1 - Primal-Dual algorithm to perform image denoising +% cv.Tonemap - Tonemapping algorithm used to map HDR image to 8-bit range +% cv.TonemapDrago - Tonemapping algorithm used to map HDR image to 8-bit range +% cv.TonemapDurand - Tonemapping algorithm used to map HDR image to 8-bit range +% cv.TonemapReinhard - Tonemapping algorithm used to map HDR image to 8-bit range +% cv.TonemapMantiuk - Tonemapping algorithm used to map HDR image to 8-bit range +% cv.AlignMTB - Aligns images of the same scene with different exposures +% cv.CalibrateDebevec - Camera Response Calibration algorithm +% cv.CalibrateRobertson - Camera Response Calibration algorithm +% cv.MergeDebevec - Merge exposure sequence to a single image +% cv.MergeMertens - Merge exposure sequence to a single image +% cv.MergeRobertson - Merge exposure sequence to a single image +% cv.decolor - Transforms a color image to a grayscale image +% cv.seamlessClone - Seamless Cloning +% cv.colorChange - Color Change +% cv.illuminationChange - Illumination Change +% cv.textureFlattening - Texture Flattening +% cv.edgePreservingFilter - Edge-preserving smoothing filter +% cv.detailEnhance - This filter enhances the details of a particular image +% cv.pencilSketch - Pencil-like non-photorealistic line drawing +% cv.stylization - Stylization filter +% +% stitching: Images Stitching +% cv.RotationWarper - Rotation-only model image warper +% cv.FeaturesFinder - Feature finders class +% cv.FeaturesMatcher - Feature matchers class +% cv.Estimator - Rotation estimator base class +% cv.BundleAdjuster - Class for all camera parameters refinement methods +% cv.ExposureCompensator - Class for all exposure compensators +% cv.SeamFinder -
Class for all seam estimators +% cv.Blender - Class for all image blenders +% cv.Timelapser - Timelapser class +% cv.Stitcher - High level image stitcher +% +% shape: Shape Distance and Matching +% cv.EMDL1 - Computes the "minimal work" distance between two weighted point configurations +% cv.ShapeTransformer - Base class for shape transformation algorithms +% cv.ShapeContextDistanceExtractor - Implementation of the Shape Context descriptor and matching algorithm +% cv.HausdorffDistanceExtractor - A simple Hausdorff distance measure between shapes defined by contours +% +% superres: Super Resolution +% cv.SuperResolution - Class for a whole family of Super Resolution algorithms +% +% videostab: Video Stabilization +% cv.estimateGlobalMotionLeastSquares - Estimates best global motion between two 2D point clouds in the least-squares sense +% cv.estimateGlobalMotionRansac - Estimates best global motion between two 2D point clouds robustly (using RANSAC method) +% cv.calcBlurriness - Calculate image blurriness +% cv.OnePassStabilizer - A one-pass video stabilizer +% cv.TwoPassStabilizer - A two-pass video stabilizer +% +%% opencv_contrib: Extra Modules +% +% aruco: ArUco Marker Detection +% cv.dictionaryDump - Dump dictionary (aruco) +% cv.detectMarkers - Basic ArUco marker detection +% cv.estimatePoseSingleMarkers - Pose estimation for single markers +% cv.boardDump - Dump board (aruco) +% cv.estimatePoseBoard - Pose estimation for a board of markers +% cv.refineDetectedMarkers - Re-find markers that were not detected, based on the already detected markers and the board layout +% cv.drawDetectedMarkers - Draw detected markers in image +% cv.drawAxis - Draw coordinate system axis from pose estimation +% cv.drawMarkerAruco - Draw a canonical marker image +% cv.drawPlanarBoard - Draw a planar board +% cv.calibrateCameraAruco - Calibrate a camera using aruco markers +% cv.getBoardObjectAndImagePoints - Given a board configuration and a set of detected markers, returns the corresponding image points and object points to call solvePnP +% cv.drawCharucoBoard - Draw a ChArUco board +% cv.interpolateCornersCharuco - Interpolate position of ChArUco board corners +% cv.estimatePoseCharucoBoard - Pose estimation for a ChArUco board given some of its corners +% cv.drawDetectedCornersCharuco - Draws a set of Charuco corners +% cv.calibrateCameraCharuco - Calibrate a camera using Charuco corners +% cv.detectCharucoDiamond - Detect ChArUco Diamond markers +% cv.drawDetectedDiamonds - Draw a set of detected ChArUco Diamond markers +% cv.drawCharucoDiamond - Draw a ChArUco Diamond marker +% +% bgsegm: Improved Background-Foreground Segmentation Methods +% cv.BackgroundSubtractorMOG - Gaussian Mixture-based Background/Foreground Segmentation Algorithm +% cv.BackgroundSubtractorGMG - Background Subtractor module +% cv.BackgroundSubtractorCNT - Background subtraction based on counting +% +% bioinspired: Biologically Inspired Vision Models and Derived Tools +% cv.Retina - A biological retina model for image spatio-temporal noise and luminance changes enhancement +% cv.RetinaFastToneMapping - Class with tone mapping algorithm of Meylan et al. 
(2007) +% cv.TransientAreasSegmentationModule - Class which provides a transient/moving areas segmentation module +% +% datasets: Framework for Working with Different Datasets +% cv.Dataset - Class for working with different datasets +% +% dpm: Deformable Part-based Models +% cv.DPMDetector - Deformable Part-based Models (DPM) detector +% +% face: Face Recognition +% cv.BasicFaceRecognizer - Face Recognition based on Eigen-/Fisher-faces +% cv.LBPHFaceRecognizer - Face Recognition based on Local Binary Patterns +% cv.BIF - Implementation of bio-inspired features (BIF) +% +% img_hash: Image Hashing Algorithms +% cv.ImgHash - Base class for Image Hashing algorithms +% +% line_descriptor: Binary Descriptors for Lines Extracted from an Image +% cv.BinaryDescriptor - Class implements both functionalities for detection of lines and computation of their binary descriptor +% cv.LSDDetector - Line Segment Detector +% cv.BinaryDescriptorMatcher - BinaryDescriptor matcher class +% cv.drawLineMatches - Draws the found matches of keylines from two images +% cv.drawKeylines - Draws keylines +% +% optflow: Optical Flow Algorithms +% cv.OpticalFlowPCAFlow - PCAFlow algorithm +% cv.GPCForest - Implementation of the Global Patch Collider algorithm +% cv.calcOpticalFlowSF - Calculate an optical flow using "SimpleFlow" algorithm +% cv.calcOpticalFlowDF - DeepFlow optical flow algorithm implementation +% cv.calcOpticalFlowSparseToDense - Fast dense optical flow based on PyrLK sparse matches interpolation +% cv.readOpticalFlow - Read a .flo file +% cv.writeOpticalFlow - Write a .flo to disk +% cv.VariationalRefinement - Variational optical flow refinement +% cv.DISOpticalFlow - DIS optical flow algorithm +% cv.updateMotionHistory - Updates the motion history image by a moving silhouette +% cv.calcMotionGradient - Calculates a gradient orientation of a motion history image +% cv.calcGlobalOrientation - Calculates a global motion orientation in a selected region +% cv.segmentMotion - Splits a motion history image into a few parts corresponding to separate independent motions (for example, left hand, right hand) +% +% plot: Plot Function for Mat Data +% cv.Plot2d - Class to plot 2D data +% +% saliency: Saliency API +% cv.StaticSaliencySpectralResidual - The Spectral Residual approach for Static Saliency +% cv.StaticSaliencyFineGrained - The Fine Grained Saliency approach for Static Saliency +% cv.MotionSaliencyBinWangApr2014 - A Fast Self-tuning Background Subtraction Algorithm for Motion Saliency +% cv.ObjectnessBING - The Binarized normed gradients algorithm for Objectness +% +% xfeatures2d: Extra 2D Features Framework +% cv.SIFT - Class for extracting keypoints and computing descriptors using the Scale Invariant Feature Transform (SIFT) +% cv.SURF - Class for extracting Speeded Up Robust Features from an image +% cv.FREAK - Class implementing the FREAK (Fast Retina Keypoint) keypoint descriptor +% cv.StarDetector - The class implements the Star keypoint detector +% cv.BriefDescriptorExtractor - Class for computing BRIEF descriptors +% cv.LUCID - Class implementing the Locally Uniform Comparison Image Descriptor +% cv.LATCH - Class for computing the LATCH descriptor +% cv.DAISY - Class implementing DAISY descriptor +% cv.MSDDetector - Class implementing the MSD (Maximal Self-Dissimilarity) keypoint detector +% cv.VGG - Class implementing VGG (Oxford Visual Geometry Group) descriptor +% cv.BoostDesc - Class implementing BoostDesc (Learning Image Descriptors with Boosting) +% cv.PCTSignatures - Class implementing 
PCT (Position-Color-Texture) signature extraction +% cv.PCTSignaturesSQFD - Class implementing Signature Quadratic Form Distance (SQFD) +% cv.HarrisLaplaceFeatureDetector - Class implementing the Harris-Laplace feature detector +% cv.AffineFeature2D - Class implementing affine adaptation for key points +% cv.FASTForPointSet - Estimates cornerness for prespecified KeyPoints using the FAST algorithm +% +% ximgproc: Extended Image Processing +% cv.DTFilter - Interface for realizations of Domain Transform filter +% cv.GuidedFilter - Interface for realizations of Guided Filter +% cv.AdaptiveManifoldFilter - Interface for Adaptive Manifold Filter realizations +% cv.jointBilateralFilter - Applies the joint bilateral filter to an image +% cv.bilateralTextureFilter - Applies the bilateral texture filter to an image +% cv.rollingGuidanceFilter - Applies the rolling guidance filter to an image +% cv.FastGlobalSmootherFilter - Interface for implementations of Fast Global Smoother filter +% cv.l0Smooth - Global image smoothing via L0 gradient minimization +% cv.DisparityWLSFilter - Disparity map filter based on Weighted Least Squares filter +% cv.EdgeAwareInterpolator - Sparse match interpolation algorithm +% cv.StructuredEdgeDetection - Class implementing edge detection algorithm +% cv.SuperpixelSEEDS - Class implementing the SEEDS (Superpixels Extracted via Energy-Driven Sampling) superpixels algorithm +% cv.SuperpixelSLIC - Class implementing the SLIC (Simple Linear Iterative Clustering) superpixels algorithm +% cv.SuperpixelLSC - Class implementing the LSC (Linear Spectral Clustering) superpixels algorithm +% cv.GraphSegmentation - Graph Based Segmentation algorithm +% cv.SelectiveSearchSegmentation - Selective search segmentation algorithm +% cv.FastHoughTransform - Calculates 2D Fast Hough transform of an image +% cv.HoughPoint2Line - Calculates coordinates of the line segment corresponding to a point in Hough space +% cv.FastLineDetector - Class implementing the FLD (Fast Line Detector) algorithm +% cv.covarianceEstimation - Computes the estimated covariance matrix of an image using the sliding window formulation +% cv.weightedMedianFilter - Applies weighted median filter to an image +% cv.GradientPaillou - Applies Paillou filter to an image +% cv.GradientDeriche - Applies Deriche filter to an image +% cv.PeiLinNormalization - Calculates an affine transformation that normalizes the given image using Pei/Lin Normalization +% cv.niBlackThreshold - Performs thresholding on input images using Niblack's technique or some of the popular variations it inspired +% cv.thinning - Applies a binary blob thinning operation to achieve a skeletonization of the input image +% cv.anisotropicDiffusion - Performs anisotropic diffusion on an image +% +% xobjdetect: Extended Object Detection +% cv.WBDetector - WaldBoost detector - Object Detection using Boosted Features +% +% xphoto: Additional Photo Processing Algorithms +% cv.inpaint2 - The function implements different single-image inpainting algorithms +% cv.SimpleWB - Simple white balance algorithm +% cv.GrayworldWB - Gray-world white balance algorithm +% cv.LearningBasedWB - More sophisticated learning-based automatic white balance algorithm +% cv.applyChannelGains - Implements an efficient fixed-point approximation for applying channel gains, which is the last step of multiple white balance algorithms +% cv.dctDenoising - The function implements simple DCT-based denoising +% cv.bm3dDenoising - Performs image denoising using the Block-Matching and 3D-filtering algorithm 
+% diff --git a/Doxyfile b/Doxyfile index 313f05dc2..564600348 100644 --- a/Doxyfile +++ b/Doxyfile @@ -38,7 +38,7 @@ PROJECT_NAME = mexopencv # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 3.3.0 +PROJECT_NUMBER = 3.3.1 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a @@ -837,7 +837,8 @@ RECURSIVE = YES # Note that relative paths are relative to the directory from which doxygen is # run. -EXCLUDE = +EXCLUDE = doc/wiki/ \ + doc/matlab/ # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded diff --git a/Makefile b/Makefile index 6a4036938..5caf4edd3 100644 --- a/Makefile +++ b/Makefile @@ -13,9 +13,8 @@ # MATLAB MATLAB/Octave executable. # MEX MATLAB/Octave MEX compiler frontend. # MEXEXT MATLAB/Octave extension of MEX-files. -# DOXYGEN Doxygen executable used to generate documentation. -# NO_CV_PKGCONFIG_HACK If set, disables fixing the output of pkg-config with -# OpenCV. Not set by default, meaning hack is applied. +# PKG_CONFIG_CV_HACK If set, attempts to fix the output of pkg-config with +# OpenCV. Not set by default (no hack). # PKG_CONFIG_OPENCV Name of OpenCV 3 pkg-config package. Default opencv. # CXXFLAGS Extra flags passed to the C++ MEX compiler. # LDFLAGS Extra flags passed to the linker by the compiler. @@ -35,6 +34,7 @@ # clean Deletes temporary and generated files. # doc Generates source documentation using Doxygen. # test Run MATLAB unit-tests. +# testci Similar to test, runs in batches (intended for CI). # # Note that the Makefile uses pkg-config to locate OpenCV, so you need to have # the opencv.pc file accessible from the PKG_CONFIG_PATH environment variable. 
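The Makefile targets above have an in-MATLAB counterpart: `mexopencv.make`, the same entry point the CI configuration later in this patch calls via `BUILD_CMD`. A minimal sketch of an equivalent build run from the MATLAB/Octave prompt (option values are illustrative, not the only valid ones):

    >> % compile core and contrib MEX-files against the OpenCV install in OPENCV_DIR
    >> mexopencv.make('opencv_path',getenv('OPENCV_DIR'), 'opencv_contrib',true, 'verbose',1);
    >> % wipe stale artifacts first when switching configurations
    >> mexopencv.make('clean',true, 'opencv_contrib',true);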
@@ -47,13 +47,12 @@ ifdef WITH_OCTAVE MATLABDIR ?= /usr MEX ?= $(MATLABDIR)/bin/mkoctfile --mex -MATLAB ?= $(MATLABDIR)/bin/octave-cli --no-gui --no-window-system --quiet +MATLAB ?= $(MATLABDIR)/bin/octave-cli --no-gui --no-window-system --no-init-file --quiet else MATLABDIR ?= /usr/local/matlab MEX ?= $(MATLABDIR)/bin/mex -MATLAB ?= $(MATLABDIR)/bin/matlab -nodisplay -noFigureWindows -nosplash +MATLAB ?= $(MATLABDIR)/bin/matlab -nodesktop -nodisplay -noFigureWindows -nosplash endif -DOXYGEN ?= doxygen # file extensions OBJEXT ?= o @@ -79,7 +78,7 @@ ifneq ($(shell pkg-config --exists --atleast-version=3 $(PKG_CONFIG_OPENCV); ech endif CV_CFLAGS := $(shell pkg-config --cflags $(PKG_CONFIG_OPENCV)) CV_LDFLAGS := $(shell pkg-config --libs $(PKG_CONFIG_OPENCV)) -ifndef NO_CV_PKGCONFIG_HACK +ifdef PKG_CONFIG_CV_HACK LIB_SUFFIX := %.so %.dylib %.a %.la %.dll.a %.dll CV_LDFLAGS := $(filter-out $(LIB_SUFFIX),$(CV_LDFLAGS)) \ $(addprefix -L, \ @@ -136,7 +135,7 @@ vpath %.cpp opencv_contrib/src/+cv/private endif # special targets -.PHONY: all contrib clean doc test +.PHONY: all contrib clean doc test testci .SUFFIXES: .cpp .$(OBJEXT) .$(LIBEXT) .$(MEXEXT) # main targets @@ -184,14 +183,36 @@ clean: opencv_contrib/+cv/private/*.$(MEXEXT) doc: - $(DOXYGEN) Doxyfile + doxygen Doxyfile + +# test targets +ifdef WITH_CONTRIB +TEST_CONTRIB := true +else +TEST_CONTRIB := false +endif +TEST_CMD := \ + args = {'ContribModules',$(TEST_CONTRIB), 'Verbosity',2}; \ + letter = getenv('CI_TEST_LETTER'); if ~isempty(letter), \ + args = {args{:}, 'MatchPattern',['^Test' letter], 'XUnitFile','', \ + 'LogFile',sprintf('UnitTest_%s.log', letter)}; end, \ + cvsetup(args{2}); [~,pass] = UnitTest(args{:}); exit(~pass); +ALPHABET := A B C D E F G H I J K L M N O P Q R S T U V W X Y Z + +testci: +ifdef WITH_OCTAVE + cd .ci && for letter in $(ALPHABET); do \ + CI_TEST_LETTER=$$letter $(MATLAB) --eval "$(TEST_CMD)" || exit $$?; \ + done +else + cd .ci && for letter in $(ALPHABET); do \ + CI_TEST_LETTER=$$letter $(MATLAB) -r "$(TEST_CMD)" || exit $$?; \ + done +endif -#TODO: https://savannah.gnu.org/bugs/?41699 -# we can't always trust Octave's exit code on Windows! It throws 0xC0000005 -# on exit (access violation), even when it runs just fine. test: ifdef WITH_OCTAVE - $(MATLAB) --eval "addpath(pwd);cd test;try,UnitTest('ContribModules',$(WITH_CONTRIB),'Verbosity',2);catch e,disp(e);exit(1);end;exit(0);" || echo "Exit code: $$?" + cd .ci && $(MATLAB) --eval "$(TEST_CMD)" else - $(MATLAB) -r "addpath(pwd);cd test;try,UnitTest('ContribModules',$(WITH_CONTRIB),'Verbosity',2);catch e,disp(e.getReport);end;exit;" + cd .ci && $(MATLAB) -r "$(TEST_CMD)" endif diff --git a/README.markdown b/README.markdown index b8aeb7806..2cad0e734 100644 --- a/README.markdown +++ b/README.markdown @@ -58,7 +58,7 @@ Build Prerequisite - [MATLAB][4] or [Octave][5] (>= 4.0.0) -- [OpenCV][6] (3.3.0) +- [OpenCV][6] (3.3.1) Depending on your platform, you also need the required build tools: @@ -74,7 +74,7 @@ Refer to the [wiki][3] for detailed build instructions. OpenCV ------ -Currently, mexopencv targets the final **3.3.0** stable version of OpenCV. You +Currently, mexopencv targets the final **3.3.1** stable version of OpenCV. You must build it against this exact version, rather than using the bleeding-edge dev-version of `opencv` and `opencv_contrib`. UNIX users should consider using a package manager to install OpenCV if available. @@ -83,12 +83,12 @@ a package manager to install OpenCV if available. 
- [OpenCV contributed modules][8] **DO NOT use the "master" branch of `opencv` and `opencv_contrib`!** -**Only the 3.3.0 release is supported by mexopencv.** +**Only the 3.3.1 release is supported by mexopencv.** Linux ----- -First make sure you have OpenCV 3.3.0 installed in the system: +First make sure you have OpenCV 3.3.1 installed in the system: - if applicable, install OpenCV 3 package available in your package manager (e.g., `libopencv-dev` in Debian/Ubuntu, `opencv-devel` in Fedora). @@ -182,7 +182,7 @@ Contrib modules are enabled as: If you have previously compiled mexopencv with a different configuration, don't forget to clean old artifacts before building: - >> mexopencv.make(..., 'clean',true) + >> mexopencv.make('clean',true, 'opencv_contrib',true) Usage ===== @@ -253,14 +253,14 @@ The code may be redistributed under the [BSD 3-Clause license](LICENSE). [1]: https://travis-ci.org/kyamagu/mexopencv [2]: https://ci.appveyor.com/project/kyamagu/mexopencv [3]: https://github.com/kyamagu/mexopencv/wiki -[4]: https://www.mathworks.com/products/matlab/ +[4]: https://www.mathworks.com/products/matlab.html [5]: https://www.gnu.org/software/octave/ -[6]: http://opencv.org/ -[7]: https://github.com/opencv/opencv/releases/tag/3.3.0 -[8]: https://github.com/opencv/opencv_contrib/releases/tag/3.3.0 -[9]: http://packages.ubuntu.com/zesty/libopencv-dev +[6]: https://opencv.org/ +[7]: https://github.com/opencv/opencv/releases/tag/3.3.1 +[8]: https://github.com/opencv/opencv_contrib/releases/tag/3.3.1 +[9]: https://packages.ubuntu.com/zesty/libopencv-dev [10]: https://people.freedesktop.org/~dbn/pkg-config-guide.html -[11]: http://brew.sh/ +[11]: https://brew.sh/ [12]: http://kyamagu.github.io/mexopencv/matlab [13]: https://github.com/kyamagu/mexopencv/wiki/Installation-%28Windows%2C-MATLAB%2C-OpenCV-3%29 [14]: https://github.com/kyamagu/mexopencv/wiki/Installation-%28Linux%2C-MATLAB%2C-OpenCV-3%29 diff --git a/appveyor.yml b/appveyor.yml index dd337be32..5465a2c20 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -4,14 +4,15 @@ # # Note: # We use prepared opencv binaries built using the same MinGW/Octave config, -# see: https://github.com/amroamroamro/opencv/blob/tag_3.3.0/appveyor.yml +# see: https://github.com/amroamroamro/opencv/blob/tag_3.3.1/appveyor.yml # # version format -version: 3.3.0.{build} +version: 3.3.1.{build} # clone directory -clone_folder: c:\projects\mexopencv +clone_folder: c:\dev\mexopencv +shallow_clone: true # whitelisted branches to build branches: @@ -26,8 +27,8 @@ deploy: off # build artifacts to keep artifacts: - path: mexopencv.7z - - path: test\UnitTest_*.log - - path: test\tests.xml + - path: .ci\*.log + - path: .ci\*.xml # global environment variables environment: @@ -38,86 +39,58 @@ environment: # Octave options OCTAVE_HOME: c:\dev\octave-4.2.1 - OCTAVE: octave-cli --no-gui --no-window-system + OCTAVE_OPTS: --no-gui --no-window-system --no-init-file --quiet # mexopencv options # (build choices: mexopencv.make() in Octave, or Makefile in shell) # (Note: Make 3.81 included has poor jobs support, we use Make 4.2 instead) - MCV_ROOT: c:\projects\mexopencv + MCV_ROOT: c:\dev\mexopencv WITH_MAKE: yes - MAKE_OPTS: MATLABDIR='/c/dev/octave-4.2.1' WITH_OCTAVE=true WITH_CONTRIB=true NO_CV_PKGCONFIG_HACK=true + MAKE_OPTS: MATLABDIR='/c/dev/octave-4.2.1' WITH_OCTAVE=true WITH_CONTRIB=true MAKE_EXTRA: -j2 - DOXY: yes + BUILD_CMD: cvsetup(true); mexopencv.make('opencv_path',getenv('OPENCV_DIR'), 'opencv_contrib',true, 'progress',false, 'verbose',1); + TEST_CMD: cvsetup(true); 
[~,pass] = UnitTest('ContribModules',true,'Verbosity',2); exit(~pass); + WITH_DOXY: yes install: # install dependencies (Doxygen, Make, OpenCV, Octave) - - mkdir "c:\dev" && cd "c:\dev" - - if "%DOXY%" == "yes" ( choco install doxygen.portable -y > nul ) - - ps: Start-FileDownload "https://github.com/amroamroamro/opencv/releases/download/3.2.0/mingw32-make.exe" -FileName mingw32-make.exe - - ps: Start-FileDownload "https://github.com/amroamroamro/opencv/releases/download/3.3.0/cv330_x86_mingw.7z" -FileName cv330_x86_mingw.7z - - ps: Start-FileDownload "https://ftp.gnu.org/gnu/octave/windows/octave-4.2.1-w32.zip" -FileName octave-4.2.1-w32.zip - - 7z x "c:\dev\cv330_x86_mingw.7z" -o"c:\dev\build" -y > nul + - cd "c:\dev" + - if "%WITH_DOXY%" == "yes" ( choco install doxygen.portable -y -r ) + - ps: Start-FileDownload "https://github.com/amroamroamro/opencv/releases/download/3.2.0/mingw32-make.exe" + - ps: Start-FileDownload "https://github.com/amroamroamro/opencv/releases/download/3.3.1/cv331_x86_mingw.7z" + - ps: Start-FileDownload "https://ftp.gnu.org/gnu/octave/windows/octave-4.2.1-w32.zip" + - 7z x "c:\dev\cv331_x86_mingw.7z" -o"c:\dev\build" -y > nul - 7z x "c:\dev\octave-4.2.1-w32.zip" -o"c:\dev" -y > nul - copy /y "%OCTAVE_HOME%\bin\libopenblas.dll" "%OCTAVE_HOME%\bin\libblas.dll" > nul - set "PATH=%OCTAVE_HOME%\bin;%OPENCV_DIR%\x86\mingw\bin;c:\dev;%PATH%" -before_build: - # create .octaverc file (to setup path and load required packages on start) - # (one for SHELL=sh.exe and one for SHELL=cmd.exe) - #HACK: we also add private directories on path, http://savannah.gnu.org/bugs/?45444 - - ps: | - $OctaveRC = @" - crash_dumps_octave_core(false); - more off - try, pkg load statistics, end - try, pkg load image, end - warning('off', 'Octave:GraphicsMagic-Quantum-Depth'); - warning('off', 'Octave:shadowed-function'); - cd(getenv('MCV_ROOT')); - addpath(getenv('MCV_ROOT')); - addpath(fullfile(getenv('MCV_ROOT'),'+cv','private')); - addpath(fullfile(getenv('MCV_ROOT'),'opencv_contrib')); - addpath(fullfile(getenv('MCV_ROOT'),'opencv_contrib','+cv','private')); - "@ - $HomeDirs = @( - (Join-Path (Join-Path $env:OCTAVE_HOME 'home') $env:USERNAME), - $env:USERPROFILE - ) - $HomeDirs | ForEach-Object { - $OctaveRCFile = (Join-Path $_ '.octaverc') - New-Item -ItemType File -Path "$OctaveRCFile" -Force | Out-Null - $OctaveRC | Out-File -FilePath "$OctaveRCFile" -Encoding ASCII - } - build_script: # compile mexopencv - - cd "%MCV_ROOT%" - if "%WITH_MAKE%" == "yes" ( sh --login -c "cd \"$MCV_ROOT\" && mingw32-make $MAKE_OPTS $MAKE_EXTRA all contrib" ) else ( - %OCTAVE% --eval "mexopencv.make('opencv_path',getenv('OPENCV_DIR'), 'opencv_contrib',true, 'progress',false, 'verbose',1);" + cd "%MCV_ROOT%\.ci" && octave-cli %OCTAVE_OPTS% --eval "%BUILD_CMD%" ) - # build docs - - if "%DOXY%" == "yes" ( doxygen Doxyfile ) - -after_build: # package MEX-files - - cd "%MCV_ROOT%" && 7z a -t7z "%APPVEYOR_BUILD_FOLDER%\mexopencv.7z" *.mex -r -y > nul + - cd "%MCV_ROOT%" && 7z a -t7z "%MCV_ROOT%\mexopencv.7z" *.mex -r -y > nul -before_test: - # print cv build info - - call %OCTAVE% --eval "disp(cv.getBuildInformation())" + # build docs + - if "%WITH_DOXY%" == "yes" ( doxygen Doxyfile ) test_script: # run test suite - #HACK: we reset %ERRORLEVEL% because we can't rely on Octave exit code - if "%WITH_MAKE%" == "yes" ( sh --login -c "cd \"$MCV_ROOT\" && mingw32-make $MAKE_OPTS test" ) else ( - %OCTAVE% --eval "cd test;try,UnitTest('ContribModules',true,'Verbosity',2);catch e,disp(e);exit(1);end;exit(0);" || ver > 
nul + cd "%MCV_ROOT%\.ci" && octave-cli %OCTAVE_OPTS% --eval "%TEST_CMD%" ) after_test: # upload xUnit test results - - ps: (New-Object System.Net.WebClient).UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\test\tests.xml)) + - ps: | + $xUnitFile = "$env:MCV_ROOT\.ci\tests.xml" + $url = "https://ci.appveyor.com/api/testresults/junit/$env:APPVEYOR_JOB_ID" + if (Test-Path -Path $xUnitFile) { + (New-Object System.Net.WebClient).UploadFile($url, $xUnitFile) + } diff --git a/doc/helptoc.xml b/doc/helptoc.xml new file mode 100644 index 000000000..b97f62780 --- /dev/null +++ b/doc/helptoc.xml @@ -0,0 +1,522 @@ + + + mexopencv + Getting Started + Developing a new MEX function + Gotchas + + Readme + Installation + OpenCV 3 + Windows + MATLAB + Windows + Octave + Linux + MATLAB + Linux + Octave + + OpenCV 2 + Linux + MATLAB + + + Troubleshooting + Windows + UNIX + + + Functions + opencv - Main Modules + core: Core Functionality + cv.borderInterpolate + cv.copyMakeBorder + cv.add + cv.subtract + cv.multiply + cv.divide + cv.addWeighted + cv.convertScaleAbs + cv.convertFp16 + cv.LUT + cv.norm + cv.PSNR + cv.batchDistance + cv.normalize + cv.flip + cv.rotate + cv.bitwise_and + cv.bitwise_or + cv.bitwise_xor + cv.bitwise_not + cv.absdiff + cv.inRange + cv.compare + cv.polarToCart + cv.cartToPolar + cv.phase + cv.magnitude + cv.transform + cv.perspectiveTransform + cv.invert + cv.solve + cv.eigen + cv.eigenNonSymmetric + cv.calcCovarMatrix + cv.Mahalanobis + cv.dft + cv.dct + cv.mulSpectrums + cv.getOptimalDFTSize + cv.setRNGSeed + cv.PCA + cv.LDA + cv.SVD + cv.kmeans + cv.Rect + cv.RotatedRect + cv.copyTo + cv.convertTo + cv.FileStorage + cv.tempfile + cv.glob + cv.Utils + cv.getBuildInformation + cv.TickMeter + cv.DownhillSolver + cv.ConjGradSolver + cv.solveLP + + imgproc: Image Processing + cv.GeneralizedHoughBallard + cv.GeneralizedHoughGuil + cv.CLAHE + cv.Subdiv2D + cv.LineSegmentDetector + cv.getGaussianKernel + cv.getDerivKernels + cv.getGaborKernel + cv.getStructuringElement + cv.medianBlur + cv.GaussianBlur + cv.bilateralFilter + cv.boxFilter + cv.sqrBoxFilter + cv.blur + cv.filter2D + cv.sepFilter2D + cv.Sobel + cv.spatialGradient + cv.Scharr + cv.Laplacian + cv.Canny + cv.Canny2 + cv.cornerMinEigenVal + cv.cornerHarris + cv.cornerEigenValsAndVecs + cv.preCornerDetect + cv.cornerSubPix + cv.goodFeaturesToTrack + cv.HoughLines + cv.HoughLinesP + cv.HoughCircles + cv.erode + cv.dilate + cv.morphologyEx + cv.resize + cv.warpAffine + cv.warpPerspective + cv.remap + cv.convertMaps + cv.getRotationMatrix2D + cv.getAffineTransform + cv.invertAffineTransform + cv.getPerspectiveTransform + cv.getRectSubPix + cv.logPolar + cv.linearPolar + cv.integral + cv.accumulate + cv.accumulateSquare + cv.accumulateProduct + cv.accumulateWeighted + cv.phaseCorrelate + cv.createHanningWindow + cv.threshold + cv.adaptiveThreshold + cv.pyrDown + cv.pyrUp + cv.buildPyramid + cv.undistort + cv.initUndistortRectifyMap + cv.initWideAngleProjMap + cv.getDefaultNewCameraMatrix + cv.undistortPoints + cv.calcHist + cv.calcBackProject + cv.compareHist + cv.equalizeHist + cv.EMD + cv.watershed + cv.pyrMeanShiftFiltering + cv.grabCut + cv.distanceTransform + cv.floodFill + cv.cvtColor + cv.demosaicing + cv.moments + cv.HuMoments + cv.matchTemplate + cv.connectedComponents + cv.findContours + cv.approxPolyDP + cv.arcLength + cv.boundingRect + cv.contourArea + cv.minAreaRect + cv.boxPoints + cv.minEnclosingCircle + cv.minEnclosingTriangle + cv.matchShapes + cv.convexHull + 
cv.convexityDefects + cv.isContourConvex + cv.intersectConvexConvex + cv.fitEllipse + cv.fitLine + cv.pointPolygonTest + cv.rotatedRectangleIntersection + cv.blendLinear + cv.applyColorMap + cv.line + cv.arrowedLine + cv.rectangle + cv.circle + cv.ellipse + cv.drawMarker + cv.fillConvexPoly + cv.fillPoly + cv.polylines + cv.drawContours + cv.clipLine + cv.ellipse2Poly + cv.putText + cv.getTextSize + cv.LineIterator + + imgcodecs: Image File Reading and Writing + cv.imread + cv.imreadmulti + cv.imwrite + cv.imdecode + cv.imencode + + videoio: Video I/O + cv.VideoCapture + cv.VideoWriter + + video: Video Analysis + cv.CamShift + cv.meanShift + cv.buildOpticalFlowPyramid + cv.calcOpticalFlowPyrLK + cv.calcOpticalFlowFarneback + cv.estimateRigidTransform + cv.findTransformECC + cv.KalmanFilter + cv.DualTVL1OpticalFlow + cv.FarnebackOpticalFlow + cv.SparsePyrLKOpticalFlow + cv.BackgroundSubtractorMOG2 + cv.BackgroundSubtractorKNN + + calib3d: Camera Calibration and 3D Reconstruction + cv.Rodrigues + cv.findHomography + cv.RQDecomp3x3 + cv.decomposeProjectionMatrix + cv.matMulDeriv + cv.composeRT + cv.projectPoints + cv.solvePnP + cv.solvePnPRansac + cv.solveP3P + cv.initCameraMatrix2D + cv.findChessboardCorners + cv.find4QuadCornerSubpix + cv.drawChessboardCorners + cv.findCirclesGrid + cv.calibrateCamera + cv.calibrationMatrixValues + cv.stereoCalibrate + cv.stereoRectify + cv.stereoRectifyUncalibrated + cv.rectify3Collinear + cv.getOptimalNewCameraMatrix + cv.convertPointsToHomogeneous + cv.convertPointsFromHomogeneous + cv.findFundamentalMat + cv.findEssentialMat + cv.decomposeEssentialMat + cv.recoverPose + cv.computeCorrespondEpilines + cv.triangulatePoints + cv.correctMatches + cv.filterSpeckles + cv.getValidDisparityROI + cv.validateDisparity + cv.reprojectImageTo3D + cv.sampsonDistance + cv.estimateAffine3D + cv.estimateAffine2D + cv.estimateAffinePartial2D + cv.decomposeHomographyMat + cv.StereoBM + cv.StereoSGBM + + features2d: 2D Features Framework + cv.KeyPointsFilter + cv.FeatureDetector + cv.DescriptorExtractor + cv.BRISK + cv.ORB + cv.MSER + cv.FAST + cv.FastFeatureDetector + cv.AGAST + cv.AgastFeatureDetector + cv.GFTTDetector + cv.SimpleBlobDetector + cv.KAZE + cv.AKAZE + cv.DescriptorMatcher + cv.drawKeypoints + cv.drawMatches + cv.evaluateFeatureDetector + cv.computeRecallPrecisionCurve + cv.BOWKMeansTrainer + cv.BOWImgDescriptorExtractor + + objdetect: Object Detection + cv.SimilarRects + cv.groupRectangles + cv.groupRectangles_meanshift + cv.CascadeClassifier + cv.HOGDescriptor + cv.DetectionBasedTracker + + dnn: Deep Neural Network + cv.Net + + ml: Machine Learning + cv.NormalBayesClassifier + cv.KNearest + cv.SVM + cv.EM + cv.DTrees + cv.RTrees + cv.Boost + cv.ANN_MLP + cv.LogisticRegression + cv.SVMSGD + cv.randMVNormal + cv.createConcentricSpheresTestSet + + photo: Computational Photography + cv.inpaint + cv.fastNlMeansDenoising + cv.fastNlMeansDenoisingColored + cv.fastNlMeansDenoisingMulti + cv.fastNlMeansDenoisingColoredMulti + cv.denoise_TVL1 + cv.Tonemap + cv.TonemapDrago + cv.TonemapDurand + cv.TonemapReinhard + cv.TonemapMantiuk + cv.AlignMTB + cv.CalibrateDebevec + cv.CalibrateRobertson + cv.MergeDebevec + cv.MergeMertens + cv.MergeRobertson + cv.decolor + cv.seamlessClone + cv.colorChange + cv.illuminationChange + cv.textureFlattening + cv.edgePreservingFilter + cv.detailEnhance + cv.pencilSketch + cv.stylization + + stitching: Images Stitching + cv.RotationWarper + cv.FeaturesFinder + cv.FeaturesMatcher + cv.Estimator + cv.BundleAdjuster + 
cv.ExposureCompensator + cv.SeamFinder + cv.Blender + cv.Timelapser + cv.Stitcher + + shape: Shape Distance and Matching + cv.EMDL1 + cv.ShapeTransformer + cv.ShapeContextDistanceExtractor + cv.HausdorffDistanceExtractor + + superres: Super Resolution + cv.SuperResolution + + videostab: Video Stabilization + cv.estimateGlobalMotionLeastSquares + cv.estimateGlobalMotionRansac + cv.calcBlurriness + cv.OnePassStabilizer + cv.TwoPassStabilizer + + + opencv_contrib - Extra Modules + aruco: ArUco Marker Detection + cv.dictionaryDump + cv.detectMarkers + cv.estimatePoseSingleMarkers + cv.boardDump + cv.estimatePoseBoard + cv.refineDetectedMarkers + cv.drawDetectedMarkers + cv.drawAxis + cv.drawMarkerAruco + cv.drawPlanarBoard + cv.calibrateCameraAruco + cv.getBoardObjectAndImagePoints + cv.drawCharucoBoard + cv.interpolateCornersCharuco + cv.estimatePoseCharucoBoard + cv.drawDetectedCornersCharuco + cv.calibrateCameraCharuco + cv.detectCharucoDiamond + cv.drawDetectedDiamonds + cv.drawCharucoDiamond + + bgsegm: Improved Background-Foreground Segmentation Methods + cv.BackgroundSubtractorMOG + cv.BackgroundSubtractorGMG + cv.BackgroundSubtractorCNT + + bioinspired: Biologically Inspired Vision Models and Derived Tools + cv.Retina + cv.RetinaFastToneMapping + cv.TransientAreasSegmentationModule + + datasets: Framework for Working with Different Datasets + cv.Dataset + + dpm: Deformable Part-based Models + cv.DPMDetector + + face: Face Recognition + cv.BasicFaceRecognizer + cv.LBPHFaceRecognizer + cv.BIF + + img_hash: Image Hashing Algorithms + cv.ImgHash + + line_descriptor: Binary Descriptors for Lines Extracted from an Image + cv.BinaryDescriptor + cv.LSDDetector + cv.BinaryDescriptorMatcher + cv.drawLineMatches + cv.drawKeylines + + optflow: Optical Flow Algorithms + cv.OpticalFlowPCAFlow + cv.GPCForest + cv.calcOpticalFlowSF + cv.calcOpticalFlowDF + cv.calcOpticalFlowSparseToDense + cv.readOpticalFlow + cv.writeOpticalFlow + cv.VariationalRefinement + cv.DISOpticalFlow + cv.updateMotionHistory + cv.calcMotionGradient + cv.calcGlobalOrientation + cv.segmentMotion + + plot: Plot Function for Mat Data + cv.Plot2d + + saliency: Saliency API + cv.StaticSaliencySpectralResidual + cv.StaticSaliencyFineGrained + cv.MotionSaliencyBinWangApr2014 + cv.ObjectnessBING + + xfeatures2d: Extra 2D Features Framework + cv.SIFT + cv.SURF + cv.FREAK + cv.StarDetector + cv.BriefDescriptorExtractor + cv.LUCID + cv.LATCH + cv.DAISY + cv.MSDDetector + cv.VGG + cv.BoostDesc + cv.PCTSignatures + cv.PCTSignaturesSQFD + cv.HarrisLaplaceFeatureDetector + cv.AffineFeature2D + cv.FASTForPointSet + + ximgproc: Extended Image Processing + cv.DTFilter + cv.GuidedFilter + cv.AdaptiveManifoldFilter + cv.jointBilateralFilter + cv.bilateralTextureFilter + cv.rollingGuidanceFilter + cv.FastGlobalSmootherFilter + cv.l0Smooth + cv.DisparityWLSFilter + cv.EdgeAwareInterpolator + cv.StructuredEdgeDetection + cv.SuperpixelSEEDS + cv.SuperpixelSLIC + cv.SuperpixelLSC + cv.GraphSegmentation + cv.SelectiveSearchSegmentation + cv.FastHoughTransform + cv.HoughPoint2Line + cv.FastLineDetector + cv.covarianceEstimation + cv.weightedMedianFilter + cv.GradientPaillou + cv.GradientDeriche + cv.PeiLinNormalization + cv.niBlackThreshold + cv.thinning + cv.anisotropicDiffusion + + xobjdetect: Extended Object Detection + cv.WBDetector + + xphoto: Additional Photo Processing Algorithms + cv.inpaint2 + cv.SimpleWB + cv.GrayworldWB + cv.LearningBasedWB + cv.applyChannelGains + cv.dctDenoising + cv.bm3dDenoising + + + Links + mexopencv + OpenCV + 
+ diff --git a/doc/opencv.png b/doc/opencv.png new file mode 100644 index 000000000..588f66b1e Binary files /dev/null and b/doc/opencv.png differ diff --git a/include/mexopencv.hpp b/include/mexopencv.hpp index caff3b777..bdc67f40e 100644 --- a/include/mexopencv.hpp +++ b/include/mexopencv.hpp @@ -174,7 +174,7 @@ const ConstMap<int, std::string> NormTypeInv = ConstMap<int, std::string> do { \ if ((TF)) { (NUM) |= (BIT); } \ else { (NUM) &= ~(BIT); } \ - } while(0) + } while (0) /// Alias for input/output arguments number check inline void nargchk(bool cond) @@ -211,7 +211,7 @@ std::vector<cv::Point_<T> > MxArrayToVectorPoint(const MxArray& arr) if (arr.numel() == 2) vp.push_back(arr.toPoint_<T>()); else - arr.toMat(cv::DataType<cv::Point_<T> >::depth).reshape(2, 0).copyTo(vp); + arr.toMat(cv::traits::Depth<cv::Point_<T> >::value).reshape(2, 0).copyTo(vp); } else if (arr.isCell()) { /* @@ -253,7 +253,7 @@ std::vector<cv::Point3_<T> > MxArrayToVectorPoint3(const MxArray& arr) if (arr.numel() == 3) vp.push_back(arr.toPoint3_<T>()); else - arr.toMat(cv::DataType<cv::Point3_<T> >::depth).reshape(3, 0).copyTo(vp); + arr.toMat(cv::traits::Depth<cv::Point3_<T> >::value).reshape(3, 0).copyTo(vp); } else if (arr.isCell()) { /* @@ -296,7 +296,7 @@ std::vector<cv::Rect_<T> > MxArrayToVectorRect(const MxArray& arr) if (arr.numel() == 4) vr.push_back(arr.toRect_<T>()); else - arr.toMat(cv::DataType<cv::Rect_<T> >::depth).reshape(4, 0).copyTo(vr); + arr.toMat(cv::traits::Depth<cv::Rect_<T> >::value).reshape(4, 0).copyTo(vr); } else if (arr.isCell()) { /* @@ -339,7 +339,7 @@ std::vector<cv::Vec<T,cn> > MxArrayToVectorVec(const MxArray& arr) if (arr.numel() == cn) vv.push_back(arr.toVec<T,cn>()); else - arr.toMat(cv::Vec<T,cn>::depth).reshape(cn, 0).copyTo(vv); + arr.toMat(cv::traits::Depth<cv::Vec<T,cn> >::value).reshape(cn, 0).copyTo(vv); } else if (arr.isCell()) { /* diff --git a/info.xml b/info.xml new file mode 100644 index 000000000..2747978a2 --- /dev/null +++ b/info.xml @@ -0,0 +1,11 @@ + <?xml version="1.0" encoding="utf-8"?> + <productinfo> + <matlabrelease>14</matlabrelease> + <name>mexopencv</name> + <type>toolbox</type> + <icon>$toolbox/matlab/icons/bookicon.gif</icon> + <help_location>doc</help_location> + <help_contents_icon>doc/opencv.png</help_contents_icon> + </productinfo> diff --git a/lib/.gitignore b/lib/.gitkeep similarity index 100% rename from lib/.gitignore rename to lib/.gitkeep diff --git a/opencv_contrib/+cv/AdaptiveManifoldFilter.m b/opencv_contrib/+cv/AdaptiveManifoldFilter.m index c73c98500..b87c2b3ba 100644 --- a/opencv_contrib/+cv/AdaptiveManifoldFilter.m +++ b/opencv_contrib/+cv/AdaptiveManifoldFilter.m @@ -24,7 +24,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -46,22 +47,21 @@ function this = AdaptiveManifoldFilter(varargin) %ADAPTIVEMANIFOLDFILTER Factory method, creates an instance of AdaptiveManifoldFilter and performs some initialization routines % - % obj = cv.AdaptiveManifoldFilter() - % obj = cv.AdaptiveManifoldFilter('OptionName',optionValue, ...) + % obj = cv.AdaptiveManifoldFilter() + % obj = cv.AdaptiveManifoldFilter('OptionName',optionValue, ...) % % ## Options % * __SigmaS__ spatial standard deviation. default 16.0 % * __SigmaR__ color space standard deviation, it is similar to - % the sigma in the color space of cv.bilateralFilter. - % default 0.2 + % the sigma in the color space of cv.bilateralFilter. + % default 0.2 % * __AdjustOutliers__ optional, specify whether to perform the outliers adjust - % operation, (Eq. 9) in the original paper. - % default false + % operation, (Eq. 9) in the original paper. default false % % For more details about Adaptive Manifold Filter parameters, see % the original article [Gastal12]. % - % ## Note + % ### Note % Joint images with `uint8` and `uint16` depth are converted to images % with `single` depth and [0; 1] color range before processing. 
% Hence color space `SigmaR` must be in [0; 1] range, unlike same @@ -75,7 +75,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.AdaptiveManifoldFilter % @@ -89,7 +89,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.AdaptiveManifoldFilter.empty, % cv.AdaptiveManifoldFilter.load @@ -100,11 +100,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.AdaptiveManifoldFilter.clear, % cv.AdaptiveManifoldFilter.load @@ -115,7 +115,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -131,21 +131,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -159,11 +159,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.AdaptiveManifoldFilter.save, % cv.AdaptiveManifoldFilter.load @@ -177,7 +177,7 @@ function load(this, fname_or_str, varargin) function collectGarbage(this) %COLLECTGARBAGE Collect garbage % - % obj.collectGarbage() + % obj.collectGarbage() % % See also: cv.AdaptiveManifoldFilter % @@ -187,13 +187,13 @@ function collectGarbage(this) function dst = filter(this, src, varargin) %FILTER Apply high-dimensional filtering using adaptive manifolds % - % dst = obj.filter(src) - % dst = obj.filter(src, joint) + % dst = obj.filter(src) + % dst = obj.filter(src, joint) % % ## Input % * __src__ filtering image with any number of channels. % * __joint__ optional joint (also called guided) image with - % any number of channels. + % any number of channels. % % ## Output % * __dst__ output image. 
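The constructor, `filter`, and `collectGarbage` methods above are typically combined as in the following minimal sketch (the input file name is a placeholder; the `SigmaS`/`SigmaR` values are just the documented defaults made explicit):

    % edge-preserving smoothing, using the image as its own joint (guidance) image
    img = cv.imread('input.jpg');  % hypothetical input image
    amf = cv.AdaptiveManifoldFilter('SigmaS',16.0, 'SigmaR',0.2);
    dst = amf.filter(img, img);
    amf.collectGarbage();          % collect garbage, as documented above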
@@ -253,13 +253,13 @@ function collectGarbage(this) function dst = amFilter(src, joint, varargin) %AMFILTER Simple one-line Adaptive Manifold Filter call % - % dst = cv.AdaptiveManifoldFilter.amFilter(src, joint) - % dst = cv.AdaptiveManifoldFilter.amFilter(src, joint, 'OptionName',optionValue, ...) + % dst = cv.AdaptiveManifoldFilter.amFilter(src, joint) + % dst = cv.AdaptiveManifoldFilter.amFilter(src, joint, 'OptionName',optionValue, ...) % % ## Input % * __src__ filtering image with any number of channels. % * __joint__ joint (also called guided) image or array of - % images with any number of channels. + % images with any number of channels. % % ## Output % * __dst__ output image. @@ -267,13 +267,12 @@ function collectGarbage(this) % ## Options % * __SigmaS__ spatial standard deviation. default 16.0 % * __SigmaR__ color space standard deviation, it is similar to - % the sigma in the color space of cv.bilateralFilter. - % default 0.2 + % the sigma in the color space of cv.bilateralFilter. + % default 0.2 % * __AdjustOutliers__ optional, specify whether to perform the outliers adjust - % operation, (Eq. 9) in the original paper. - % default false + % operation, (Eq. 9) in the original paper. default false % - % ## Note + % ### Note % Joint images with `uint8` and `uint16` depth are converted to images % with `single` depth and [0; 1] color range before processing. % Hence color space `SigmaR` must be in [0; 1] range, unlike same diff --git a/opencv_contrib/+cv/AffineFeature2D.m b/opencv_contrib/+cv/AffineFeature2D.m index 299c153a5..f5b0bdc49 100644 --- a/opencv_contrib/+cv/AffineFeature2D.m +++ b/opencv_contrib/+cv/AffineFeature2D.m @@ -13,7 +13,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end %% Constructor/destructor @@ -21,21 +22,20 @@ function this = AffineFeature2D(varargin) %AFFINEFEATURE2D Constructor % - % obj = cv.AffineFeature2D(detector) - % obj = cv.AffineFeature2D(detector, extractor) + % obj = cv.AffineFeature2D(detector) + % obj = cv.AffineFeature2D(detector, extractor) % - % obj = cv.AffineFeature2D({detector, 'key',val,...}) - % obj = cv.AffineFeature2D({detector, 'key',val,...}, {extractor, 'key',val,...}) + % obj = cv.AffineFeature2D({detector, 'key',val,...}) + % obj = cv.AffineFeature2D({detector, 'key',val,...}, {extractor, 'key',val,...}) % % ## Input % * __detector__ Feature detector. It can be specified by a string - % specifying the type of feature detector, such as - % 'HarrisLaplaceFeatureDetector'. See - % cv.FeatureDetector.FeatureDetector for possible types. + % specifying the type of feature detector, such as + % 'HarrisLaplaceFeatureDetector'. See + % cv.FeatureDetector.FeatureDetector for possible types. % * __extractor__ Descriptor extractor. It can be specified by a - % string containing the type of descriptor extractor. See - % cv.DescriptorExtractor.DescriptorExtractor for possible - % types. + % string containing the type of descriptor extractor. See + % cv.DescriptorExtractor.DescriptorExtractor for possible types. % % In the first variant, it creates descriptor detector/extractor % of the given types using default parameters (by calling the @@ -58,11 +58,11 @@ % % ## Example % - % detector = cv.AffineFeature2D('HarrisLaplaceFeatureDetector'); + % detector = cv.AffineFeature2D('HarrisLaplaceFeatureDetector'); % - % detector = cv.AffineFeature2D(... - % {'HarrisLaplaceFeatureDetector', 'NumOctaves',6}, ... - % {'SURF', 'Upright',false}); + % detector = cv.AffineFeature2D(... 
+ % {'HarrisLaplaceFeatureDetector', 'NumOctaves',6}, ... + % {'SURF', 'Upright',false}); % % See also: cv.AffineFeature2D.detectAndCompute % @@ -72,7 +72,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.AffineFeature2D % @@ -83,7 +83,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -97,7 +97,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.AffineFeature2D.empty, cv.AffineFeature2D.load % @@ -107,11 +107,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.AffineFeature2D.clear, cv.AffineFeature2D.load % @@ -121,7 +121,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -137,21 +137,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -165,11 +165,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.AffineFeature2D.save, cv.AffineFeature2D.load % @@ -182,16 +182,16 @@ function load(this, fname_or_str, varargin) function ntype = defaultNorm(this) %DEFAULTNORM Returns the default norm type % - % ntype = obj.defaultNorm() + % ntype = obj.defaultNorm() % % ## Output % * __ntype__ Norm type. 
One of `cv::NormTypes`: - % * __Inf__ - % * __L1__ - % * __L2__ - % * __L2Sqr__ - % * __Hamming__ - % * __Hamming2__ + % * __Inf__ + % * __L1__ + % * __L2__ + % * __L2Sqr__ + % * __Hamming__ + % * __Hamming2__ % % See also: cv.AffineFeature2D.compute, cv.DescriptorMatcher % @@ -201,7 +201,7 @@ function load(this, fname_or_str, varargin) function sz = descriptorSize(this) %DESCRIPTORSIZE Returns the descriptor size % - % sz = obj.descriptorSize() + % sz = obj.descriptorSize() % % ## Output % * __sz__ Descriptor size. @@ -215,7 +215,7 @@ function load(this, fname_or_str, varargin) function dtype = descriptorType(this) %DESCRIPTORTYPE Returns the descriptor type % - % dtype = obj.descriptorType() + % dtype = obj.descriptorType() % % ## Output % * __dtype__ Descriptor type, one of numeric MATLAB class names. @@ -229,27 +229,25 @@ function load(this, fname_or_str, varargin) function keypoints = detect(this, img, varargin) %DETECT Detects keypoints in an image or image set % - % keypoints = obj.detect(img) - % keypoints = obj.detect(imgs) - % [...] = obj.detect(..., 'OptionName',optionValue, ...) + % keypoints = obj.detect(img) + % keypoints = obj.detect(imgs) + % [...] = obj.detect(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Image (first variant), 8-bit grayscale image. % * __imgs__ Image set (second variant), cell array of images. % % ## Output - % * __keypoints__ The detected keypoints. In the first variant, - % a 1-by-N structure array. In the second variant of the - % method, `keypoints{i}` is a set of keypoints detected in - % `imgs{i}`. + % * __keypoints__ The detected keypoints. In the first variant, a + % 1-by-N structure array. In the second variant of the method, + % `keypoints{i}` is a set of keypoints detected in `imgs{i}`. % % ## Options % * __Mask__ A mask specifying where to look for keypoints - % (optional). It must be a logical or 8-bit integer matrix - % with non-zero values in the region of interest. In the - % second variant, it is a cell-array of masks for each input - % image, `masks{i}` is a mask for `imgs{i}`. - % Not set by default. + % (optional). It must be a logical or 8-bit integer matrix with + % non-zero values in the region of interest. In the second + % variant, it is a cell-array of masks for each input image, + % `masks{i}` is a mask for `imgs{i}`. Not set by default. % % See also: cv.AffineFeature2D.compute, % cv.AffineFeature2D.detectAndCompute @@ -260,26 +258,26 @@ function load(this, fname_or_str, varargin) function [descriptors, keypoints] = compute(this, img, keypoints) %COMPUTE Computes the descriptors for a set of keypoints detected in an image or image set % - % [descriptors, keypoints] = obj.compute(img, keypoints) - % [descriptors, keypoints] = obj.compute(imgs, keypoints) + % [descriptors, keypoints] = obj.compute(img, keypoints) + % [descriptors, keypoints] = obj.compute(imgs, keypoints) % % ## Input % * __img__ Image (first variant), 8-bit grayscale image. % * __imgs__ Image set (second variant), cell array of images. % * __keypoints__ Input collection of keypoints. Keypoints for - % which a descriptor cannot be computed are removed. - % Sometimes new keypoints can be added, for example: cv.AffineFeature2D - % duplicates keypoint with several dominant orientations - % (for each orientation). In the first variant, this is a - % struct-array of detected keypoints. In the second variant, - % it is a cell-array, where `keypoints{i}` is a set of keypoints - % detected in `imgs{i}` (a struct-array like before). 
+ % which a descriptor cannot be computed are removed. Sometimes + % new keypoints can be added, for example: cv.AffineFeature2D + % duplicates keypoint with several dominant orientations (for + % each orientation). In the first variant, this is a + % struct-array of detected keypoints. In the second variant, it + % is a cell-array, where `keypoints{i}` is a set of keypoints + % detected in `imgs{i}` (a struct-array like before). % % ## Output % * __descriptors__ Computed descriptors. In the second variant of - % the method `descriptors{i}` are descriptors computed for a - % `keypoints{i}`. Row `j` in `descriptors` (or - % `descriptors{i}`) is the descriptor for `j`-th keypoint. + % the method `descriptors{i}` are descriptors computed for a + % `keypoints{i}`. Row `j` in `descriptors` (or `descriptors{i}`) + % is the descriptor for `j`-th keypoint. % * __keypoints__ Optional output with possibly updated keypoints. % % See also: cv.AffineFeature2D.detect, @@ -291,41 +289,41 @@ function load(this, fname_or_str, varargin) function [keypoints, descriptors] = detectAndCompute(this, img, varargin) %DETECTANDCOMPUTE Detects keypoints and computes their descriptors % - % [keypoints, descriptors] = obj.detectAndCompute(img) - % [...] = obj.detectAndCompute(..., 'OptionName',optionValue, ...) + % [keypoints, descriptors] = obj.detectAndCompute(img) + % [...] = obj.detectAndCompute(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Image, input 8-bit grayscale image. % % ## Output % * __keypoints__ The detected keypoints. A 1-by-N structure array - % with the following fields: - % * __pt__ coordinates of the keypoint `[x,y]` - % * __size__ diameter of the meaningful keypoint neighborhood - % * __angle__ computed orientation of the keypoint (-1 if not - % applicable); it's in [0,360) degrees and measured - % relative to image coordinate system (y-axis is - % directed downward), i.e in clockwise. - % * __response__ the response by which the most strong - % keypoints have been selected. Can be used for further - % sorting or subsampling. - % * __octave__ octave (pyramid layer) from which the keypoint - % has been extracted. - % * **class_id** object class (if the keypoints need to be - % clustered by an object they belong to). + % with the following fields: + % * __pt__ coordinates of the keypoint `[x,y]` + % * __size__ diameter of the meaningful keypoint neighborhood + % * __angle__ computed orientation of the keypoint (-1 if not + % applicable); it's in [0,360) degrees and measured relative + % to image coordinate system (y-axis is directed downward), + % i.e in clockwise. + % * __response__ the response by which the most strong keypoints + % have been selected. Can be used for further sorting or + % subsampling. + % * __octave__ octave (pyramid layer) from which the keypoint + % has been extracted. + % * **class_id** object class (if the keypoints need to be + % clustered by an object they belong to). % * __descriptors__ Computed descriptors. Output concatenated - % vectors of descriptors. Each descriptor is a 128-element - % vector, as returned by cv.AffineFeature2D.descriptorSize, - % so the total size of descriptors will be - % `numel(keypoints) * obj.descriptorSize()`. A matrix of - % size N-by-128 of class `single`, one row per keypoint. + % vectors of descriptors. Each descriptor is a 128-element + % vector, as returned by cv.AffineFeature2D.descriptorSize, so + % the total size of descriptors will be + % `numel(keypoints) * obj.descriptorSize()`. 
A matrix of size + % N-by-128 of class `single`, one row per keypoint. % % ## Options % * __Mask__ optional mask specifying where to look for keypoints. - % Not set by default. + % Not set by default. % * __Keypoints__ If passed, then the method will use the provided - % vector of keypoints instead of detecting them, and the - % algorithm just computes their descriptors. + % vector of keypoints instead of detecting them, and the + % algorithm just computes their descriptors. % % See also: cv.AffineFeature2D.detect, cv.AffineFeature2D.compute % @@ -338,8 +336,8 @@ function load(this, fname_or_str, varargin) function keypoints = detect_elliptic(this, img, varargin) %DETECT_ELLIPTIC Detects keypoints in an image % - % keypoints = obj.detect_elliptic(img) - % [...] = obj.detect_elliptic(..., 'OptionName',optionValue, ...) + % keypoints = obj.detect_elliptic(img) + % [...] = obj.detect_elliptic(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Image, 8-bit grayscale image. @@ -349,7 +347,7 @@ function load(this, fname_or_str, varargin) % % ## Options % * __Mask__ optional mask specifying where to look for keypoints. - % Not set by default. + % Not set by default. % % Detects keypoints in the image using the wrapped detector and % performs affine adaptation to augment them with their elliptic @@ -364,43 +362,43 @@ function load(this, fname_or_str, varargin) function [keypoints, descriptors] = detectAndCompute_elliptic(this, img, varargin) %DETECTANDCOMPUTE_ELLIPTIC Detects keypoints and computes their descriptors % - % [keypoints, descriptors] = obj.detectAndCompute_elliptic(img) - % [...] = obj.detectAndCompute_elliptic(..., 'OptionName',optionValue, ...) + % [keypoints, descriptors] = obj.detectAndCompute_elliptic(img) + % [...] = obj.detectAndCompute_elliptic(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Image, input 8-bit grayscale image. % % ## Output % * __keypoints__ The detected keypoints (elliptic region around - % an interest point). A 1-by-N structure array with the - % following fields: - % * __pt__ coordinates of the keypoint `[x,y]` - % * __size__ diameter of the meaningful keypoint neighborhood - % * __angle__ computed orientation of the keypoint (-1 if not - % applicable); it's in [0,360) degrees and measured - % relative to image coordinate system (y-axis is - % directed downward), i.e in clockwise. - % * __response__ the response by which the most strong - % keypoints have been selected. Can be used for further - % sorting or subsampling. - % * __octave__ octave (pyramid layer) from which the keypoint - % has been extracted. - % * **class_id** object class (if the keypoints need to be - % clustered by an object they belong to). - % * __axes__ the lengths of the major and minor ellipse axes - % `[ax1,ax2]` - % * __si__ the integration scale at which the parameters - % were estimated. - % * __transf__ the transformation between image space and - % local patch space, 2x3 matrix. + % an interest point). A 1-by-N structure array with the + % following fields: + % * __pt__ coordinates of the keypoint `[x,y]` + % * __size__ diameter of the meaningful keypoint neighborhood + % * __angle__ computed orientation of the keypoint (-1 if not + % applicable); it's in [0,360) degrees and measured relative + % to image coordinate system (y-axis is directed downward), + % i.e in clockwise. + % * __response__ the response by which the most strong + % keypoints have been selected. Can be used for further + % sorting or subsampling. 
+ % * __octave__ octave (pyramid layer) from which the keypoint + % has been extracted. + % * **class_id** object class (if the keypoints need to be + % clustered by an object they belong to). + % * __axes__ the lengths of the major and minor ellipse axes + % `[ax1,ax2]` + % * __si__ the integration scale at which the parameters were + % estimated. + % * __transf__ the transformation between image space and local + % patch space, 2x3 matrix. % * __descriptors__ Computed descriptors. % % ## Options % * __Mask__ optional mask specifying where to look for keypoints. - % Not set by default. + % Not set by default. % * __Keypoints__ If passed, then the method will use the provided - % vector of keypoints instead of detecting them, and the - % algorithm just computes their descriptors. + % vector of keypoints instead of detecting them, and the + % algorithm just computes their descriptors. % % Detects keypoints and computes descriptors for their surrounding % regions, after warping them into circles. diff --git a/opencv_contrib/+cv/BIF.m b/opencv_contrib/+cv/BIF.m index 71f63a4d5..16f87dc16 100644 --- a/opencv_contrib/+cv/BIF.m +++ b/opencv_contrib/+cv/BIF.m @@ -35,14 +35,14 @@ function this = BIF(varargin) %BIF Constructor % - % obj = cv.BIF() - % obj = cv.BIF('OptionName',optionValue, ...) + % obj = cv.BIF() + % obj = cv.BIF('OptionName',optionValue, ...) % % ## Options % * __NumBands__ The number of filter bands (`<= 8`) used for - % computing BIF. default 8 + % computing BIF. default 8 % * __NumRotations__ The number of image rotations for computing - % BIF. default 12 + % BIF. default 12 % % See also: cv.BIF.compute % @@ -52,6 +52,8 @@ function delete(this) %DELETE Destructor % + % obj.delete() + % % See also: cv.BIF % if isempty(this.id), return; end @@ -64,7 +66,7 @@ function delete(this) function features = compute(this, img) %COMPUTE Computes features sby input image % - % features = model.compute(img) + % features = model.compute(img) % % ## Input % * __image__ Input image (1-channel `single`). @@ -83,7 +85,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.BIF.empty, cv.BIF.load % @@ -93,11 +95,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty. % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.BIF.clear, cv.BIF.load % @@ -107,7 +109,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -123,21 +125,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). 
default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -151,11 +153,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.BIF.save, cv.BIF.load % diff --git a/opencv_contrib/+cv/BackgroundSubtractorCNT.m b/opencv_contrib/+cv/BackgroundSubtractorCNT.m index 0b62d736e..44e147562 100644 --- a/opencv_contrib/+cv/BackgroundSubtractorCNT.m +++ b/opencv_contrib/+cv/BackgroundSubtractorCNT.m @@ -1,12 +1,12 @@ classdef BackgroundSubtractorCNT < handle %BACKGROUNDSUBTRACTORCNT Background subtraction based on counting % - % About as fast as BackgroundSubtractorMOG2 on a high end system. + % About as fast as cv.BackgroundSubtractorMOG2 on a high end system. % More than twice faster than MOG2 on cheap hardware (benchmarked on % Raspberry Pi3). % - % Algorithm by Sagi Zeevi: - % https://github.com/sagi-z/BackgroundSubtractorCNT + % Algorithm by: + % [Sagi Zeevi](https://github.com/sagi-z/BackgroundSubtractorCNT) % % See also: cv.BackgroundSubtractorCNT.BackgroundSubtractorCNT, % cv.BackgroundSubtractorCNT.apply, @@ -14,7 +14,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -33,18 +34,18 @@ function this = BackgroundSubtractorCNT(varargin) %BACKGROUNDSUBTRACTORCNT Creates a CNT Background Subtractor % - % bs = cv.BackgroundSubtractorCNT() - % bs = cv.BackgroundSubtractorCNT('OptionName', optionValue, ...) + % bs = cv.BackgroundSubtractorCNT() + % bs = cv.BackgroundSubtractorCNT('OptionName', optionValue, ...) % % ## Options % * __MinPixelStability__ number of frames with same pixel color - % to consider stable. default 15 + % to consider stable. default 15 % * __MaxPixelStability__ maximum allowed credit for a pixel in - % history. default 15*60 + % history. default 15*60 % * __UseHistory__ determines if we're giving a pixel credit for - % being stable for a long time. default true + % being stable for a long time. default true % * __IsParallel__ determines if we're parallelizing the algorithm. - % default true + % default true % % See also: cv.BackgroundSubtractorCNT % @@ -54,7 +55,7 @@ function delete(this) %DELETE Destructor % - % bs.delete() + % bs.delete() % % See also: cv.BackgroundSubtractorCNT % @@ -65,23 +66,23 @@ function delete(this) function fgmask = apply(this, im, varargin) %APPLY Updates the background model and computes the foreground mask % - % fgmask = bs.apply(im) - % fgmask = bs.apply(im, 'OptionName', optionValue, ...) + % fgmask = bs.apply(im) + % fgmask = bs.apply(im, 'OptionName', optionValue, ...) % % ## Input % * __im__ Next video frame. % % ## Output % * __fgmask__ The output foreground mask as an 8-bit binary image - % (0 for background, 255 for foregound). + % (0 for background, 255 for foreground).
% % ## Options % * __LearningRate__ The value between 0 and 1 that indicates how - % fast the background model is learnt. Negative parameter - % value makes the algorithm to use some automatically chosen - % learning rate. 0 means that the background model is not - % updated at all, 1 means that the background model is - % completely reinitialized from the last frame. default -1 + % fast the background model is learnt. Negative parameter value + % makes the algorithm use some automatically chosen learning + % rate. 0 means that the background model is not updated at all, + % 1 means that the background model is completely reinitialized + % from the last frame. default -1 % % See also: cv.BackgroundSubtractorCNT.getBackgroundImage % @@ -91,7 +92,7 @@ function delete(this) function bgImg = getBackgroundImage(this) %GETBACKGROUNDIMAGE Computes a background image % - % bgImg = bs.getBackgroundImage() + % bgImg = bs.getBackgroundImage() % % ## Output % * __bgImg__ The output background image. @@ -107,7 +108,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.BackgroundSubtractorCNT.empty % @@ -117,11 +118,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.BackgroundSubtractorCNT.clear % @@ -131,11 +132,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.BackgroundSubtractorCNT.save, cv.BackgroundSubtractorCNT.load % @@ -145,7 +146,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -160,21 +161,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from a file storage. % The previous model state is discarded.
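For reference, a minimal usage sketch of the apply/getBackgroundImage loop shared by these background subtractors, shown here for cv.BackgroundSubtractorCNT; 'video.avi' is only a placeholder name, any source readable by cv.VideoCapture would do:

    bs = cv.BackgroundSubtractorCNT('MinPixelStability',15, 'UseHistory',true);
    cap = cv.VideoCapture('video.avi');  % placeholder file name
    while true
        im = cap.read();               % next frame, empty at end of stream
        if isempty(im), break; end
        % LearningRate = -1 lets the algorithm pick the rate automatically
        fgmask = bs.apply(im, 'LearningRate',-1);
    end
    bgImg = bs.getBackgroundImage();   % current background model estimate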
diff --git a/opencv_contrib/+cv/BackgroundSubtractorGMG.m b/opencv_contrib/+cv/BackgroundSubtractorGMG.m index c1fa8e40c..3fa147e76 100644 --- a/opencv_contrib/+cv/BackgroundSubtractorGMG.m +++ b/opencv_contrib/+cv/BackgroundSubtractorGMG.m @@ -34,7 +34,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -73,14 +74,14 @@ function this = BackgroundSubtractorGMG(varargin) %BACKGROUNDSUBTRACTORGMG Creates a GMG Background Subtractor % - % bs = cv.BackgroundSubtractorGMG() - % bs = cv.BackgroundSubtractorGMG('OptionName', optionValue, ...) + % bs = cv.BackgroundSubtractorGMG() + % bs = cv.BackgroundSubtractorGMG('OptionName', optionValue, ...) % % ## Options % * __InitializationFrames__ number of frames used to initialize - % the background models. default 120 + % the background models. default 120 % * __DecisionThreshold__ Threshold value, above which it is - % marked foreground, else background. default 0.8 + % marked foreground, else background. default 0.8 % % Default constructor sets all parameters to default values. % @@ -92,7 +93,7 @@ function delete(this) %DELETE Destructor % - % bs.delete() + % bs.delete() % % See also: cv.BackgroundSubtractorGMG % @@ -103,23 +104,23 @@ function delete(this) function fgmask = apply(this, im, varargin) %APPLY Updates the background model and computes the foreground mask % - % fgmask = bs.apply(im) - % fgmask = bs.apply(im, 'OptionName', optionValue, ...) + % fgmask = bs.apply(im) + % fgmask = bs.apply(im, 'OptionName', optionValue, ...) % % ## Input % * __im__ Next video frame. % % ## Output % * __fgmask__ The output foreground mask as an 8-bit binary image - % (0 for background, 255 for foregound). + % (0 for background, 255 for foreground). % % ## Options % * __LearningRate__ The value between 0 and 1 that indicates how - % fast the background model is learnt. Negative parameter - % value makes the algorithm to use some automatically chosen - % learning rate. 0 means that the background model is not - % updated at all, 1 means that the background model is - % completely reinitialized from the last frame. default -1 + % fast the background model is learnt. Negative parameter value + % makes the algorithm use some automatically chosen learning + % rate. 0 means that the background model is not updated at all, + % 1 means that the background model is completely reinitialized + % from the last frame. default -1 % % Performs single-frame background subtraction and builds up a % statistical background image model. @@ -132,12 +133,12 @@ function delete(this) function bgImg = getBackgroundImage(this) %GETBACKGROUNDIMAGE Computes a background image % - % bgImg = bs.getBackgroundImage() + % bgImg = bs.getBackgroundImage() % % ## Output % * __bgImg__ The output background image. % - % ## Note + % ### Note % Method not implemented for this class, returns empty. % % See also: cv.BackgroundSubtractorGMG.apply @@ -151,7 +152,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.BackgroundSubtractorGMG.empty % @@ -161,11 +162,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read).
% % See also: cv.BackgroundSubtractorGMG.clear % @@ -175,11 +176,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.BackgroundSubtractorGMG.save, cv.BackgroundSubtractorGMG.load % @@ -189,7 +190,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -204,21 +205,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from a file storage. % The previous model state is discarded. diff --git a/opencv_contrib/+cv/BackgroundSubtractorMOG.m b/opencv_contrib/+cv/BackgroundSubtractorMOG.m index 470bbf407..c6521aceb 100644 --- a/opencv_contrib/+cv/BackgroundSubtractorMOG.m +++ b/opencv_contrib/+cv/BackgroundSubtractorMOG.m @@ -16,7 +16,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -40,16 +41,16 @@ function this = BackgroundSubtractorMOG(varargin) %BACKGROUNDSUBTRACTORMOG Creates mixture-of-gaussian background subtractor % - % bs = cv.BackgroundSubtractorMOG() - % bs = cv.BackgroundSubtractorMOG('OptionName', optionValue, ...) + % bs = cv.BackgroundSubtractorMOG() + % bs = cv.BackgroundSubtractorMOG('OptionName', optionValue, ...) % % ## Options % * __History__ Length of the history. default 200 % * __NMixtures__ Number of Gaussian mixtures. default 5 % * __BackgroundRatio__ Background ratio. default 0.7 % * __NoiseSigma__ Noise strength (standard deviation of the - % brightness or each color channel). 0 means some automatic - % value. default 0 + % brightness or each color channel). 0 means some automatic + % value. default 0 % % Default constructor sets all parameters to default values. % @@ -61,7 +62,7 @@ function delete(this) %DELETE Destructor % - % bs.delete() + % bs.delete() % % See also: cv.BackgroundSubtractorMOG % @@ -72,23 +73,23 @@ function delete(this) function fgmask = apply(this, im, varargin) %APPLY Updates the background model and computes the foreground mask % - % fgmask = bs.apply(im) - % fgmask = bs.apply(im, 'OptionName', optionValue, ...) + % fgmask = bs.apply(im) + % fgmask = bs.apply(im, 'OptionName', optionValue, ...) % % ## Input % * __im__ Next video frame. 
% % ## Output % * __fgmask__ The output foreground mask as an 8-bit binary image - % (0 for background, 255 for foregound). + % (0 for background, 255 for foreground). % % ## Options % * __LearningRate__ The value between 0 and 1 that indicates how - % fast the background model is learnt. Negative parameter - % value makes the algorithm to use some automatically chosen - % learning rate. 0 means that the background model is not - % updated at all, 1 means that the background model is - % completely reinitialized from the last frame. default -1 + % fast the background model is learnt. Negative parameter value + % makes the algorithm use some automatically chosen learning + % rate. 0 means that the background model is not updated at all, + % 1 means that the background model is completely reinitialized + % from the last frame. default -1 % % See also: cv.BackgroundSubtractorMOG.getBackgroundImage % @@ -98,12 +99,12 @@ function delete(this) function bgImg = getBackgroundImage(this) %GETBACKGROUNDIMAGE Computes a background image % - % bgImg = bs.getBackgroundImage() + % bgImg = bs.getBackgroundImage() % % ## Output % * __bgImg__ The output background image. % - % ## Note + % ### Note % Method not implemented for this class, throws exception. % % See also: cv.BackgroundSubtractorMOG.apply @@ -117,7 +118,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.BackgroundSubtractorMOG.empty % @@ -127,11 +128,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.BackgroundSubtractorMOG.clear % @@ -141,11 +142,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.BackgroundSubtractorMOG.save, cv.BackgroundSubtractorMOG.load % @@ -155,7 +156,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -170,21 +171,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model.
+ % default false % % This method reads algorithm parameters from a file storage. % The previous model state is discarded. diff --git a/opencv_contrib/+cv/BasicFaceRecognizer.m b/opencv_contrib/+cv/BasicFaceRecognizer.m index e666edacc..6a2112cee 100644 --- a/opencv_contrib/+cv/BasicFaceRecognizer.m +++ b/opencv_contrib/+cv/BasicFaceRecognizer.m @@ -28,32 +28,32 @@ % Here is an example of setting a threshold for the Eigenfaces method, % when creating the model: % - % % Let's say we want to keep 10 Eigenfaces and have a threshold value of 10.0 - % num_components = 10; - % threshold = 10.0; - % % Then if you want to have a face recognizer with a confidence threshold, - % % create the concrete implementation with the appropiate parameters: - % model = cv.BasicFaceRecognizer('Eigenfaces', ... - % 'NumComponents',num_components, 'Threshold',threshold); + % % Let's say we want to keep 10 Eigenfaces and have a threshold value of 10.0 + % num_components = 10; + % threshold = 10.0; + % % Then if you want to have a face recognizer with a confidence threshold, + % % create the concrete implementation with the appropriate parameters: + % model = cv.BasicFaceRecognizer('Eigenfaces', ... + % 'NumComponents',num_components, 'Threshold',threshold); % % Sometimes it's impossible to train the model, just to experiment with % threshold values. It's possible to set model thresholds during runtime. % Let's see how we would set/get the prediction for the Eigenface model, % we've created above: % - % % The following line reads the threshold from the Eigenfaces model: - % current_threshold = model.Threshold; - % % And this line sets the threshold to 0.0: - % model.Threshold = 0.0; + % % The following line reads the threshold from the Eigenfaces model: + % current_threshold = model.Threshold; + % % And this line sets the threshold to 0.0: + % model.Threshold = 0.0; % % If you've set the threshold to 0.0 as we did above, then: % - % img = cv.imread('person1/3.jpg', 'Grayscale',true); - % % Get a prediction from the model. Note: We've set a threshold of 0.0 above, - % % since the distance is almost always larger than 0.0, you'll get -1 as - % % label, which indicates, this face is unknown - % predicted_label = model.predict(img); - % % ... + % img = cv.imread('person1/3.jpg', 'Grayscale',true); + % % Get a prediction from the model. Note: We've set a threshold of 0.0 above, + % % since the distance is almost always larger than 0.0, you'll get -1 as + % % label, which indicates, this face is unknown + % predicted_label = model.predict(img); + % % ... % % is going to yield -1 as predicted label, which states this face is % unknown. @@ -73,7 +73,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -88,37 +89,34 @@ function this = BasicFaceRecognizer(ftype, varargin) %BASICFACERECOGNIZER Constructor % - % obj = cv.BasicFaceRecognizer(ftype) - % obj = cv.BasicFaceRecognizer(ftype, 'OptionName',optionValue, ...) + % obj = cv.BasicFaceRecognizer(ftype) + % obj = cv.BasicFaceRecognizer(ftype, 'OptionName',optionValue, ...) % % ## Input % * __ftype__ Face recognizer type. One of: - % * __Eigenfaces__ Face Recognizer based on Eigenfaces. - % * __Fisherfaces__ Face Recognizer based on Fisherfaces. + % * __Eigenfaces__ Face Recognizer based on Eigenfaces. + % * __Fisherfaces__ Face Recognizer based on Fisherfaces. % % ## Options % * __NumComponents__ The number of components, default 0.
- % * __Eigenfaces__ The number of components kept for this - % Principal Component Analysis. As a hint: There's no - % rule how many components (read: Eigenfaces) should - % be kept for good reconstruction capabilities. It is - % based on your input data, so experiment with the - % number. Keeping 80 components should almost always - % be sufficient. - % * __Fisherfaces__ The number of components kept for this - % Linear Discriminant Analysis with the Fisherfaces - % criterion. It's useful to keep all components, that - % means the number of your classes `c` (read: - % subjects, persons you want to recognize). If you - % leave this at the default (0) or set it to a value - % less-equal 0 or greater (`c-1`), it will be set to - % the correct number (`c-1`) automatically. + % * __Eigenfaces__ The number of components kept for this + % Principal Component Analysis. As a hint: There's no rule how + % many components (read: Eigenfaces) should be kept for good + % reconstruction capabilities. It is based on your input data, + % so experiment with the number. Keeping 80 components should + % almost always be sufficient. + % * __Fisherfaces__ The number of components kept for this + % Linear Discriminant Analysis with the Fisherfaces criterion. + % It's useful to keep all components, that means the number of + % your classes `c` (read: subjects, persons you want to + % recognize). If you leave this at the default (0) or set it + % to a value less-equal 0 or greater than `c-1`, it will be set + % to the correct number (`c-1`) automatically. % * __Threshold__ The threshold applied in the prediction. If the - % distance to the nearest neighbor is larger than the - % threshold, the prediction returns -1. default `realmax` + % distance to the nearest neighbor is larger than the threshold, + % the prediction returns -1. default `realmax` % % ### Notes - % % - Training and prediction must be done on grayscale images, use % cv.cvtColor to convert between the color spaces. % - THE EIGENFACES/FISHERFACES METHOD MAKES THE ASSUMPTION, THAT @@ -136,7 +134,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.BasicFaceRecognizer % @@ -147,7 +145,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -161,9 +159,10 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % - % See also: cv.BasicFaceRecognizer.empty, cv.BasicFaceRecognizer.load + % See also: cv.BasicFaceRecognizer.empty, + % cv.BasicFaceRecognizer.load % BasicFaceRecognizer_(this.id, 'clear'); end @@ -171,13 +170,14 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read).
% - % See also: cv.BasicFaceRecognizer.clear, cv.BasicFaceRecognizer.load + % See also: cv.BasicFaceRecognizer.clear, + % cv.BasicFaceRecognizer.load % b = BasicFaceRecognizer_(this.id, 'empty'); end @@ -185,16 +185,16 @@ function clear(this) function varargout = save(this, filename) %SAVE Saves a FaceRecognizer and its model state % - % obj.save(filename) - % str = obj.save(filename) + % obj.save(filename) + % str = obj.save(filename) % % ## Input % * __filename__ The filename to store this FaceRecognizer to - % (either XML/YAML). + % (either XML/YAML). % % ## Output % * __str__ optional output. If requested, the model is persisted - % to a string in memory instead of writing to disk. + % to a string in memory instead of writing to disk. % % Saves this model to a given filename, either as XML or YAML. % @@ -208,19 +208,19 @@ function clear(this) function load(this, fname_or_str, varargin) %LOAD Loads a FaceRecognizer and its model state % - % obj.load(filename) - % obj.load(str, 'FromString',true) + % obj.load(fname) + % obj.load(str, 'FromString',true) % % ## Input - % * __filename__ The filename to load this FaceRecognizer from - % (either XML/YAML). + % * __fname__ The filename to load this FaceRecognizer from + % (either XML/YAML). % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % Loads a persisted model and state from a given XML or YAML file. % @@ -232,13 +232,14 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % - % See also: cv.BasicFaceRecognizer.save, cv.BasicFaceRecognizer.load + % See also: cv.BasicFaceRecognizer.save, + % cv.BasicFaceRecognizer.load % name = BasicFaceRecognizer_(this.id, 'getDefaultName'); end @@ -249,14 +250,13 @@ function load(this, fname_or_str, varargin) function train(this, src, labels) %TRAIN Trains a FaceRecognizer with given data and associated labels % - % obj.train(src, labels) + % obj.train(src, labels) % % ## Input % * __src__ The training images, that means the faces you want to - % learn. The data has to be given as a cell array of - % matrices. + % learn. The data has to be given as a cell array of matrices. % * __labels__ The labels corresponding to the images. have to be - % given as an integer vector. + % given as an integer vector. % % The following source code snippet shows you how to learn a % Fisherfaces model on a given set of images. 
The images are read @@ -267,39 +267,39 @@ function train(this, src, labels) % FaceRecognizer you don't have to pay any attention to the order % of the labels, just make sure same persons have the same label: % - % % holds images and labels - % images = {}; - % labels = []; - % % images for first person - % images{end+1} = cv.imread('person0/0.jpg', 'Grayscale',true); - % labels{end+1} = 0; - % images{end+1} = cv.imread('person0/1.jpg', 'Grayscale',true); - % labels{end+1} = 0; - % images{end+1} = cv.imread('person0/2.jpg', 'Grayscale',true); - % labels{end+1} = 0; - % % images for second person - % images{end+1} = cv.imread('person1/0.jpg', 'Grayscale',true); - % labels{end+1} = 1; - % images{end+1} = cv.imread('person1/1.jpg', 'Grayscale',true); - % labels{end+1} = 1; - % images{end+1} = cv.imread('person1/2.jpg', 'Grayscale',true); - % labels{end+1} = 1; + % % holds images and labels + % images = {}; + % labels = []; + % % images for first person + % images{end+1} = cv.imread('person0/0.jpg', 'Grayscale',true); + % labels(end+1) = 0; + % images{end+1} = cv.imread('person0/1.jpg', 'Grayscale',true); + % labels(end+1) = 0; + % images{end+1} = cv.imread('person0/2.jpg', 'Grayscale',true); + % labels(end+1) = 0; + % % images for second person + % images{end+1} = cv.imread('person1/0.jpg', 'Grayscale',true); + % labels(end+1) = 1; + % images{end+1} = cv.imread('person1/1.jpg', 'Grayscale',true); + % labels(end+1) = 1; + % images{end+1} = cv.imread('person1/2.jpg', 'Grayscale',true); + % labels(end+1) = 1; % % Now that you have read some images, we can create a new % FaceRecognizer. In this example I'll create a Fisherfaces model % and decide to keep all of the possible Fisherfaces: % - % % Create a new Fisherfaces model and retain all available - % % Fisherfaces, this is the most common usage of this specific - % % FaceRecognizer: - % model = cv.BasicFaceRecognizer('Fisherfaces'); + % % Create a new Fisherfaces model and retain all available + % % Fisherfaces, this is the most common usage of this specific + % % FaceRecognizer: + % model = cv.BasicFaceRecognizer('Fisherfaces'); % % And finally train it on the given dataset (the face images and % labels): % - % % This is the common interface to train all of the available FaceRecognizer - % % implementations: - % model.train(images, labels); + % % This is the common interface to train all of the available FaceRecognizer + % % implementations: + % model.train(images, labels); % % See also: cv.BasicFaceRecognizer.predict % @@ -309,14 +309,13 @@ function train(this, src, labels) function update(this, src, labels) %UPDATE Updates a FaceRecognizer with given data and associated labels % - % obj.update(src, labels) + % obj.update(src, labels) % % ## Input % * __src__ The training images, that means the faces you want to - % learn. The data has to be given as a cell array of - % matrices. + % learn. The data has to be given as a cell array of matrices. % * __labels__ The labels corresponding to the images. have to be - % given as an integer vector. + % given as an integer vector. % % This method updates a (probably trained) FaceRecognizer, but % only if the algorithm supports it. The Local Binary Patterns @@ -327,30 +326,30 @@ function update(this, src, labels) % train empties the existing model and learns a new model, while % update does not delete any model data.
% - % % Create a new LBPH model (it can be updated) and use the - % % default parameters, this is the most common usage of this - % % specific FaceRecognizer: - % model = cv.LBPHFaceRecognizer(); - % % This is the common interface to train all of the available - % % FaceRecognizer implementations: - % model.train(images, labels); - % % Some containers to hold new image. - % % You should add some images to the containers: - % newImages = {..}; - % newLabels = [..]; - % % Now updating the model is as easy as calling: - % model.update(newImages,newLabels); - % % This will preserve the old model data and extend the - % % existing model with the new features extracted from - % % newImages! + % % Create a new LBPH model (it can be updated) and use the + % % default parameters, this is the most common usage of this + % % specific FaceRecognizer: + % model = cv.LBPHFaceRecognizer(); + % % This is the common interface to train all of the available + % % FaceRecognizer implementations: + % model.train(images, labels); + % % Some containers to hold new image. + % % You should add some images to the containers: + % newImages = {..}; + % newLabels = [..]; + % % Now updating the model is as easy as calling: + % model.update(newImages,newLabels); + % % This will preserve the old model data and extend the + % % existing model with the new features extracted from + % % newImages! % % Calling update on an Eigenfaces model (see % cv.BasicFaceRecognizer), which doesn't support updating, will % throw an error similar to: % - % OpenCV Error: The function/feature is not implemented (This - % FaceRecognizer (FaceRecognizer.Eigenfaces) does not support - % updating, you have to use FaceRecognizer::train to update it.) + % OpenCV Error: The function/feature is not implemented (This + % FaceRecognizer (FaceRecognizer.Eigenfaces) does not support + % updating, you have to use FaceRecognizer::train to update it.) % % NOTE: The FaceRecognizer does not store your training images, % because this would be very memory intense and it's not the @@ -365,7 +364,7 @@ function update(this, src, labels) function [label, confidence] = predict(this, src) %PREDICT Predicts a label and associated confidence (e.g. distance) for a given input image % - % [label, confidence] = obj.predict(src) + % [label, confidence] = obj.predict(src) % % ## Input % * __src__ Sample image to get a prediction from. @@ -373,24 +372,24 @@ function update(this, src, labels) % ## Output % * __label__ The predicted label for the given image. % * __confidence__ Associated confidence (e.g. distance) for the - % predicted label. + % predicted label. % % The following example shows how to get a prediction from a % trained model: % - % % Do your initialization here (create the FaceRecognizer model) ... - % % Read in a sample image: - % img = cv.imread('person1/3.jpg', 'Grayscale',true); - % % And get a prediction from the FaceRecognizer: - % predicted = model.predict(img); + % % Do your initialization here (create the FaceRecognizer model) ... + % % Read in a sample image: + % img = cv.imread('person1/3.jpg', 'Grayscale',true); + % % And get a prediction from the FaceRecognizer: + % predicted = model.predict(img); % % Or to get a prediction and the associated confidence (e.g. % distance): % - % % Do your initialization here (create the FaceRecognizer model) ... 
- % img = cv.imread('person1/3.jpg', 'Grayscale',true); - % % Get the prediction and associated confidence from the model - % [predicted_label, predicted_confidence] = model.predict(img); + % % Do your initialization here (create the FaceRecognizer model) ... + % img = cv.imread('person1/3.jpg', 'Grayscale',true); + % % Get the prediction and associated confidence from the model + % [predicted_label, predicted_confidence] = model.predict(img); % % See also: cv.BasicFaceRecognizer.train % @@ -400,19 +399,19 @@ function update(this, src, labels) function results = predict_collect(this, src, varargin) %PREDICT_COLLECT send all result of prediction to collector for custom result handling % - % results = obj.predict_collect(src) - % results = obj.predict_collect(src, 'OptionName',optionValue, ...) + % results = obj.predict_collect(src) + % results = obj.predict_collect(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ Sample image to get a prediction from. % % ## Output % * __results__ A struct array of all collected predictions labels - % and associated prediction distances for the given image. + % and associated prediction distances for the given image. % % ## Options % * __Sorted__ If set, results will be sorted by distance. Each - % value is a pair of label and distance. default false + % value is a pair of label and distance. default false % % See also: cv.BasicFaceRecognizer.predict % @@ -422,7 +421,7 @@ function update(this, src, labels) function setLabelInfo(this, label, strInfo) %SETLABELINFO Sets string info for the specified model's label % - % obj.setLabelInfo(label, strInfo) + % obj.setLabelInfo(label, strInfo) % % ## Input % * __label__ label id. @@ -439,7 +438,7 @@ function setLabelInfo(this, label, strInfo) function strInfo = getLabelInfo(this, label) %GETLABELINFO Gets string information by label % - % strInfo = obj.getLabelInfo(label) + % strInfo = obj.getLabelInfo(label) % % ## Input % * __label__ label id. @@ -459,7 +458,7 @@ function setLabelInfo(this, label, strInfo) function labels = getLabelsByString(this, str) %GETLABELSBYSTRING Gets vector of labels by string % - % labels = obj.getLabelsByString(str) + % labels = obj.getLabelsByString(str) % % ## Input % * __str__ string information (substring matching). @@ -481,12 +480,12 @@ function setLabelInfo(this, label, strInfo) function projections = getProjections(this) %GETPROJECTIONS Get training data projections % - % projections = obj.getProjections() + % projections = obj.getProjections() % % ## Output - % * __projections__ The projections of the training data. - % A cell array of length `N` (training set size), each cell - % contains a `double` vector of length `obj.NumComponents`. + % * __projections__ The projections of the training data. A cell + % array of length `N` (training set size), each cell contains a + % `double` vector of length `obj.NumComponents`. % % Note: returns an empty mat if the model is not trained. % @@ -498,11 +497,11 @@ function setLabelInfo(this, label, strInfo) function labels = getLabels(this) %GETLABELS Get labels % - % labels = obj.getLabels() + % labels = obj.getLabels() % % ## Output - % * __labels__ The labels corresponding to the projections. - % An integer vector of length `N` (training set size). + % * __labels__ The labels corresponding to the projections. An + % integer vector of length `N` (training set size). % % Note: returns an empty mat if the model is not trained. 
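Complementing the predict examples above, here is a small sketch of cv.BasicFaceRecognizer.predict_collect. It assumes a `model` trained as in the train example and a sample image on disk, and makes no assumption about the exact field names of the returned struct array:

    % assumes 'model' was trained as in the train example above
    img = cv.imread('person1/3.jpg', 'Grayscale',true);
    % collect every candidate label with its distance, best match first
    results = model.predict_collect(img, 'Sorted',true);
    disp(results(1))  % inspect the closest match (fields as returned by the wrapper)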
% @@ -514,13 +513,12 @@ function setLabelInfo(this, label, strInfo) function eigenvalues = getEigenValues(this) %GETEIGENVALUES Get PCA/LDA eigenvalues % - % eigenvalues = obj.getEigenValues() + % eigenvalues = obj.getEigenValues() % % ## Output % * __eigenvalues__ The eigenvalues for this Principal Component - % Analysis or Linear Discriminant Analysis (ordered - % descending). A `double` vector of length - % `obj.NumComponents`. + % Analysis or Linear Discriminant Analysis (ordered descending). + % A `double` vector of length `obj.NumComponents`. % % Note: returns an empty mat if the model is not trained. % @@ -532,14 +530,13 @@ function setLabelInfo(this, label, strInfo) function eigenvectors = getEigenVectors(this) %GETEIGENVECTORS Get PCA/LDA eigenvectors % - % eigenvectors = obj.getEigenVectors() + % eigenvectors = obj.getEigenVectors() % % ## Output % * __eigenvectors__ The eigenvectors for this Principal Component - % Analysis or Linear Discriminant Analysis (ordered by their - % eigenvalue). A `double` matrix of size - % `(w*h)-by-obj.NumComponents` (each column is a an - % eigenvector). + % Analysis or Linear Discriminant Analysis (ordered by their + % eigenvalue). A `double` matrix of size + % `(w*h)-by-obj.NumComponents` (each column is an eigenvector). % % Note: returns an empty mat if the model is not trained. % @@ -551,12 +548,12 @@ function setLabelInfo(this, label, strInfo) function m = getMean(this) %GETMEAN Get sample mean % - % m = obj.getMean() + % m = obj.getMean() % % ## Output - % * __m__ The sample mean calculated from the training data. - % A `double` vector of length `w*h` (width and height of - % a face image). + % * __m__ The sample mean calculated from the training data. A + % `double` vector of length `w*h` (width and height of a face + % image). % % Note: returns an empty mat if the model is not trained. % diff --git a/opencv_contrib/+cv/BinaryDescriptor.m b/opencv_contrib/+cv/BinaryDescriptor.m index 569713010..8521d45c1 100644 --- a/opencv_contrib/+cv/BinaryDescriptor.m +++ b/opencv_contrib/+cv/BinaryDescriptor.m @@ -57,35 +57,35 @@ % band has been assignen its BD, the LBD descriptor of line is simply % given by: % - % LBD = (BD_1^T, BD_2^T, ... , BD_m^T)^T + % LBD = (BD_1^T, BD_2^T, ... , BD_m^T)^T % % To compute a band descriptor `B_j`, each `k`-th row in it is considered % and the gradients in such row are accumulated: % - % V1_j^k = lambda * sum[g'_{d_perp} > 0] ( g'_{d_perp} ) - % V2_j^k = lambda * sum[g'_{d_perp} < 0] ( -g'_{d_perp} ) - % V3_j^k = lambda * sum[g'_{d_L} > 0] ( g'_{d_L} ) - % V4_j^k = lambda * sum[g'_{d_L} < 0] ( -g'_{d_L} ) + % V1_j^k = lambda * sum[g'_{d_perp} > 0] ( g'_{d_perp} ) + % V2_j^k = lambda * sum[g'_{d_perp} < 0] ( -g'_{d_perp} ) + % V3_j^k = lambda * sum[g'_{d_L} > 0] ( g'_{d_L} ) + % V4_j^k = lambda * sum[g'_{d_L} < 0] ( -g'_{d_L} ) % % with `\lambda = f_g(k) * f_l(k)` % % By stacking previous results, we obtain the % *band description matrix (BDM)* % - % BDM_j = [V1_j^1, V1_j^2, ..., V1_j^n ; - % V2_j^1, V2_j^2, ..., V2_j^n ; - % V3_j^1, V3_j^2, ..., V3_j^n ; - % V4_j^1, V4_j^2, ..., V4_j^n ] in R^(4xn) + % BDM_j = [V1_j^1, V1_j^2, ..., V1_j^n ; + % V2_j^1, V2_j^2, ..., V2_j^n ; + % V3_j^1, V3_j^2, ..., V3_j^n ; + % V4_j^1, V4_j^2, ..., V4_j^n ] in R^(4xn) % % with `n` the number of rows in band `B_j`: % - % n = {2w, j = 1||m; - % {3w, else. + % n = {2w, j = 1||m; + % {3w, else. % % Each `BD_j` can be obtained using the standard deviation vector `S_j` % and mean vector `M_j` of `BDM_J`.
Thus, finally: % - % LBD = (M_1^T, S_1^T, M_2^T, S_2^T, ..., M_m^T, S_m^T)^T in R^(8m) + % LBD = (M_1^T, S_1^T, M_2^T, S_2^T, ..., M_m^T, S_m^T)^T in R^(8m) % % Once the LBD has been obtained, it must be converted into a binary form. % For such purpose, we consider 32 possible pairs of BD inside it; each @@ -109,7 +109,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -125,15 +126,15 @@ function this = BinaryDescriptor(varargin) %BINARYDESCRIPTOR Create a BinaryDescriptor object with default parameters (or with the ones provided) % - % obj = cv.BinaryDescriptor() - % obj = cv.BinaryDescriptor('OptionName',optionValue, ...) + % obj = cv.BinaryDescriptor() + % obj = cv.BinaryDescriptor('OptionName',optionValue, ...) % % ## Options % * __KSize__ the size of Gaussian kernel: `ksize-by-ksize`. - % default 5 + % default 5 % * __NumOfOctave__ the number of image octaves. default 1 % * __ReductionRatio__ image's reduction ratio in construction of - % Gaussian pyramids. default 2 + % Gaussian pyramids. default 2 % * __WidthOfBand__ the width of band; default 7 % % If no argument is provided, constructor sets default values. @@ -147,7 +148,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.BinaryDescriptor % @@ -161,7 +162,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.BinaryDescriptor.empty, cv.BinaryDescriptor.load % @@ -171,11 +172,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.BinaryDescriptor.clear, cv.BinaryDescriptor.load % @@ -185,7 +186,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -201,21 +202,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). 
The previous @@ -229,11 +230,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.BinaryDescriptor.save, cv.BinaryDescriptor.load % @@ -246,16 +247,16 @@ function load(this, fname_or_str, varargin) function ntype = defaultNorm(this) %DEFAULTNORM Returns the default norm type % - % ntype = obj.defaultNorm() + % ntype = obj.defaultNorm() % % ## Output % * __ntype__ Norm type. One of `cv::NormTypes`: - % * __Inf__ - % * __L1__ - % * __L2__ - % * __L2Sqr__ - % * __Hamming__ - % * __Hamming2__ + % * __Inf__ + % * __L1__ + % * __L2__ + % * __L2Sqr__ + % * __Hamming__ + % * __Hamming2__ % % Always `Hamming` for BinaryDescriptor. % @@ -267,7 +268,7 @@ function load(this, fname_or_str, varargin) function sz = descriptorSize(this) %DESCRIPTORSIZE Returns the descriptor size % - % sz = obj.descriptorSize() + % sz = obj.descriptorSize() % % ## Output % * __sz__ Descriptor size. @@ -283,7 +284,7 @@ function load(this, fname_or_str, varargin) function dtype = descriptorType(this) %DESCRIPTORTYPE Returns the descriptor type % - % dtype = obj.descriptorType() + % dtype = obj.descriptorType() % % ## Output % * __dtype__ Descriptor type, one of numeric MATLAB class names. @@ -299,9 +300,9 @@ function load(this, fname_or_str, varargin) function keylines = detect(this, img, varargin) %DETECT Requires line detection % - % keylines = obj.detect(img) - % keylines = obj.detect(imgs) - % [...] = obj.detect(..., 'OptionName',optionValue, ...) + % keylines = obj.detect(img) + % keylines = obj.detect(imgs) + % [...] = obj.detect(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Input image (first variant), 8-bit grayscale. @@ -309,39 +310,37 @@ function load(this, fname_or_str, varargin) % % ## Output % * __keylines__ Extracted lines for one or more images. In the - % first variant, a 1-by-N structure array. In the second - % variant of the method, `keylines{i}` is a set of keylines - % detected in `imgs{i}`. Each keyline is described with a - % `KeyLine` structure with the following fields: - % * __angle__ orientation of the line. - % * **class_id** object ID, that can be used to cluster - % keylines by the line they represent. - % * __octave__ octave (pyramid layer), from which the - % keyline has been extracted. - % * __pt__ coordinates of the middlepoint `[x,y]`. - % * __response__ the response, by which the strongest - % keylines have been selected. It's represented by the - % ratio between line's length and maximum between - % image's width and height. - % * __size__ minimum area containing line. - % * __startPoint__ the start point of the line in the - % original image `[x,y]`. - % * __endPoint__ the end point of the line in the original - % image `[x,y]`. - % * __startPointInOctave__ the start point of the line in - % the octave it was extracted from `[x,y]`. - % * __endPointInOctave__ the end point of the line in the - % octave it was extracted from `[x,y]`. - % * __lineLength__ the length of line. - % * __numOfPixels__ number of pixels covered by the line. + % first variant, a 1-by-N structure array. In the second variant + % of the method, `keylines{i}` is a set of keylines detected in + % `imgs{i}`. 
Each keyline is described with a `KeyLine` + % structure with the following fields: + % * __angle__ orientation of the line. + % * **class_id** object ID, that can be used to cluster keylines + % by the line they represent. + % * __octave__ octave (pyramid layer), from which the keyline + % has been extracted. + % * __pt__ coordinates of the middlepoint `[x,y]`. + % * __response__ the response, by which the strongest keylines + % have been selected. It's represented by the ratio between + % line's length and maximum between image's width and height. + % * __size__ minimum area containing line. + % * __startPoint__ the start point of the line in the original + % image `[x,y]`. + % * __endPoint__ the end point of the line in the original image + % `[x,y]`. + % * __startPointInOctave__ the start point of the line in the + % octave it was extracted from `[x,y]`. + % * __endPointInOctave__ the end point of the line in the octave + % it was extracted from `[x,y]`. + % * __lineLength__ the length of line. + % * __numOfPixels__ number of pixels covered by the line. % % ## Options % * __Mask__ optional mask matrix to detect only `KeyLines` of - % interest. It must be a logical or 8-bit integer matrix - % with non-zero values in the region of interest. In the - % second variant, it is a cell-array of masks for each - % input image, `masks{i}` is a mask for `imgs{i}`. - % Not set by default. + % interest. It must be a logical or 8-bit integer matrix with + % non-zero values in the region of interest. In the second + % variant, it is a cell-array of masks for each input image, + % `masks{i}` is a mask for `imgs{i}`. Not set by default. % % `KeyLine` is a struct to represent a line. % @@ -371,7 +370,6 @@ function load(this, fname_or_str, varargin) % in octave it was extracted from, about line's length and number % of pixels it covers. % - % % See also: cv.BinaryDescriptor.compute, % cv.BinaryDescriptor.detectAndCompute % @@ -381,28 +379,28 @@ function load(this, fname_or_str, varargin) function [descriptors, keylines] = compute(this, img, keylines, varargin) %COMPUTE Requires descriptors computation % - % [descriptors, keylines] = obj.compute(img, keylines) - % [descriptors, keylines] = obj.compute(imgs, keylines) + % [descriptors, keylines] = obj.compute(img, keylines) + % [descriptors, keylines] = obj.compute(imgs, keylines) % % ## Input % * __img__ Input image (first variant), 8-bit grayscale. % * __imgs__ Image set (second variant), cell array of images. % * __keylines__ Input collection of keylines containing lines for - % which descriptors must be computed. In the first variant, - % this is a struct-array of detected lines. In the second - % variant, it is a cell-array, where `keylines{i}` is a set - % of lines detected in `imgs{i}`. + % which descriptors must be computed. In the first variant, this + % is a struct-array of detected lines. In the second variant, it + % is a cell-array, where `keylines{i}` is a set of lines + % detected in `imgs{i}`. % % ## Output % * __descriptors__ Computed descriptors. In the second variant of - % the method `descriptors{i}` are descriptors computed for a - % `keylines{i}`. Row `j` in `descriptors` (or - % `descriptors{i}`) is the descriptor for `j`-th keypoint. + % the method `descriptors{i}` are descriptors computed for a + % `keylines{i}`. Row `j` in `descriptors` (or `descriptors{i}`) + % is the descriptor for `j`-th keypoint. % * __keylines__ Optional output with possibly updated keylines. 
% % ## Options % * __ReturnFloatDescr__ flag (when set to true, original - % non-binary descriptors are returned). default false + % non-binary descriptors are returned). default false % % See also: cv.BinaryDescriptor.detect, % cv.BinaryDescriptor.detectAndCompute @@ -413,8 +411,8 @@ function load(this, fname_or_str, varargin) function [keylines, descriptors] = detectAndCompute(this, img, varargin) %DETECTANDCOMPUTE Define operator to perform detection of KeyLines and computation of descriptors in a row % - % [keylines, descriptors] = obj.detectAndCompute(img) - % [...] = obj.detectAndCompute(..., 'OptionName',optionValue, ...) + % [keylines, descriptors] = obj.detectAndCompute(img) + % [...] = obj.detectAndCompute(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Input image, 8-bit grayscale. @@ -425,17 +423,16 @@ function load(this, fname_or_str, varargin) % % ## Options % * __Mask__ Optional mask matrix to select which lines in - % `keylines` must be accepted among the ones extracted - % (used when `Keylines` option is not empty). - % Not set by default. + % `keylines` must be accepted among the ones extracted (used + % when the `Keypoints` option is not empty). Not set by default. % * __Keypoints__ Structure array that contains input lines (when - % filled, the detection part will be skipped and input lines - % will be passed as input to the algorithm computing - % descriptors. So if set, detection phase will be skipped - % and only computation of descriptors will be executed, - % using the lines provided. Not set by default. + % filled, the detection part will be skipped and input lines + % will be passed as input to the algorithm computing + % descriptors). So if set, detection phase will be skipped and + % only computation of descriptors will be executed, using the + % lines provided. Not set by default. % * __ReturnFloatDescr__ Flag (when set to true, original - % non-binary descriptors are returned). default false + % non-binary descriptors are returned). default false % % See also: cv.BinaryDescriptor.detect, cv.BinaryDescriptor.compute % diff --git a/opencv_contrib/+cv/BinaryDescriptorMatcher.m b/opencv_contrib/+cv/BinaryDescriptorMatcher.m index 887c7ba1e..feb76df28 100644 --- a/opencv_contrib/+cv/BinaryDescriptorMatcher.m +++ b/opencv_contrib/+cv/BinaryDescriptorMatcher.m @@ -31,7 +31,7 @@ % when `||h-g||_H <= r` (where `||.||_H` is the Hamming norm), there must % exist a substring `k` (with `1 <= k <= m`) such that: % - % || h^(k) - g^(k) ||_H <= floor(r/m) + % || h^(k) - g^(k) ||_H <= floor(r/m) % % That means that if Hamming distance between each of the `m` substring is % strictly greater than `floor(r/m)`, then `||h-g||_H` must be larger that @@ -55,14 +55,15 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = BinaryDescriptorMatcher() %BINARYDESCRIPTORMATCHER Constructor % - % matcher = cv.BinaryDescriptorMatcher() + % matcher = cv.BinaryDescriptorMatcher() % % The BinaryDescriptorMatcher constructed is able to store and % manage 256-bits long entries.
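To make the line-matching pipeline concrete, a minimal sketch of extracting KeyLines and their LBD descriptors with cv.BinaryDescriptor, producing exactly the kind of descriptor matrices this matcher stores; the image file names are placeholders:

    bd = cv.BinaryDescriptor();
    img1 = cv.imread('img1.jpg', 'Grayscale',true);  % placeholder file
    img2 = cv.imread('img2.jpg', 'Grayscale',true);  % placeholder file
    % detect lines and compute their binary descriptors in one call
    [kl1, d1] = bd.detectAndCompute(img1);
    [kl2, d2] = bd.detectAndCompute(img2);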
@@ -75,7 +76,7 @@ function delete(this) %DELETE Destructor % - % matcher.delete() + % matcher.delete() % % See also: cv.BinaryDescriptorMatcher % @@ -89,7 +90,7 @@ function delete(this) function clear(this) %CLEAR Clear dataset and internal data % - % matcher.clear() + % matcher.clear() % % See also: cv.BinaryDescriptorMatcher.empty % @@ -99,7 +100,7 @@ function clear(this) function status = empty(this) %EMPTY Returns true if there are no train descriptors in the collection % - % status = matcher.empty() + % status = matcher.empty() % % ## Output % * __status__ boolean status @@ -112,7 +113,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % matcher.save(filename) + % matcher.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -128,21 +129,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % matcher.load(fname) - % matcher.load(str, 'FromString',true) - % matcher.load(..., 'OptionName',optionValue, ...) + % matcher.load(fname) + % matcher.load(str, 'FromString',true) + % matcher.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -156,11 +157,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = matcher.getDefaultName() + % name = matcher.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.BinaryDescriptorMatcher.save, cv.BinaryDescriptorMatcher.load % @@ -173,13 +174,13 @@ function load(this, fname_or_str, varargin) function add(this, descriptors) %ADD Store locally new descriptors to be inserted in dataset, without updating dataset % - % matcher.add(descriptors) + % matcher.add(descriptors) % % ## Input % * __descriptors__ cell array of matrices containing descriptors - % to be inserted into dataset. Each matrix `descriptors{i}` - % should contain descriptors relative to lines extracted - % from i-th image. + % to be inserted into dataset. Each matrix `descriptors{i}` + % should contain descriptors relative to lines extracted from + % i-th image. 
% % See also: cv.BinaryDescriptorMatcher.clear % @@ -189,7 +190,7 @@ function add(this, descriptors) function train(this) %TRAIN Update dataset by inserting into it all descriptors that were stored locally by add function % - % matcher.train() + % matcher.train() % % NOTE: Every time this function is invoked, current dataset is % deleted and locally stored descriptors are inserted into @@ -204,9 +205,9 @@ function train(this) function matches = match(this, queryDescriptors, varargin) %MATCH For every input query descriptor, retrieve the best matching one from a dataset provided from user or from the one internal to class % - % matches = matcher.match(queryDescriptors, trainDescriptors) - % matches = matcher.match(queryDescriptors) - % [...] = matcher.match(..., 'OptionName', optionValue, ...) + % matches = matcher.match(queryDescriptors, trainDescriptors) + % matches = matcher.match(queryDescriptors) + % [...] = matcher.match(..., 'OptionName', optionValue, ...) % % ## Input % * __queryDescriptors__ query descriptors. @@ -217,10 +218,10 @@ function train(this) % % ## Options % * __Mask__ mask to select which input descriptors must be - % matched to one in dataset. In the second variant, a vector - % of masks (the i-th mask in vector indicates whether each - % input query can be matched with descriptors in dataset - % relative to i-th image). Not set by default. + % matched to one in dataset. In the second variant, a vector of + % masks (the i-th mask in vector indicates whether each input + % query can be matched with descriptors in dataset relative to + % i-th image). Not set by default. % % For every input descriptor, find the best matching one: % @@ -236,28 +237,28 @@ function train(this) function matches = knnMatch(this, queryDescriptors, varargin) %KNNMATCH For every input query descriptor, retrieve the best k matching ones from a dataset provided from user or from the one internal to class % - % matches = matcher.knnMatch(queryDescriptors, trainDescriptors, k) - % matches = matcher.knnMatch(queryDescriptors, k) - % [...] = matcher.knnMatch(..., 'OptionName', optionValue, ...) + % matches = matcher.knnMatch(queryDescriptors, trainDescriptors, k) + % matches = matcher.knnMatch(queryDescriptors, k) + % [...] = matcher.knnMatch(..., 'OptionName', optionValue, ...) % % ## Input % * __queryDescriptors__ query descriptors. % * __trainDescriptors__ dataset of descriptors furnished by user. % * __k__ number of the closest descriptors to be returned for - % every input query. + % every input query. % % ## Output % * __matches__ vector to host retrieved matches. % % ## Options % * __Mask__ mask to select which input descriptors must be - % matched to ones in dataset. A vector of masks in the - % second variant (the i-th mask in vector indicates whether - % each input query can be matched with descriptors in - % dataset relative to i-th image). Not set by default. + % matched to ones in dataset. A vector of masks in the second + % variant (the i-th mask in vector indicates whether each input + % query can be matched with descriptors in dataset relative to + % i-th image). Not set by default. % * __CompactResult__ flag to obtain a compact result (if true, a - % vector that doesn't contain any matches for a given query - % is not inserted in final result). default false + % vector that doesn't contain any matches for a given query is + % not inserted in final result). 
default false % % For every input descriptor, find the best k matching descriptors: % @@ -274,9 +275,9 @@ function train(this) function matches = radiusMatch(this, queryDescriptors, varargin) %RADIUSMATCH For every input query descriptor, retrieve, from a dataset provided from user or from the one internal to class, all the descriptors that are not further than maxDist from input query % - % matches = matcher.radiusMatch(queryDescriptors, trainDescriptors, maxDistance) - % matches = matcher.radiusMatch(queryDescriptors, maxDistance) - % [...] = matcher.radiusMatch(..., 'OptionName', optionValue, ...) + % matches = matcher.radiusMatch(queryDescriptors, trainDescriptors, maxDistance) + % matches = matcher.radiusMatch(queryDescriptors, maxDistance) + % [...] = matcher.radiusMatch(..., 'OptionName', optionValue, ...) % % ## Input % * __queryDescriptors__ query descriptors. @@ -288,13 +289,13 @@ function train(this) % % ## Options % * __Mask__ mask to select which input descriptors must be - % matched to ones in dataset. A vector of masks in the - % second variant (the i-th mask in vector indicates whether - % each input query can be matched with descriptors in - % dataset relative to i-th image). Not set by default. + % matched to ones in dataset. A vector of masks in the second + % variant (the i-th mask in vector indicates whether each input + % query can be matched with descriptors in dataset relative to + % i-th image). Not set by default. % * __CompactResult__ flag to obtain a compact result (if true, a - % vector that doesn't contain any matches for a given query - % is not inserted in final result). default false + % vector that doesn't contain any matches for a given query is + % not inserted in final result). default false % % For every input desciptor, find all the ones falling in a % certaing matching radius: diff --git a/opencv_contrib/+cv/BoostDesc.m b/opencv_contrib/+cv/BoostDesc.m index e8c8d8109..97d6c7055 100644 --- a/opencv_contrib/+cv/BoostDesc.m +++ b/opencv_contrib/+cv/BoostDesc.m @@ -18,41 +18,42 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = BoostDesc(varargin) %BOOSTDESC Constructor % - % obj = cv.BoostDesc() - % obj = cv.BoostDesc('OptionName',optionValue, ...) + % obj = cv.BoostDesc() + % obj = cv.BoostDesc('OptionName',optionValue, ...) % % ## Options % * __Desc__ Type of descriptor to use, 'BinBoost256' is default - % (256 bit long dimension). Available types are: - % * __BGM__ - % * __BGMHard__ - % * __BGMBilinear__ - % * __LBGM__ - % * __BinBoost64__ - % * __BinBoost128__ - % * __BinBoost256__ + % (256 bit long dimension). Available types are: + % * __BGM__ + % * __BGMHard__ + % * __BGMBilinear__ + % * __LBGM__ + % * __BinBoost64__ + % * __BinBoost128__ + % * __BinBoost256__ % * __UseScaleOrientation__ Sample patterns using keypoints - % orientation. default true + % orientation. default true % * __ScaleFactor__ Adjust the sampling window of detected - % keypoints. - % * 6.25 is default and fits for cv.KAZE, cv.SURF detected - % keypoints window ratio - % * 6.75 should be the scale for cv.SIFT detected keypoints - % window ratio - % * 5.00 should be the scale for cv.AKAZE, cv.MSDDetector, - % cv.AgastFeatureDetector, cv.FastFeatureDetector, - % cv.BRISK keypoints window ratio - % * 0.75 should be the scale for cv.ORB keypoints ratio - % * 1.50 was the default in original implementation - % - % ## Note + % keypoints. 
+ % * 6.25 is default and fits for cv.KAZE, cv.SURF detected + % keypoints window ratio + % * 6.75 should be the scale for cv.SIFT detected keypoints + % window ratio + % * 5.00 should be the scale for cv.AKAZE, cv.MSDDetector, + % cv.AgastFeatureDetector, cv.FastFeatureDetector, cv.BRISK + % keypoints window ratio + % * 0.75 should be the scale for cv.ORB keypoints ratio + % * 1.50 was the default in original implementation + % + % ### Note % `BGM` is the base descriptor where each binary dimension is % computed as the output of a single weak learner. `BGMHard` and % `BGMBilinear` refer to same `BGM` but use different type of @@ -77,7 +78,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.BoostDesc % @@ -88,7 +89,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -102,7 +103,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.BoostDesc.empty, cv.BoostDesc.load % @@ -112,11 +113,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.BoostDesc.clear % @@ -126,7 +127,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -142,21 +143,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -170,11 +171,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. 
% % See also: cv.BoostDesc.save, cv.BoostDesc.load % @@ -187,16 +188,16 @@ function load(this, fname_or_str, varargin) function ntype = defaultNorm(this) %DEFAULTNORM Returns the default norm type % - % ntype = obj.defaultNorm() + % ntype = obj.defaultNorm() % % ## Output % * __ntype__ Norm type. One of `cv::NormTypes`: - % * __Inf__ - % * __L1__ - % * __L2__ - % * __L2Sqr__ - % * __Hamming__ - % * __Hamming2__ + % * __Inf__ + % * __L1__ + % * __L2__ + % * __L2Sqr__ + % * __Hamming__ + % * __Hamming2__ % % See also: cv.BoostDesc.compute, cv.DescriptorMatcher % @@ -206,7 +207,7 @@ function load(this, fname_or_str, varargin) function sz = descriptorSize(this) %DESCRIPTORSIZE Returns the descriptor size in bytes % - % sz = obj.descriptorSize() + % sz = obj.descriptorSize() % % ## Output % * __sz__ Descriptor size. @@ -219,7 +220,7 @@ function load(this, fname_or_str, varargin) function dtype = descriptorType(this) %DESCRIPTORTYPE Returns the descriptor type % - % dtype = obj.descriptorType() + % dtype = obj.descriptorType() % % ## Output % * __dtype__ Descriptor type, one of numeric MATLAB class names. @@ -232,26 +233,26 @@ function load(this, fname_or_str, varargin) function [descriptors, keypoints] = compute(this, img, keypoints) %COMPUTE Computes the descriptors for a set of keypoints detected in an image or image set % - % [descriptors, keypoints] = obj.compute(img, keypoints) - % [descriptors, keypoints] = obj.compute(imgs, keypoints) + % [descriptors, keypoints] = obj.compute(img, keypoints) + % [descriptors, keypoints] = obj.compute(imgs, keypoints) % % ## Input % * __img__ Image (first variant), 8-bit grayscale image. % * __imgs__ Image set (second variant), cell array of images. % * __keypoints__ Input collection of keypoints. Keypoints for - % which a descriptor cannot be computed are removed. - % Sometimes new keypoints can be added, for example: cv.SIFT - % duplicates keypoint with several dominant orientations - % (for each orientation). In the first variant, this is a - % struct-array of detected keypoints. In the second variant, - % it is a cell-array, where `keypoints{i}` is a set of keypoints - % detected in `imgs{i}` (a struct-array like before). + % which a descriptor cannot be computed are removed. Sometimes + % new keypoints can be added, for example: cv.SIFT duplicates + % keypoint with several dominant orientations (for each + % orientation). In the first variant, this is a struct-array of + % detected keypoints. In the second variant, it is a cell-array, + % where `keypoints{i}` is a set of keypoints detected in + % `imgs{i}` (a struct-array like before). % % ## Output % * __descriptors__ Computed descriptors. In the second variant of - % the method `descriptors{i}` are descriptors computed for a - % `keypoints{i}`. Row `j` in `descriptors` (or - % `descriptors{i}`) is the descriptor for `j`-th keypoint. + % the method `descriptors{i}` are descriptors computed for a + % `keypoints{i}`. Row `j` in `descriptors` (or `descriptors{i}`) + % is the descriptor for `j`-th keypoint. % * __keypoints__ Optional output with possibly updated keypoints. 
% % See also: cv.BoostDesc.BoostDesc diff --git a/opencv_contrib/+cv/BriefDescriptorExtractor.m b/opencv_contrib/+cv/BriefDescriptorExtractor.m index 0c624811a..47cc53835 100644 --- a/opencv_contrib/+cv/BriefDescriptorExtractor.m +++ b/opencv_contrib/+cv/BriefDescriptorExtractor.m @@ -14,21 +14,22 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = BriefDescriptorExtractor(varargin) %BRIEFDESCRIPTOREXTRACTOR Constructor % - % obj = cv.BriefDescriptorExtractor() - % obj = cv.BriefDescriptorExtractor(..., 'OptionName',optionValue, ...) + % obj = cv.BriefDescriptorExtractor() + % obj = cv.BriefDescriptorExtractor(..., 'OptionName',optionValue, ...) % % ## Options % * __Bytes__ length of the descriptor in bytes, valid values are: - % 16, 32 (default) or 64. + % 16, 32 (default) or 64. % * __UseOrientation__ sample patterns using keypoints orientation, - % disabled by default. default false. + % disabled by default. default false. % % See also: cv.BriefDescriptorExtractor.compute % @@ -38,7 +39,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.BriefDescriptorExtractor % @@ -49,7 +50,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -63,7 +64,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.BriefDescriptorExtractor.empty, % cv.BriefDescriptorExtractor.load @@ -74,11 +75,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.BriefDescriptorExtractor.clear, % cv.BriefDescriptorExtractor.load @@ -89,7 +90,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -105,21 +106,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). 
The previous @@ -133,11 +134,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.BriefDescriptorExtractor.save, % cv.BriefDescriptorExtractor.load @@ -151,16 +152,16 @@ function load(this, fname_or_str, varargin) function ntype = defaultNorm(this) %DEFAULTNORM Returns the default norm type % - % ntype = obj.defaultNorm() + % ntype = obj.defaultNorm() % % ## Output % * __ntype__ Norm type. One of `cv::NormTypes`: - % * __Inf__ - % * __L1__ - % * __L2__ - % * __L2Sqr__ - % * __Hamming__ - % * __Hamming2__ + % * __Inf__ + % * __L1__ + % * __L2__ + % * __L2Sqr__ + % * __Hamming__ + % * __Hamming2__ % % Always `Hamming` for BRIEF. % @@ -173,11 +174,11 @@ function load(this, fname_or_str, varargin) function sz = descriptorSize(this) %DESCRIPTORSIZE Returns the descriptor size in bytes % - % sz = obj.descriptorSize() + % sz = obj.descriptorSize() % % ## Output % * __sz__ Descriptor size, as specified in `Bytes` argument in - % constructor. + % constructor. % % See also: cv.BriefDescriptorExtractor.descriptorType, % cv.BriefDescriptorExtractor.compute @@ -188,7 +189,7 @@ function load(this, fname_or_str, varargin) function dtype = descriptorType(this) %DESCRIPTORTYPE Returns the descriptor type % - % dtype = obj.descriptorType() + % dtype = obj.descriptorType() % % ## Output % * __dtype__ Descriptor type, one of numeric MATLAB class names. @@ -204,26 +205,26 @@ function load(this, fname_or_str, varargin) function [descriptors, keypoints] = compute(this, img, keypoints) %COMPUTE Computes the descriptors for a set of keypoints detected in an image or image set % - % [descriptors, keypoints] = obj.compute(img, keypoints) - % [descriptors, keypoints] = obj.compute(imgs, keypoints) + % [descriptors, keypoints] = obj.compute(img, keypoints) + % [descriptors, keypoints] = obj.compute(imgs, keypoints) % % ## Input % * __img__ Image (first variant), 8-bit grayscale image. % * __imgs__ Image set (second variant), cell array of images. % * __keypoints__ Input collection of keypoints. Keypoints for - % which a descriptor cannot be computed are removed. - % Sometimes new keypoints can be added, for example: cv.SIFT - % duplicates keypoint with several dominant orientations - % (for each orientation). In the first variant, this is a - % struct-array of detected keypoints. In the second variant, - % it is a cell-array, where `keypoints{i}` is a set of keypoints - % detected in `imgs{i}` (a struct-array like before). + % which a descriptor cannot be computed are removed. Sometimes + % new keypoints can be added, for example: cv.SIFT duplicates + % keypoint with several dominant orientations (for each + % orientation). In the first variant, this is a struct-array of + % detected keypoints. In the second variant, it is a cell-array, + % where `keypoints{i}` is a set of keypoints detected in + % `imgs{i}` (a struct-array like before). % % ## Output % * __descriptors__ Computed descriptors. In the second variant of - % the method `descriptors{i}` are descriptors computed for a - % `keypoints{i}`. Row `j` in `descriptors` (or - % `descriptors{i}`) is the descriptor for `j`-th keypoint. + % the method `descriptors{i}` are descriptors computed for a + % `keypoints{i}`. 
Row `j` in `descriptors` (or `descriptors{i}`) + % is the descriptor for `j`-th keypoint. % * __keypoints__ Optional output with possibly updated keypoints. % % See also: cv.BriefDescriptorExtractor.BriefDescriptorExtractor diff --git a/opencv_contrib/+cv/DAISY.m b/opencv_contrib/+cv/DAISY.m index 48e8dee7f..3c1a919ab 100644 --- a/opencv_contrib/+cv/DAISY.m +++ b/opencv_contrib/+cv/DAISY.m @@ -14,41 +14,42 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = DAISY(varargin) %DAISY Constructor % - % obj = cv.DAISY() - % obj = cv.DAISY(..., 'OptionName',optionValue, ...) + % obj = cv.DAISY() + % obj = cv.DAISY(..., 'OptionName',optionValue, ...) % % ## Options % * __Radius__ radius of the descriptor at the initial scale. - % default 15 + % default 15 % * __RadiusQuant__ amount of radial range division quantity. - % default 3 + % default 3 % * __AngleQuant__ amount of angular range division quantity. - % default 8 + % default 8 % * __GradOrientationsQuant__ amount of gradient orientations - % range division quantity. default 8 + % range division quantity. default 8 % * __Normalization__ choose descriptors normalization type, where - % * __None__ will not do any normalization (default) - % * __Partial__ mean that histograms are normalized - % independently for L2 norm equal to 1.0 - % * __Full__ mean that descriptors are normalized for L2 - % norm equal to 1.0 - % * __SIFT__ mean that descriptors are normalized for L2 - % norm equal to 1.0 but no individual one is bigger - % than 0.154 as in SIFT + % * __None__ will not do any normalization (default) + % * __Partial__ mean that histograms are normalized + % independently for L2 norm equal to 1.0 + % * __Full__ mean that descriptors are normalized for L2 norm + % equal to 1.0 + % * __SIFT__ mean that descriptors are normalized for L2 norm + % equal to 1.0 but no individual one is bigger than 0.154 as + % in SIFT % * __H__ optional 3x3 homography matrix used to warp the grid of - % daisy but sampling keypoints remains unwarped on image. - % default empty + % daisy but sampling keypoints remains unwarped on image. + % default empty % * __Interpolation__ switch to disable interpolation for speed - % improvement at minor quality loss. default true + % improvement at minor quality loss. default true % * __UseOrientation__ sample patterns using keypoints - % orientation, disabled by default. default false + % orientation, disabled by default. default false % % See also: cv.DAISY.compute % @@ -58,7 +59,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.DAISY % @@ -69,7 +70,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -83,7 +84,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.DAISY.empty, cv.DAISY.load % @@ -93,11 +94,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). 
% % See also: cv.DAISY.clear % @@ -107,7 +108,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -123,21 +124,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -151,11 +152,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.DAISY.save, cv.DAISY.load % @@ -168,16 +169,16 @@ function load(this, fname_or_str, varargin) function ntype = defaultNorm(this) %DEFAULTNORM Returns the default norm type % - % ntype = obj.defaultNorm() + % ntype = obj.defaultNorm() % % ## Output % * __ntype__ Norm type. One of `cv::NormTypes`: - % * __Inf__ - % * __L1__ - % * __L2__ - % * __L2Sqr__ - % * __Hamming__ - % * __Hamming2__ + % * __Inf__ + % * __L1__ + % * __L2__ + % * __L2Sqr__ + % * __Hamming__ + % * __Hamming2__ % % Always `L2` for DAISY. % @@ -189,7 +190,7 @@ function load(this, fname_or_str, varargin) function sz = descriptorSize(this) %DESCRIPTORSIZE Returns the descriptor size in floats % - % sz = obj.descriptorSize() + % sz = obj.descriptorSize() % % ## Output % * __sz__ Descriptor size. @@ -202,7 +203,7 @@ function load(this, fname_or_str, varargin) function dtype = descriptorType(this) %DESCRIPTORTYPE Returns the descriptor type % - % dtype = obj.descriptorType() + % dtype = obj.descriptorType() % % ## Output % * __dtype__ Descriptor type, one of numeric MATLAB class names. @@ -217,28 +218,28 @@ function load(this, fname_or_str, varargin) function [descriptors, keypoints] = compute(this, img, keypoints) %COMPUTE Computes the descriptors for a set of keypoints detected in an image or image set % - % [descriptors, keypoints] = obj.compute(img, keypoints) - % [descriptors, keypoints] = obj.compute(imgs, keypoints) + % [descriptors, keypoints] = obj.compute(img, keypoints) + % [descriptors, keypoints] = obj.compute(imgs, keypoints) % % ## Input % * __img__ Image (first variant), 8-bit grayscale image. Input - % image is internally converted to 32-bit floating-point in - % [0,1] range. + % image is internally converted to 32-bit floating-point in [0,1] + % range. 
% * __imgs__ Image set (second variant), cell array of images. % * __keypoints__ Input collection of keypoints. Keypoints for - % which a descriptor cannot be computed are removed. - % Sometimes new keypoints can be added, for example: cv.SIFT - % duplicates keypoint with several dominant orientations - % (for each orientation). In the first variant, this is a - % struct-array of detected keypoints. In the second variant, - % it is a cell-array, where `keypoints{i}` is a set of keypoints - % detected in `imgs{i}` (a struct-array like before). + % which a descriptor cannot be computed are removed. Sometimes + % new keypoints can be added, for example: cv.SIFT duplicates + % keypoint with several dominant orientations (for each + % orientation). In the first variant, this is a struct-array of + % detected keypoints. In the second variant, it is a cell-array, + % where `keypoints{i}` is a set of keypoints detected in + % `imgs{i}` (a struct-array like before). % % ## Output % * __descriptors__ Computed descriptors. In the second variant of - % the method `descriptors{i}` are descriptors computed for a - % `keypoints{i}`. Row `j` in `descriptors` (or - % `descriptors{i}`) is the descriptor for `j`-th keypoint. + % the method `descriptors{i}` are descriptors computed for a + % `keypoints{i}`. Row `j` in `descriptors` (or `descriptors{i}`) + % is the descriptor for `j`-th keypoint. % * __keypoints__ Optional output with possibly updated keypoints. % % See also: cv.DAISY.DAISY, cv.DAISY.compute_all @@ -252,18 +253,18 @@ function load(this, fname_or_str, varargin) function descriptors = compute_all(this, image, varargin) %COMPUTE_ALL Compute all descriptors of an image % - % descriptors = obj.compute_all(image) - % descriptors = obj.compute_all(image, roi) + % descriptors = obj.compute_all(image) + % descriptors = obj.compute_all(image, roi) % % ## Input % * __image__ image to extract descriptors. % * __roi__ optional region of interest within image - % `[x,y,width,height]`. + % `[x,y,width,height]`. % % ## Output % * __descriptors__ In the first variant, descriptors are computed - % for all image pixels. In the second variant, resulted - % descriptors array for ROI image pixels. + % for all image pixels. In the second variant, the resulting + % descriptor array is computed for ROI image pixels. % % See also: cv.DAISY.compute % @@ -273,8 +274,8 @@ function load(this, fname_or_str, varargin) function descriptor = GetDescriptor(this, y, x, orientation, varargin) %GETDESCRIPTOR Compute descriptor for the specified position % - % descriptor = obj.GetDescriptor(y, x, orientation) - % [...] = obj.GetDescriptor(..., 'OptionName',optionValue, ...) + % descriptor = obj.GetDescriptor(y, x, orientation) + % [...] = obj.GetDescriptor(..., 'OptionName',optionValue, ...) % % ## Input % * __y__ position y on image. @@ -285,10 +286,10 @@ function load(this, fname_or_str, varargin) % * __descriptor__ Output descriptor array computed at pixel. % % ## Options - % * __H__ optional 3x3 homography matrix for warped grid. - % Not set by default + % * __H__ optional 3x3 homography matrix for warped grid. Not set + % by default % * __Unnormalized__ set to return the unnormalized descriptor. 
- % default false + % default false % % See also: cv.DAISY.compute % diff --git a/opencv_contrib/+cv/DISOpticalFlow.m b/opencv_contrib/+cv/DISOpticalFlow.m index 873e3407a..b153831c7 100644 --- a/opencv_contrib/+cv/DISOpticalFlow.m +++ b/opencv_contrib/+cv/DISOpticalFlow.m @@ -25,7 +25,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -72,14 +73,14 @@ function this = DISOpticalFlow(varargin) %DISOPTICALFLOW Creates an instance of DISOpticalFlow % - % obj = cv.DISOpticalFlow() - % obj = cv.DISOpticalFlow('OptionName',optionValue, ...) + % obj = cv.DISOpticalFlow() + % obj = cv.DISOpticalFlow('OptionName',optionValue, ...) % % ## Options % * __Preset__ preset one of: - % * __UltraFast__ - % * __Fast__ (default) - % * __Medium__ + % * __UltraFast__ + % * __Fast__ (default) + % * __Medium__ % % See also: cv.DISOpticalFlow.calc % @@ -89,7 +90,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.DISOpticalFlow % @@ -103,17 +104,17 @@ function delete(this) function flow = calc(this, I0, I1, varargin) %CALC Calculates an optical flow % - % flow = obj.calc(I0, I1) - % flow = obj.calc(I0, I1, 'OptionName',optionValue, ...) + % flow = obj.calc(I0, I1) + % flow = obj.calc(I0, I1, 'OptionName',optionValue, ...) % % ## Input % * __I0__ first 8-bit single-channel input image. % * __I1__ second input image of the same size and the same type - % as `I0`. + % as `I0`. % % ## Output % * __flow__ computed flow image that has the same size as `I0` - % and type `single` (2-channels). + % and type `single` (2-channels). % % ## Options % * __InitialFlow__ specify the initial flow. Not set by default. @@ -126,7 +127,7 @@ function delete(this) function collectGarbage(this) %COLLECTGARBAGE Releases all inner buffers % - % obj.collectGarbage() + % obj.collectGarbage() % DISOpticalFlow_(this.id, 'collectGarbage'); end @@ -137,7 +138,7 @@ function collectGarbage(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.DISOpticalFlow.empty % @@ -147,11 +148,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.DISOpticalFlow.clear % @@ -161,11 +162,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.DISOpticalFlow.save, cv.DISOpticalFlow.load % @@ -175,7 +176,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -190,21 +191,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. 
% * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from a file storage. % The previous model state is discarded. diff --git a/opencv_contrib/+cv/DPMDetector.m b/opencv_contrib/+cv/DPMDetector.m index bd06d4a08..3b1cdff3e 100644 --- a/opencv_contrib/+cv/DPMDetector.m +++ b/opencv_contrib/+cv/DPMDetector.m @@ -41,17 +41,17 @@ % ## Example % The basic usage is the following: % - % img = imread(fullfile(mexopencv.root(),'test','cat.jpg')); - % detector = cv.DPMDetector(fullfile(mexopencv.root(),'test','cat.xml')); - % detections = detector.detect(img); - % for i=1:numel(detections) - % img = cv.rectangle(img, detections(i).rect, 'Color',[0 255 0]); - % end - % imshow(img) + % img = imread(fullfile(mexopencv.root(),'test','cat.jpg')); + % detector = cv.DPMDetector(fullfile(mexopencv.root(),'test','cat.xml')); + % detections = detector.detect(img); + % for i=1:numel(detections) + % img = cv.rectangle(img, detections(i).rect, 'Color',[0 255 0]); + % end + % imshow(img) % % The detector can also accept multiple models as cell array of strings: % - % detector = cv.DPMDetector({'cat.xml','car.xml'}); + % detector = cv.DPMDetector({'cat.xml','car.xml'}); % % The XML file must be a format compatible to OpenCV's DPM detector, but % you can convert models from the original implementation in @@ -87,17 +87,16 @@ function this = DPMDetector(filenames, varargin) %DPMDETECTOR Load and create a new detector % - % detector = cv.DPMDetector(filenames) - % detector = cv.DPMDetector(filenames, classnames) + % detector = cv.DPMDetector(filenames) + % detector = cv.DPMDetector(filenames, classnames) % % ## Input % * __filenames__ A set of filenames storing the trained detectors - % (models). Each file contains one model. + % (models). Each file contains one model. % * __classnames__ (optional) A set of trained models names. If - % it's empty then the name of each model will be - % constructed from the name of file containing the model. - % E.g. the model stored in "/home/user/cat.xml" will get - % the name "cat". + % it's empty then the name of each model will be constructed + % from the name of file containing the model. E.g. the model + % stored in "/home/user/cat.xml" will get the name "cat". % % Load the trained models from given .xml files and return a new % DPMDetector detector. @@ -117,7 +116,7 @@ function delete(this) %DELETE Destructor % - % detector.delete() + % detector.delete() % % See also: cv.DPMDetector % @@ -128,7 +127,7 @@ function delete(this) function status = isEmpty(this) %ISEMPTY Check if the detector is empty % - % status = detector.isEmpty() + % status = detector.isEmpty() % % ## Output % * __status__ boolean @@ -141,7 +140,7 @@ function delete(this) function names = getClassNames(this) %GETCLASSNAMES Get names of the object classes % - % names = detector.getClassNames() + % names = detector.getClassNames() % % ## Output % * __names__ a cell array of strings. 
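As a complement to the constructor example earlier in this class, a short sketch combining getClassNames and detect; the score threshold below is an arbitrary assumption used to illustrate filtering, not a value taken from the model:

```matlab
% Detect objects, then keep only confident detections and label them.
detector = cv.DPMDetector(fullfile(mexopencv.root(),'test','cat.xml'));
names = detector.getClassNames();      % e.g. {'cat'}
img = imread(fullfile(mexopencv.root(),'test','cat.jpg'));
objects = detector.detect(img);
for i = 1:numel(objects)
    if objects(i).score > -0.5         % assumed threshold; tune per model
        img = cv.rectangle(img, objects(i).rect, 'Color',[0 255 0]);
        img = cv.putText(img, objects(i).class, objects(i).rect(1:2), ...
            'Color',[0 255 0]);
    end
end
imshow(img)
```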
@@ -157,7 +156,7 @@ function delete(this) function count = getClassCount(this) %GETCLASSCOUNT Return a count of loaded models (classes) % - % count = detector.getClassCount() + % count = detector.getClassCount() % % ## Output % * __count__ a numeric value. @@ -170,18 +169,18 @@ function delete(this) function objects = detect(this, img) %DETECT Detects objects % - % objects = detector.detect(img) + % objects = detector.detect(img) % % ## Input % * __img__ An image where objects are to be detected - % (8-bit integer or 64-bit floating-point color image). + % (8-bit integer or 64-bit floating-point color image). % % ## Output % * __objects__ The detections. A struct array of detected objects - % with the following fields: - % * __rect__ rectangle `[x,y,w,h]` of the object - % * __score__ score of the detection - % * __class__ name of the object class + % with the following fields: + % * __rect__ rectangle `[x,y,w,h]` of the object + % * __score__ score of the detection + % * __class__ name of the object class % % Find rectangular regions in the given image that are likely to % contain objects of loaded classes (models) and corresponding @@ -197,21 +196,21 @@ function delete(this) function mat2opencvxml(matpath, xmlpath) %MAT2OPENCVXML Convert DPM 2007 model (MAT) to cascade model (XML) % - % cv.DPMDetector.mat2opencvxml(matpath, xmlpath) + % cv.DPMDetector.mat2opencvxml(matpath, xmlpath) % % ## Input % * __matpath__ input MAT filename, path to the DPM VOC 2007 - % model. + % model. % * __xmlpath__ output XML filename, path to the OpenCV file - % storage model. + % storage model. % % The method converts [Felzenszwalb2010] model files to xml % format specified by OpenCV's implementation. The usage is the % following: % - % matpath = 'VOC2007/cat_final.mat'; - % xmlpath = 'cat.xml'; - % cv.DPMDetector.mat2opencvxml(matpath, xmlpath); + % matpath = 'VOC2007/cat_final.mat'; + % xmlpath = 'cat.xml'; + % cv.DPMDetector.mat2opencvxml(matpath, xmlpath); % % Check the latest models in: % diff --git a/opencv_contrib/+cv/DTFilter.m b/opencv_contrib/+cv/DTFilter.m index 4a612c6b6..8ed287dca 100644 --- a/opencv_contrib/+cv/DTFilter.m +++ b/opencv_contrib/+cv/DTFilter.m @@ -13,34 +13,35 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = DTFilter(guide, varargin) %DTFILTER Factory method, create instance of DTFilter and produce initialization routines % - % obj = cv.DTFilter(guide) - % obj = cv.DTFilter(guide, 'OptionName',optionValue, ...) + % obj = cv.DTFilter(guide) + % obj = cv.DTFilter(guide, 'OptionName',optionValue, ...) % % ## Input % * __guide__ guided image (used to build transformed distance, - % which describes edge structure of guided image). + % which describes edge structure of guided image). % % ## Options % * __SigmaSpatial__ `sigma_H` parameter in the original article, - % it's similar to the sigma in the coordinate space into - % cv.bilateralFilter. default 10.0 + % it's similar to the sigma in the coordinate space into + % cv.bilateralFilter. default 10.0 % * __SigmaColor__ `sigma_r` parameter in the original article, - % it's similar to the sigma in the color space into - % cv.bilateralFilter. default 25.0 + % it's similar to the sigma in the color space into + % cv.bilateralFilter. default 25.0 % * __Mode__ one form three modes which corresponds to three modes - % for filtering 2D signals in the article. default 'NC': - % * __NC__ Normalized Convolution (NC). - % * __IC__ Interpolated Convolution (IC). 
- % * __RF__ Recursive Filtering (RF). + % for filtering 2D signals in the article. default 'NC': + % * __NC__ Normalized Convolution (NC). + % * __IC__ Interpolated Convolution (IC). + % * __RF__ Recursive Filtering (RF). % * __NumIters__ optional number of iterations used for filtering, - % 3 is quite enough. default 3 + % 3 is quite enough. default 3 % % For more details about Domain Transform filter parameters, see % the original article [Gastal11] and Domain Transform filter @@ -54,7 +55,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.DTFilter % @@ -68,7 +69,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.DTFilter.empty, cv.DTFilter.load % @@ -78,11 +79,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.DTFilter.clear, cv.DTFilter.load % @@ -92,7 +93,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -108,21 +109,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -136,11 +137,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.DTFilter.save, cv.DTFilter.load % @@ -153,20 +154,19 @@ function load(this, fname_or_str, varargin) function dst = filter(this, src, varargin) %FILTER Produce domain transform filtering operation on source image % - % dst = obj.filter(src) - % dst = obj.filter(src, 'OptionName',optionValue, ...) + % dst = obj.filter(src) + % dst = obj.filter(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ filtering image with unsigned 8-bit or floating-point - % 32-bit depth and up to 4 channels. + % 32-bit depth and up to 4 channels. % % ## Output % * __dst__ destination image. 
% % ## Options % * __DDepth__ optional depth of the output image. `DDepth` can be - % set to -1, which will be equivalent to `class(src)`. - % default -1 + % set to -1, which will be equivalent to `class(src)`. default -1 % % See also: cv.DTFilter.DTFilter, cv.DTFilter.dtFilter % @@ -178,33 +178,33 @@ function load(this, fname_or_str, varargin) function dst = dtFilter(src, guide, varargin) %DTFILTER Simple one-line Domain Transform filter call % - % dst = cv.DTFilter.dtFilter(src, guide) - % dst = cv.DTFilter.dtFilter(src, guide, 'OptionName',optionValue, ...) + % dst = cv.DTFilter.dtFilter(src, guide) + % dst = cv.DTFilter.dtFilter(src, guide, 'OptionName',optionValue, ...) % % ## Input % * __src__ filtering image with unsigned 8-bit or floating-point - % 32-bit depth and up to 4 channels. + % 32-bit depth and up to 4 channels. % * __guide__ guided image (also called as joint image) with - % unsigned 8-bit or floating-point 32-bit depth and up to 4 - % channels. + % unsigned 8-bit or floating-point 32-bit depth and up to 4 + % channels. % % ## Output % * __dst__ destination image. % % ## Options % * __SigmaSpatial__ `sigma_H` parameter in the original article, - % it's similar to the sigma in the coordinate space into - % cv.bilateralFilter. default 10.0 + % it's similar to the sigma in the coordinate space into + % cv.bilateralFilter. default 10.0 % * __SigmaColor__ `sigma_r` parameter in the original article, - % it's similar to the sigma in the color space into - % cv.bilateralFilter. default 25.0 + % it's similar to the sigma in the color space into + % cv.bilateralFilter. default 25.0 % * __Mode__ one form three modes which corresponds to three modes - % for filtering 2D signals in the article. default 'NC': - % * __NC__ Normalized Convolution (NC). - % * __IC__ Interpolated Convolution (IC). - % * __RF__ Recursive Filtering (RF). + % for filtering 2D signals in the article. default 'NC': + % * __NC__ Normalized Convolution (NC). + % * __IC__ Interpolated Convolution (IC). + % * __RF__ Recursive Filtering (RF). % * __NumIters__ optional number of iterations used for filtering, - % 3 is quite enough. default 3 + % 3 is quite enough. default 3 % % If you have multiple images to filter with the same guided image % then use DTFilter interface to avoid extra computations on diff --git a/opencv_contrib/+cv/Dataset.m b/opencv_contrib/+cv/Dataset.m index dac31341d..7850666ab 100644 --- a/opencv_contrib/+cv/Dataset.m +++ b/opencv_contrib/+cv/Dataset.m @@ -32,45 +32,47 @@ % properties (SetAccess = private) - id % Object ID - klass % Object class + % Object ID + id + % Object class + klass end methods function this = Dataset(dstype) %DATASET Constructor % - % ds = cv.Dataset(dstype) + % ds = cv.Dataset(dstype) % % ## Input % * __dstype__ Dataset class implementation. 
One of: - % * **AR_hmdb** HMDB: A Large Human Motion Database - % * **AR_sports** Sports-1M Dataset - % * **FR_adience** Adience - % * **FR_lfw** Labeled Faces in the Wild - % * **GR_chalearn** ChaLearn Looking at People - % * **GR_skig** Sheffield Kinect Gesture Dataset - % * **HPE_humaneva** HumanEva Dataset - % * **HPE_parse** PARSE Dataset - % * **IR_affine** Affine Covariant Regions Datasets - % * **IR_robot** Robot Data Set - % * **IS_bsds** The Berkeley Segmentation Dataset and Benchmark - % * **IS_weizmann** Weizmann Segmentation Evaluation Database - % * **MSM_epfl** EPFL Multi-View Stereo - % * **MSM_middlebury** Stereo - Middlebury Computer Vision - % * **OR_imagenet** ImageNet - % * **OR_mnist** MNIST - % * **OR_pascal** PASCAL Object Recognition Database - % * **OR_sun** SUN Database - % * **PD_caltech** Caltech Pedestrian Detection Benchmark - % * **PD_inria** INRIA Person Dataset - % * **SLAM_kitti** KITTI Vision Benchmark - % * **SLAM_tumindoor** TUMindoor Dataset - % * **TR_chars** The Chars74K Dataset - % * **TR_icdar** ICDAR - % * **TR_svt** The Street View Text Dataset - % * **TRACK_vot** VOT 2015 Database - % * **TRACK_alov** Amsterdam Library of Ordinary Videos (ALOV++) + % * **AR_hmdb** HMDB: A Large Human Motion Database + % * **AR_sports** Sports-1M Dataset + % * **FR_adience** Adience + % * **FR_lfw** Labeled Faces in the Wild + % * **GR_chalearn** ChaLearn Looking at People + % * **GR_skig** Sheffield Kinect Gesture Dataset + % * **HPE_humaneva** HumanEva Dataset + % * **HPE_parse** PARSE Dataset + % * **IR_affine** Affine Covariant Regions Datasets + % * **IR_robot** Robot Data Set + % * **IS_bsds** The Berkeley Segmentation Dataset and Benchmark + % * **IS_weizmann** Weizmann Segmentation Evaluation Database + % * **MSM_epfl** EPFL Multi-View Stereo + % * **MSM_middlebury** Stereo - Middlebury Computer Vision + % * **OR_imagenet** ImageNet + % * **OR_mnist** MNIST + % * **OR_pascal** PASCAL Object Recognition Database + % * **OR_sun** SUN Database + % * **PD_caltech** Caltech Pedestrian Detection Benchmark + % * **PD_inria** INRIA Person Dataset + % * **SLAM_kitti** KITTI Vision Benchmark + % * **SLAM_tumindoor** TUMindoor Dataset + % * **TR_chars** The Chars74K Dataset + % * **TR_icdar** ICDAR + % * **TR_svt** The Street View Text Dataset + % * **TRACK_vot** VOT 2015 Database + % * **TRACK_alov** Amsterdam Library of Ordinary Videos (ALOV++) % % ### HMDB: A Large Human Motion Database % @@ -85,8 +87,8 @@ % and remove them. % - To load data run: % - % ds = cv.Dataset('AR_hmdb'); - % ds.load('/home/user/path_to_unpacked_folders/'); + % ds = cv.Dataset('AR_hmdb'); + % ds.load('/home/user/path_to_unpacked_folders/'); % % Benchmark: % @@ -112,8 +114,8 @@ % [files](https://code.google.com/p/sports-1m-dataset/). % - To load data run: % - % ds = cv.Dataset('AR_sports'); - % ds.load('/home/user/path_to_downloaded_folders/'); + % ds = cv.Dataset('AR_sports'); + % ds.load('/home/user/path_to_downloaded_folders/'); % % ### Adience % @@ -131,8 +133,8 @@ % the same folder. % - To load data run: % - % ds = cv.Dataset('FR_adience'); - % ds.load('/home/user/path_to_created_folder/'); + % ds = cv.Dataset('FR_adience'); + % ds.load('/home/user/path_to_created_folder/'); % % ### Labeled Faces in the Wild % @@ -149,8 +151,8 @@ % `pairsDevTrain.txt` in created folder. 
% - To load data run: % - % ds = cv.Dataset('FR_lfw'); - % ds.load('/home/user/path_to_unpacked_folder/lfw2/'); + % ds = cv.Dataset('FR_lfw'); + % ds.load('/home/user/path_to_unpacked_folder/lfw2/'); % % Benchmark: % @@ -181,8 +183,8 @@ % `Sample0001/` % - To load data run: % - % ds = cv.Dataset('GR_chalearn'); - % ds.load('/home/user/path_to_unpacked_folders/'); + % ds = cv.Dataset('GR_chalearn'); + % ds.load('/home/user/path_to_unpacked_folders/'); % % ### Sheffield Kinect Gesture Dataset % @@ -197,8 +199,8 @@ % - Unpack them. % - To load data run: % - % ds = cv.Dataset('GR_skig'); - % ds.load('/home/user/path_to_unpacked_folders/'); + % ds = cv.Dataset('GR_skig'); + % ds.load('/home/user/path_to_unpacked_folders/'); % % ### HumanEva Dataset % @@ -212,8 +214,8 @@ % - Unpack them to `HumanEva_1` and `HumanEva_2` accordingly. % - To load data run: % - % ds = cv.Dataset('HPE_humaneva'); - % ds.load('/home/user/path_to_unpacked_folders/'); + % ds = cv.Dataset('HPE_humaneva'); + % ds.load('/home/user/path_to_unpacked_folders/'); % % ### PARSE Dataset % @@ -226,8 +228,8 @@ % - Unpack it. % - To load data run: % - % ds = cv.Dataset('HPE_parse'); - % ds.load('/home/user/path_to_unpacked_folder/people_all/'); + % ds = cv.Dataset('HPE_parse'); + % ds.load('/home/user/path_to_unpacked_folder/people_all/'); % % ### Affine Covariant Regions Datasets % @@ -241,8 +243,8 @@ % - Unpack them. % - To load data, for example, for "bark", run: % - % ds = cv.Dataset('IR_affine'); - % ds.load('/home/user/path_to_unpacked_folder/bark/'); + % ds = cv.Dataset('IR_affine'); + % ds.load('/home/user/path_to_unpacked_folder/bark/'); % % ### Robot Data Set % @@ -256,8 +258,8 @@ % - Unpack them to one folder. % - To load data run: % - % ds = cv.Dataset('IR_robot'); - % ds.load('/home/user/path_to_unpacked_folder/'); + % ds = cv.Dataset('IR_robot'); + % ds.load('/home/user/path_to_unpacked_folder/'); % % ### The Berkeley Segmentation Dataset and Benchmark % @@ -271,8 +273,8 @@ % - Unpack them. % - To load data run: % - % ds = cv.Dataset('IS_bsds'); - % ds.load('/home/user/path_to_unpacked_folder/BSDS300/'); + % ds = cv.Dataset('IS_bsds'); + % ds.load('/home/user/path_to_unpacked_folder/BSDS300/'); % % ### Weizmann Segmentation Evaluation Database % @@ -286,8 +288,8 @@ % - Unpack them. % - To load data, for example, for `1 object` dataset, run: % - % ds = cv.Dataset('IS_weizmann'); - % ds.load('/home/user/path_to_unpacked_folder/1obj/'); + % ds = cv.Dataset('IS_weizmann'); + % ds.load('/home/user/path_to_unpacked_folder/1obj/'); % % ### EPFL Multi-View Stereo % @@ -306,8 +308,8 @@ % `fountain_dense_p.tar.gz -> P/` % - To load data, for example, for "fountain", run: % - % ds = cv.Dataset('MSM_epfl'); - % ds.load('/home/user/path_to_unpacked_folder/fountain/'); + % ds = cv.Dataset('MSM_epfl'); + % ds.load('/home/user/path_to_unpacked_folder/fountain/'); % % ### Stereo - Middlebury Computer Vision % @@ -321,8 +323,8 @@ % - Unpack them. % - To load data, for example "temple" dataset, run: % - % ds = cv.Dataset('MSM_middlebury'); - % ds.load('/home/user/path_to_unpacked_folder/temple/'); + % ds = cv.Dataset('MSM_middlebury'); + % ds.load('/home/user/path_to_unpacked_folder/temple/'); % % ### ImageNet % @@ -348,19 +350,21 @@ % - Unpack all tar files in train. 
% - To load data run: % - % ds = cv.Dataset('OR_imagenet'); - % ds.load('/home/user/some_folder/'); + % ds = cv.Dataset('OR_imagenet'); + % ds.load('/home/user/some_folder/'); % % Python script to parse `meta.mat`: % - % import scipy.io - % meta_mat = scipy.io.loadmat("devkit-1.0/data/meta.mat") + % ```python + % import scipy.io + % meta_mat = scipy.io.loadmat("devkit-1.0/data/meta.mat") % - % labels_dic = dict((m[0][1][0], m[0][0][0][0]-1) for m in meta_mat['synsets'] - % label_names_dic = dict((m[0][1][0], m[0][2][0]) for m in meta_mat['synsets'] + % labels_dic = dict((m[0][1][0], m[0][0][0][0]-1) for m in meta_mat['synsets']) + % label_names_dic = dict((m[0][1][0], m[0][2][0]) for m in meta_mat['synsets']) + % + % for label in labels_dic.keys(): + % print("{0},{1},{2}".format(label, labels_dic[label], label_names_dic[label])) + % ``` % - % for label in labels_dic.keys(): - % print "{0},{1},{2}".format(label, labels_dic[label], label_names_dic[label]) % % ### MNIST % @@ -375,8 +379,8 @@ % - Unpack them. % - To load data run: % - % ds = cv.Dataset('OR_mnist'); - % ds.load('/home/user/path_to_unpacked_files/'); + % ds = cv.Dataset('OR_mnist'); + % ds.load('/home/user/path_to_unpacked_files/'); % % ### SUN Database % @@ -391,8 +395,8 @@ % into folder: `SUN397/Partitions/` % - To load data run: % - % ds = cv.Dataset('OR_sun'); - % ds.load('/home/user/path_to_unpacked_files/SUN397/'); + % ds = cv.Dataset('OR_sun'); + % ds.load('/home/user/path_to_unpacked_files/SUN397/'); % % ### Caltech Pedestrian Detection Benchmark % @@ -406,8 +410,8 @@ % - Unpack them to separate folder. % - To load data run: % - % ds = cv.Dataset('PD_caltech'); - % ds.load('/home/user/path_to_unpacked_folders/'); + % ds = cv.Dataset('PD_caltech'); + % ds.load('/home/user/path_to_unpacked_folders/'); % % Note: % @@ -438,8 +442,8 @@ % replaced after unpacking `data_odometry_calib.zip` at the end. % - To load data run: % - % ds = cv.Dataset('SLAM_kitti'); - % ds.load('/home/user/path_to_unpacked_folder/dataset/'); + % ds = cv.Dataset('SLAM_kitti'); + % ds.load('/home/user/path_to_unpacked_folder/dataset/'); % % ### TUMindoor Dataset % @@ -460,8 +464,8 @@ % `pointcloud.tar.bz2 -> pointcloud/`. % - To load each dataset run: % - % ds = cv.Dataset('SLAM_tumindoor'); - % ds.load('/home/user/path_to_unpacked_folders/'); + % ds = cv.Dataset('SLAM_tumindoor'); + % ds.load('/home/user/path_to_unpacked_folders/'); % % ### The Chars74K Dataset % @@ -478,8 +482,8 @@ % For example, `English/list_English_Img.m` for `EnglishImg.tgz`. % - To load data, for example "EnglishImg", run: % - % ds = cv.Dataset('TR_chars'); - % ds.load('/home/user/path_to_unpacked_folder/English/'); + % ds = cv.Dataset('TR_chars'); + % ds.load('/home/user/path_to_unpacked_folder/English/'); % % ### The Street View Text Dataset % @@ -492,8 +496,8 @@ % - Unpack it. 
% - To load data run: % - % ds = cv.Dataset('TR_svt'); - % ds.load('/home/user/path_to_unpacked_folder/svt/svt1/'); + % ds = cv.Dataset('TR_svt'); + % ds.load('/home/user/path_to_unpacked_folder/svt/svt1/'); % % Benchmark: % @@ -522,8 +526,8 @@ % - Unpack `vot2015.zip` into folder: `VOT2015/` % - To load data run: % - % ds = cv.Dataset('TRACK_vot'); - % ds.load('/home/user/path_to_unpacked_files/VOT2015/'); + % ds = cv.Dataset('TRACK_vot'); + % ds.load('/home/user/path_to_unpacked_files/VOT2015/'); % % ### Amsterdam Library of Ordinary Videos for tracking % @@ -539,7 +543,7 @@ function delete(this) %DELETE Destructor % - % ds.delete() + % ds.delete() % % See also: cv.Dataset % @@ -550,7 +554,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = ds.typeid() + % typename = ds.typeid() % % ## Output % * __typename__ Name of C++ type @@ -564,7 +568,7 @@ function delete(this) function load(this, dpath) %LOAD Load dataset % - % ds.load(dpth) + % ds.load(dpath) % % ## Input % * __dpath__ directory path for dataset files. @@ -582,7 +586,7 @@ function load(this, dpath) function num = getNumSplits(this) %GETNUMSPLITS Get Number of data splits % - % num = ds.getNumSplits() + % num = ds.getNumSplits() % % ## Output % * __num__ number of splits. @@ -596,13 +600,13 @@ function load(this, dpath) function data = getTrain(this, varargin) %GETTRAIN Get training data % - % data = ds.getTrain('OptionName',optionValue, ...) + % data = ds.getTrain('OptionName',optionValue, ...) % % ## Output % * __data__ training data samples. % % ## Options - % * __ SplitNum__ split number. default 0 + % * __SplitNum__ split number. default 0 % % See also: cv.Dataset.getTest, cv.Dataset.getValidation % @@ -612,13 +616,13 @@ function load(this, dpath) function data = getTest(this, varargin) %GETTEST Get testing data % - % data = ds.getTest('OptionName',optionValue, ...) + % data = ds.getTest('OptionName',optionValue, ...) % % ## Output % * __data__ testing data samples. % % ## Options - % * __ SplitNum__ split number. default 0 + % * __SplitNum__ split number. default 0 % % See also: cv.Dataset.getTrain, cv.Dataset.getValidation % @@ -628,13 +632,13 @@ function load(this, dpath) function data = getValidation(this, varargin) %GETVALIDATION Get validation data % - % data = ds.getValidation('OptionName',optionValue, ...) + % data = ds.getValidation('OptionName',optionValue, ...) % % ## Output % * __data__ validation data samples. % % ## Options - % * __ SplitNum__ split number. default 0 + % * __SplitNum__ split number. default 0 % % See also: cv.Dataset.getTrain, cv.Dataset.getTest % @@ -647,7 +651,7 @@ function load(this, dpath) function createDirectory(dirPath) %CREATEDIRECTORY Create directory % - % cv.Dataset.createDirectory(dirPath) + % cv.Dataset.createDirectory(dirPath) % % ## Input % * __dirPath__ directory path. @@ -660,7 +664,7 @@ function createDirectory(dirPath) function fileNames = getDirList(dirName) %GETDIRLIST Get directory listing % - % fileNames = cv.Dataset.createDirectory(dirName) + % fileNames = cv.Dataset.getDirList(dirName) % % ## Input % * __dirName__ directory name. @@ -676,7 +680,7 @@ function createDirectory(dirPath) function elems = split(s, delim) %SPLIT Split string by delimiter % - % elems = cv.Dataset.split(s, delim) + % elems = cv.Dataset.split(s, delim) % % ## Input % * __s__ string.
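Taken together, the cv.Dataset workflow is always the same: construct with a dataset tag, point `load` at the unpacked folder, then pull samples per split. A minimal sketch using the MNIST loader described above (the path is a placeholder):

```matlab
% parse the unpacked MNIST files (placeholder path)
ds = cv.Dataset('OR_mnist');
ds.load('/home/user/path_to_unpacked_files/');

% query the splits and fetch samples from the first one
num = ds.getNumSplits();
trainData = ds.getTrain('SplitNum',0);  % training samples
testData  = ds.getTest('SplitNum',0);   % testing samples
```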
diff --git a/opencv_contrib/+cv/DisparityWLSFilter.m b/opencv_contrib/+cv/DisparityWLSFilter.m index 561364ee2..03ade6dc9 100644 --- a/opencv_contrib/+cv/DisparityWLSFilter.m +++ b/opencv_contrib/+cv/DisparityWLSFilter.m @@ -11,7 +11,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -46,21 +47,20 @@ function this = DisparityWLSFilter(in) %DISPARITYWLSFILTER Factory method to create instance of DisparityWLSFilter % - % obj = cv.DisparityWLSFilter(matcher_left) - % obj = cv.DisparityWLSFilter(use_confidence) + % obj = cv.DisparityWLSFilter(matcher_left) + % obj = cv.DisparityWLSFilter(use_confidence) % % ## Input % * **matcher_left** stereo matcher instance that will be used - % with the filter. An object of one of the following - % classes: - % * __cv.StereoBM__ stereo correspondence using the block - % matching algorithm. - % * __cv.StereoSGBM__ stereo correspondence using the - % semi-global block matching algorithm. + % with the filter. An object of one of the following classes: + % * __cv.StereoBM__ stereo correspondence using the block + % matching algorithm. + % * __cv.StereoSGBM__ stereo correspondence using the + % semi-global block matching algorithm. % * **use_confidence** Boolean. Filtering with confidence requires - % two disparity maps (for the left and right views) and is - % approximately two times slower. However, quality is - % typically significantly better. + % two disparity maps (for the left and right views) and is + % approximately two times slower. However, quality is typically + % significantly better. % % The first variant is a convenience factory method that creates % an instance of DisparityWLSFilter and sets up all the relevant @@ -86,7 +86,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.DisparityWLSFilter % @@ -100,7 +100,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.DisparityWLSFilter.empty, cv.DisparityWLSFilter.load % @@ -110,11 +110,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.DisparityWLSFilter.clear, cv.DisparityWLSFilter.load % @@ -124,7 +124,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -140,21 +140,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). 
default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -168,11 +168,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.DisparityWLSFilter.save, cv.DisparityWLSFilter.load % @@ -185,34 +185,33 @@ function load(this, fname_or_str, varargin) function filtered_disparity_map = filter(this, disparity_map_left, disparity_map_right, left_view, varargin) %FILTER Apply filtering to the disparity map % - % filtered_disparity_map = obj.filter(disparity_map_left, disparity_map_right, left_view) - % filtered_disparity_map = obj.filter(..., 'OptionName',optionValue, ...) + % filtered_disparity_map = obj.filter(disparity_map_left, disparity_map_right, left_view) + % filtered_disparity_map = obj.filter(..., 'OptionName',optionValue, ...) % % ## Input % * **disparity_map_left** disparity map of the left view, - % 1 channel `int16` type. Implicitly assumes that disparity - % values are scaled by 16 (one-pixel disparity corresponds - % to the value of 16 in the disparity map). Disparity map - % can have any resolution, it will be automatically resized - % to fit `left_view` resolution. + % 1-channel `int16` type. Implicitly assumes that disparity + % values are scaled by 16 (one-pixel disparity corresponds to + % the value of 16 in the disparity map). Disparity map can have + % any resolution, it will be automatically resized to fit + % `left_view` resolution. % * **disparity_map_right** optional argument, some - % implementations might also use the disparity map of the - % right view to compute confidence maps, for instance. - % Pass an empty matrix `[]` if not used. + % implementations might also use the disparity map of the right + % view to compute confidence maps, for instance. Pass an empty + % matrix `[]` if not used. % * **left_view** left view of the original stereo-pair to guide - % the filtering process, 8-bit single-channel or - % three-channel image. + % the filtering process, 8-bit single-channel or three-channel + % image. % % ## Output % * **filtered_disparity_map** output disparity map. % % ## Options % * __ROI__ region of the disparity map to filter. Optional, - % usually it should be set automatically. Not set by - % default. + % usually it should be set automatically. Not set by default. % * __RightView__ optional argument, some implementations might - % also use the right view of the original stereo-pair. Not - % set by default. + % also use the right view of the original stereo-pair. Not set + % by default. % % See also: cv.DisparityWLSFilter.DisparityWLSFilter % @@ -226,14 +225,13 @@ function load(this, fname_or_str, varargin) function map = getConfidenceMap(this) %GETCONFIDENCEMAP Get the confidence map that was used in the last filter call % - % map = obj.getConfidenceMap() + % map = obj.getConfidenceMap() % % ## Output % * __map__ confidence map. 
It is a `single` one-channel image - % with values ranging from 0.0 (totally untrusted regions - % of the raw disparity map) to 255.0 (regions containing - % correct disparity values with a high degree of - % confidence). + % with values ranging from 0.0 (totally untrusted regions of the + % raw disparity map) to 255.0 (regions containing correct + % disparity values with a high degree of confidence). % % See also: cv.DisparityWLSFilter.getROI % @@ -243,7 +241,7 @@ function load(this, fname_or_str, varargin) function rect = getROI(this) %GETROI Get the ROI used in the last filter call % - % rect = obj.getROI() + % rect = obj.getROI() % % ## Output % * __rect__ region of interest `[x,y,w,h]`. @@ -290,20 +288,19 @@ function load(this, fname_or_str, varargin) function matcher_right = createRightMatcher(matcher_left) %CREATERIGHTMATCHER Convenience method to set up the matcher for computing the right-view disparity map that is required in case of filtering with confidence % - % matcher_right = cv.DisparityWLSFilter.createRightMatcher(matcher_left) + % matcher_right = cv.DisparityWLSFilter.createRightMatcher(matcher_left) % % ## Input - % * **matcher_left** main stereo matcher instance that will be - % used with the filter. An object of one of the following - % classes: - % * __cv.StereoBM__ stereo correspondence using the block - % matching algorithm. - % * __cv.StereoSGBM__ stereo correspondence using the - % semi-global block matching algorithm. + % * **matcher_left** main stereo matcher instance that will be used + % with the filter. An object of one of the following classes: + % * __cv.StereoBM__ stereo correspondence using the block + % matching algorithm. + % * __cv.StereoSGBM__ stereo correspondence using the + % semi-global block matching algorithm. % % ## Output % * **matcher_right** output right matcher. An object of same - % class as `matcher_left`. + % class as `matcher_left`. % % See also: cv.DisparityWLSFilter.DisparityWLSFilter, % cv.StereoBM, cv.StereoSGBM @@ -316,11 +313,11 @@ function load(this, fname_or_str, varargin) function dst = readGT(src_path) %READGT Function for reading ground truth disparity maps % - % dst = cv.DisparityWLSFilter.readGT(src_path) + % dst = cv.DisparityWLSFilter.readGT(src_path) % % ## Input % * **src_path** path to the image, containing ground-truth - % disparity map. + % disparity map. % % ## Output % * __dst__ output disparity map, `int16` depth. @@ -336,8 +333,8 @@ function load(this, fname_or_str, varargin) function mse = computeMSE(GT, src, varargin) %COMPUTEMSE Function for computing mean square error for disparity maps % - % mse = cv.DisparityWLSFilter.computeMSE(GT, src) - % mse = cv.DisparityWLSFilter.computeMSE(..., 'OptionName',optionValue, ...) + % mse = cv.DisparityWLSFilter.computeMSE(GT, src) + % mse = cv.DisparityWLSFilter.computeMSE(..., 'OptionName',optionValue, ...) % % ## Input % * __GT__ ground truth disparity map (`int16`). @@ -357,8 +354,8 @@ function load(this, fname_or_str, varargin) function prcnt = computeBadPixelPercent(GT, src, varargin) %COMPUTEBADPIXELPERCENT Function for computing the percent of "bad" pixels in the disparity map % - % prcnt = cv.DisparityWLSFilter.computeBadPixelPercent(GT, src) - % prcnt = cv.DisparityWLSFilter.computeBadPixelPercent(..., 'OptionName',optionValue, ...) + % prcnt = cv.DisparityWLSFilter.computeBadPixelPercent(GT, src) + % prcnt = cv.DisparityWLSFilter.computeBadPixelPercent(..., 'OptionName',optionValue, ...) % % ## Input % * __GT__ ground truth disparity map (`int16`). 
@@ -366,8 +363,8 @@ function load(this, fname_or_str, varargin) % % ## Output % * __prcnt__ returns percent of "bad" pixels between `GT` and - % `src` (pixels where error is higher than a specified - % threshold). + % `src` (pixels where error is higher than a specified + % threshold). % % ## Options % * __ROI__ region of interest `[x,y,w,h]`. Not set by default. @@ -382,8 +379,8 @@ function load(this, fname_or_str, varargin) function dst = getDisparityVis(src, varargin) %GETDISPARITYVIS Function for creating a disparity map visualization % - % dst = cv.DisparityWLSFilter.getDisparityVis(src) - % dst = cv.DisparityWLSFilter.getDisparityVis(src, 'OptionName',optionValue, ...) + % dst = cv.DisparityWLSFilter.getDisparityVis(src) + % dst = cv.DisparityWLSFilter.getDisparityVis(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ input disparity map (`int16` depth). @@ -393,7 +390,7 @@ function load(this, fname_or_str, varargin) % % ## Options % * __Scale__ disparity map will be multiplied by this value for - % visualization. default 1.0 + % visualization. default 1.0 % % See also: cv.DisparityWLSFilter.readGT % @@ -406,7 +403,7 @@ function load(this, fname_or_str, varargin) function C = StereoMatcher2Cell(matcher) %STEREOMATCHER2CELL Convert StereoMatcher object to a cell array of options % - % C = cv.DisparityWLSFilter.StereoMatcher2Cell(matcher) + % C = cv.DisparityWLSFilter.StereoMatcher2Cell(matcher) % % ## Input % * __matcher__ input matcher object. @@ -448,7 +445,7 @@ function load(this, fname_or_str, varargin) function matcher = Struct2StereoMatcher(S) %STRUCT2STEREOMATCHER Convert a struct of options to a StereoMatcher object % - % matcher = cv.DisparityWLSFilter.Struct2StereoMatcher(S) + % matcher = cv.DisparityWLSFilter.Struct2StereoMatcher(S) % % ## Input % * __S__ input struct. diff --git a/opencv_contrib/+cv/EdgeAwareInterpolator.m b/opencv_contrib/+cv/EdgeAwareInterpolator.m index 116f57c79..247979c2e 100644 --- a/opencv_contrib/+cv/EdgeAwareInterpolator.m +++ b/opencv_contrib/+cv/EdgeAwareInterpolator.m @@ -19,7 +19,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -57,7 +58,7 @@ function this = EdgeAwareInterpolator() %EDGEAWAREINTERPOLATOR Factory method that creates an instance of EdgeAwareInterpolator % - % obj = cv.EdgeAwareInterpolator() + % obj = cv.EdgeAwareInterpolator() % % See also: cv.EdgeAwareInterpolator.interpolate % @@ -67,7 +68,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.EdgeAwareInterpolator % @@ -81,7 +82,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.EdgeAwareInterpolator.empty, % cv.EdgeAwareInterpolator.load @@ -92,11 +93,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.EdgeAwareInterpolator.clear, % cv.EdgeAwareInterpolator.load @@ -107,7 +108,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. 
@@ -123,21 +124,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -151,11 +152,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.EdgeAwareInterpolator.save, % cv.EdgeAwareInterpolator.load @@ -169,24 +170,23 @@ function load(this, fname_or_str, varargin) function dense_flow = interpolate(this, from_image, from_points, to_image, to_points) %INTERPOLATE Interpolate input sparse matches % - % dense_flow = obj.interpolate(from_image, from_points, to_image, to_points) + % dense_flow = obj.interpolate(from_image, from_points, to_image, to_points) % % ## Input % * **from_image** first of the two matched images, 8-bit - % single-channel or three-channel. + % single-channel or three-channel. % * **from_points** points of the `from_image` for which there are - % correspondences in the `to_image` (a cell array of - % 2-element float vectors `{[x,y],..}`, size shouldn't - % exceed 32767). + % correspondences in the `to_image` (a cell array of 2-element + % float vectors `{[x,y],..}`, size shouldn't exceed 32767). % * **to_image** second of the two matched images, 8-bit - % single-channel or three-channel. + % single-channel or three-channel. % * **to_points** points in the `to_image` corresponding to - % `from_points` (a cell array of 2-element float vectors - % `{[x,y],..}`, size shouldn't exceed 32767). + % `from_points` (a cell array of 2-element float vectors + % `{[x,y],..}`, size shouldn't exceed 32767). % % ## Output % * **dense_flow** output dense matching (two-channel `single` - % image). + % image). % % See also: cv.EdgeAwareInterpolator.EdgeAwareInterpolator % diff --git a/opencv_contrib/+cv/FASTForPointSet.m b/opencv_contrib/+cv/FASTForPointSet.m new file mode 100644 index 000000000..1e29375b7 --- /dev/null +++ b/opencv_contrib/+cv/FASTForPointSet.m @@ -0,0 +1,39 @@ +%FASTFORPOINTSET Estimates cornerness for prespecified KeyPoints using the FAST algorithm +% +% keypoints = cv.FASTForPointSet(im, keypoints) +% keypoints = cv.FASTForPointSet(im, keypoints, 'OptionName', optionValue, ...) +% +% ## Input +% * __im__ 8-bit grayscale image where keypoints (corners) are to be detected. 
+% * __keypoints__ keypoints which should be tested to fit the FAST criteria. +% A 1-by-N structure array, with at least the "pt" field filled. In the +% output, keypoints not being detected as corners are removed. +% +% ## Output +% * __keypoints__ Keypoints detected on the image, with their "response" field +% calculated. +% +% ## Options +% * __Threshold__ Threshold on difference between intensity of the central +% pixel and pixels on a circle around this pixel. default 10. +% * __NonmaxSuppression__ If it is true, non-maximum suppression is applied to +% detected corners (keypoints). default true. +% * __Type__ one of the three neighborhoods as defined in the paper: +% * **TYPE_9_16** (default) +% * **TYPE_7_12** +% * **TYPE_5_8** +% +% Detects corners using the FAST algorithm by [Rosten06]. +% +% Rationale: Some applications only want to know if there are features at +% specific locations. To fit these needs, the cv.FAST approach is extended in +% cv.FASTForPointSet to receive a vector of locations and calculate the FAST +% response at these positions. If the response is below the threshold, the +% point is removed from the list. +% +% ## References +% [Rosten06]: +% > E. Rosten (Machine Learning for High-speed Corner Detection, 2006). +% +% See also: cv.FAST, cv.FastFeatureDetector +% diff --git a/opencv_contrib/+cv/FREAK.m b/opencv_contrib/+cv/FREAK.m index b5e911a92..c98291318 100644 --- a/opencv_contrib/+cv/FREAK.m +++ b/opencv_contrib/+cv/FREAK.m @@ -22,26 +22,27 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = FREAK(varargin) %FREAK Constructor % - % obj = cv.FREAK() - % obj = cv.FREAK(..., 'OptionName',optionValue, ...) + % obj = cv.FREAK() + % obj = cv.FREAK(..., 'OptionName',optionValue, ...) % % ## Options % * __OrientationNormalized__ Enable orientation normalization. - % default true + % default true % * __ScaleNormalized__ Enable scale normalization. default true % * __PatternScale__ Scaling of the description pattern. - % default 22.0 + % default 22.0 % * __NOctaves__ Number of octaves covered by the detected - % keypoints. default 4 + % keypoints. default 4 % * __SelectedPairs__ (Optional) user defined selected pairs - % indexes. Not set by default + % indexes. Not set by default % % See also: cv.FREAK.compute % @@ -51,7 +52,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.FREAK % @@ -62,7 +63,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -76,7 +77,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.FREAK.empty, cv.FREAK.load % @@ -86,11 +87,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.FREAK.clear, cv.FREAK.load % @@ -100,7 +101,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to.
@@ -116,21 +117,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -144,11 +145,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.FREAK.save, cv.FREAK.load % @@ -161,16 +162,16 @@ function load(this, fname_or_str, varargin) function ntype = defaultNorm(this) %DEFAULTNORM Returns the default norm type % - % ntype = obj.defaultNorm() + % ntype = obj.defaultNorm() % % ## Output % * __ntype__ Norm type. One of `cv::NormTypes`: - % * __Inf__ - % * __L1__ - % * __L2__ - % * __L2Sqr__ - % * __Hamming__ - % * __Hamming2__ + % * __Inf__ + % * __L1__ + % * __L2__ + % * __L2Sqr__ + % * __Hamming__ + % * __Hamming2__ % % Always `Hamming` for FREAK. % @@ -182,7 +183,7 @@ function load(this, fname_or_str, varargin) function sz = descriptorSize(this) %DESCRIPTORSIZE Returns the descriptor size in bytes % - % sz = obj.descriptorSize() + % sz = obj.descriptorSize() % % ## Output % * __sz__ Descriptor size. @@ -197,7 +198,7 @@ function load(this, fname_or_str, varargin) function dtype = descriptorType(this) %DESCRIPTORTYPE Returns the descriptor type % - % dtype = obj.descriptorType() + % dtype = obj.descriptorType() % % ## Output % * __dtype__ Descriptor type, one of numeric MATLAB class names. @@ -212,26 +213,26 @@ function load(this, fname_or_str, varargin) function [descriptors, keypoints] = compute(this, img, keypoints) %COMPUTE Computes the descriptors for a set of keypoints detected in an image or image set % - % [descriptors, keypoints] = obj.compute(img, keypoints) - % [descriptors, keypoints] = obj.compute(imgs, keypoints) + % [descriptors, keypoints] = obj.compute(img, keypoints) + % [descriptors, keypoints] = obj.compute(imgs, keypoints) % % ## Input % * __img__ Image (first variant), 8-bit/16-bit grayscale image. % * __imgs__ Image set (second variant), cell array of images. % * __keypoints__ Input collection of keypoints. Keypoints for - % which a descriptor cannot be computed are removed. - % Sometimes new keypoints can be added, for example: cv.SIFT - % duplicates keypoint with several dominant orientations - % (for each orientation). In the first variant, this is a - % struct-array of detected keypoints. 
In the second variant, - % it is a cell-array, where `keypoints{i}` is a set of keypoints - % detected in `imgs{i}` (a struct-array like before). + % which a descriptor cannot be computed are removed. Sometimes + % new keypoints can be added, for example: cv.SIFT duplicates + % keypoint with several dominant orientations (for each + % orientation). In the first variant, this is a struct-array of + % detected keypoints. In the second variant, it is a cell-array, + % where `keypoints{i}` is a set of keypoints detected in + % `imgs{i}` (a struct-array like before). % % ## Output % * __descriptors__ Computed descriptors. In the second variant of - % the method `descriptors{i}` are descriptors computed for a - % `keypoints{i}`. Row `j` in `descriptors` (or - % `descriptors{i}`) is the descriptor for `j`-th keypoint. + % the method `descriptors{i}` are descriptors computed for a + % `keypoints{i}`. Row `j` in `descriptors` (or `descriptors{i}`) + % is the descriptor for `j`-th keypoint. % * __keypoints__ Optional output with possibly updated keypoints. % % See also: cv.FREAK.FREAK diff --git a/opencv_contrib/+cv/FastGlobalSmootherFilter.m b/opencv_contrib/+cv/FastGlobalSmootherFilter.m index bd5e81da4..944bcf51a 100644 --- a/opencv_contrib/+cv/FastGlobalSmootherFilter.m +++ b/opencv_contrib/+cv/FastGlobalSmootherFilter.m @@ -20,31 +20,32 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = FastGlobalSmootherFilter(guide, varargin) %FASTGLOBALSMOOTHERFILTER Factory method, create instance of FastGlobalSmootherFilter and execute the initialization routines % - % obj = cv.FastGlobalSmootherFilter(guide) - % obj = cv.FastGlobalSmootherFilter(guide, 'OptionName',optionValue, ...) + % obj = cv.FastGlobalSmootherFilter(guide) + % obj = cv.FastGlobalSmootherFilter(guide, 'OptionName',optionValue, ...) % % ## Input % * __guide__ image serving as guide for filtering. It should have - % 8-bit depth and either 1 or 3 channels. + % 8-bit depth and either 1 or 3 channels. % % ## Options % * __Lambda__ parameter defining the amount of regularization. - % default 100.0 + % default 100.0 % * __SigmaColor__ parameter, that is similar to color space sigma - % in cv.bilateralFilter. default 5.0 + % in cv.bilateralFilter. default 5.0 % * __LambdaAttenuation__ internal parameter, defining how much - % lambda decreases after each iteration. Normally, it should - % be 0.25. Setting it to 1.0 may lead to streaking artifacts. - % default 0.25 + % lambda decreases after each iteration. Normally, it should be + % 0.25. Setting it to 1.0 may lead to streaking artifacts. + % default 0.25 % * __NumIter__ number of iterations used for filtering, 3 is - % usually enough. default 3 + % usually enough. default 3 % % For more details about Fast Global Smoother parameters, see the % original paper [Min2014]. However, please note that there are @@ -65,7 +66,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.FastGlobalSmootherFilter % @@ -79,7 +80,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.FastGlobalSmootherFilter.empty, % cv.FastGlobalSmootherFilter.load @@ -90,11 +91,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). 
+ % very beginning or after unsuccessful read). % % See also: cv.FastGlobalSmootherFilter.clear, % cv.FastGlobalSmootherFilter.load @@ -105,7 +106,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -121,21 +122,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -149,11 +150,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.FastGlobalSmootherFilter.save, % cv.FastGlobalSmootherFilter.load @@ -167,12 +168,12 @@ function load(this, fname_or_str, varargin) function dst = filter(this, src) %FILTER Apply smoothing operation to the source image % - % dst = obj.filter(src) + % dst = obj.filter(src) % % ## Input % * __src__ source image for filtering with unsigned 8-bit or - % signed 16-bit or floating-point 32-bit depth and up to 4 - % channels. + % signed 16-bit or floating-point 32-bit depth and up to 4 + % channels. % % ## Output % * __dst__ destination image. @@ -187,30 +188,30 @@ function load(this, fname_or_str, varargin) function dst = fastGlobalSmootherFilter(src, guide, varargin) %FASTGLOBALSMOOTHERFILTER Simple one-line Fast Global Smoother filter call % - % dst = cv.FastGlobalSmootherFilter.fastGlobalSmootherFilter(src, guide) - % dst = cv.FastGlobalSmootherFilter.fastGlobalSmootherFilter(src, guide, 'OptionName',optionValue, ...) + % dst = cv.FastGlobalSmootherFilter.fastGlobalSmootherFilter(src, guide) + % dst = cv.FastGlobalSmootherFilter.fastGlobalSmootherFilter(src, guide, 'OptionName',optionValue, ...) % % ## Input % * __src__ source image for filtering with unsigned 8-bit or - % signed 16-bit or floating-point 32-bit depth and up to 4 - % channels. + % signed 16-bit or floating-point 32-bit depth and up to 4 + % channels. % * __guide__ image serving as guide for filtering. It should have - % 8-bit depth and either 1 or 3 channels. + % 8-bit depth and either 1 or 3 channels. % % ## Output % * __dst__ destination image. % % ## Options % * __Lambda__ parameter defining the amount of regularization. 
- % default 100.0 + % default 100.0 % * __SigmaColor__ parameter, that is similar to color space sigma - % in cv.bilateralFilter. default 5.0 + % in cv.bilateralFilter. default 5.0 % * __LambdaAttenuation__ internal parameter, defining how much - % lambda decreases after each iteration. Normally, it should - % be 0.25. Setting it to 1.0 may lead to streaking artifacts. - % default 0.25 + % lambda decreases after each iteration. Normally, it should be + % 0.25. Setting it to 1.0 may lead to streaking artifacts. + % default 0.25 % * __NumIter__ number of iterations used for filtering, 3 is - % usually enough. default 3 + % usually enough. default 3 % % If you have multiple images to filter with the same guide then % use FastGlobalSmootherFilter interface to avoid extra diff --git a/opencv_contrib/+cv/FastHoughTransform.m b/opencv_contrib/+cv/FastHoughTransform.m index 33f44d15e..7c62d9db6 100644 --- a/opencv_contrib/+cv/FastHoughTransform.m +++ b/opencv_contrib/+cv/FastHoughTransform.m @@ -1,7 +1,7 @@ %FASTHOUGHTRANSFORM Calculates 2D Fast Hough transform of an image % -% dst = cv.FastHoughTransform(src) -% dst = cv.FastHoughTransform(src, 'OptionName',optionValue, ...) +% dst = cv.FastHoughTransform(src) +% dst = cv.FastHoughTransform(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ The source (input) image. @@ -11,55 +11,47 @@ % % ## Options % * __DDepth__ The depth of destination image. Default `int32`. -% * __Op__ The operation to be applied. -% This specifies binary operations, that is such ones which involve two -% operands. Formally, a binary operation `f` on a set `S` is a binary -% relation that maps elements of the Cartesian product `SxS` to `S`: -% `f: SxS -> S`. Default 'Addition'. One of -% * __Minimum__ Binary minimum operation. The constant specifies the -% binary minimum operation `f` that is defined as follows: -% `f(x, y) = min(x, y)`. -% * __Maximum__ Binary maximum operation. The constant specifies the -% binary maximum operation `f` that is defined as follows: -% `f(x, y) = max(x, y)`. -% * __Addition__ Binary addition operation. The constant specifies the -% binary addition operation `f` that is defined as follows: -% `f(x, y) = x + y`. -% * __Average__ Binary average operation. The constant specifies the -% binary average operation `f` that is defined as follows: -% `f(x, y) = (x + y)/2`. -% * __AngleRange__ The part of Hough space to calculate. -% This specifies the part of Hough space to calculate. Each member -% specifies primarily direction of lines (horizontal or vertical) and -% the direction of angle changes. Direction of angle changes is from -% multiples of 90 to odd multiples of 45. The image considered to be -% written top-down and left-to-right. Angles are started from vertical -% line and go clockwise. Separate quarters and halves are written in -% orientation they should be in full Hough space. Default `ARO_315_135`. -% One of: -% * **ARO_0_45** Vertical primarily direction and clockwise angle -% changes. -% * **ARO_45_90** Horizontal primarily direction and counterclockwise -% angle changes. -% * **ARO_90_135** Horizontal primarily direction and clockwise angle -% changes. -% * **ARO_315_0** Vertical primarily direction and counterclockwise -% angle changes. -% * **ARO_315_45** Vertical primarily direction. -% * **ARO_45_135** Horizontal primarily direction. -% * **ARO_315_135** Full set of directions. -% * **ARO_CTR_HOR** `90 +/- atan(0.5)`, interval approximately from -% `64.5` to `116.5` degrees. 
It is used for calculating Fast Hough -% Transform for images skewed by `atan(0.5)`. -% * **ARO_CTR_VER** `0 +/- atan(0.5)`, interval approximately from -% `333.5` (`-26.5`) to `26.5` degrees. It is used for calculating -% Fast Hough Transform for images skewed by `atan(0.5)`. +% * __Op__ The operation to be applied. This specifies binary operations, that +% is, operations which involve two operands. Formally, a binary operation `f` +% on a set `S` is a binary relation that maps elements of the Cartesian +% product `SxS` to `S`: `f: SxS -> S`. Default 'Addition'. One of +% * __Minimum__ Binary minimum operation. The constant specifies the binary +% minimum operation `f` that is defined as follows: `f(x, y) = min(x, y)`. +% * __Maximum__ Binary maximum operation. The constant specifies the binary +% maximum operation `f` that is defined as follows: `f(x, y) = max(x, y)`. +% * __Addition__ Binary addition operation. The constant specifies the binary +% addition operation `f` that is defined as follows: `f(x, y) = x + y`. +% * __Average__ Binary average operation. The constant specifies the binary +% average operation `f` that is defined as follows: `f(x, y) = (x + y)/2`. +% * __AngleRange__ The part of Hough space to calculate. Each member specifies +% primarily direction of lines (horizontal or vertical) and the direction of +% angle changes. Direction of angle changes is from multiples of 90 to odd +% multiples of 45. The image is considered to be written top-down and +% left-to-right. Angles start from the vertical line and go clockwise. +% Separate quarters and halves are written in the orientation they should +% have in the full Hough space. Default `ARO_315_135`. One of: +% * **ARO_0_45** Vertical primarily direction and clockwise angle changes. +% * **ARO_45_90** Horizontal primarily direction and counterclockwise angle +% changes. +% * **ARO_90_135** Horizontal primarily direction and clockwise angle +% changes. +% * **ARO_315_0** Vertical primarily direction and counterclockwise angle +% changes. +% * **ARO_315_45** Vertical primarily direction. +% * **ARO_45_135** Horizontal primarily direction. +% * **ARO_315_135** Full set of directions. +% * **ARO_CTR_HOR** `90 +/- atan(0.5)`, interval approximately from `64.5` +% to `116.5` degrees. It is used for calculating Fast Hough Transform for +% images skewed by `atan(0.5)`. +% * **ARO_CTR_VER** `0 +/- atan(0.5)`, interval approximately from `333.5` +% (`-26.5`) to `26.5` degrees. It is used for calculating Fast Hough +% Transform for images skewed by `atan(0.5)`. % * __MakeSkew__ Specifies to do or not to do skewing of Hough transform -% image. The enum specifies to do or not to do skewing of Hough -% transform image so it would be no cycling in Hough transform image -% through borders of image. Default 'Deskew'. One of: -% * __Raw__ Use raw cyclic image. -% * __Deskew__ Prepare deskewed image. +% image. The enum specifies whether or not to skew the Hough transform +% image, so that there is no cycling in the Hough transform image through +% the borders of the image. Default 'Deskew'. One of: +% * __Raw__ Use raw cyclic image. +% * __Deskew__ Prepare deskewed image. % % The function calculates the fast Hough transform for full, half or quarter % range of angles.
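A quick usage sketch (the image name is a placeholder, and cv.Canny is used here only to produce a binary edge map; the options shown are the documented defaults):

```matlab
% Fast Hough transform of an edge map
img = cv.imread('building.jpg', 'Grayscale',true);  % placeholder image
edges = cv.Canny(img, [50 200]);                    % any binarization works
fht = cv.FastHoughTransform(edges, 'DDepth','int32', ...
    'AngleRange','ARO_315_135', 'Op','Addition', 'MakeSkew','Deskew');
% fht is an int32 accumulator over the discretized line space
```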
diff --git a/opencv_contrib/+cv/FastLineDetector.m b/opencv_contrib/+cv/FastLineDetector.m index 8ed2582c4..626700a4f 100644 --- a/opencv_contrib/+cv/FastLineDetector.m +++ b/opencv_contrib/+cv/FastLineDetector.m @@ -15,7 +15,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end %% Constructor/destructor @@ -23,23 +24,23 @@ function this = FastLineDetector(varargin) %FASTLINEDETECTOR Creates a FastLineDetector object and initializes it % - % obj = cv.FastLineDetector() - % obj = cv.FastLineDetector('OptionName',optionValue, ...) + % obj = cv.FastLineDetector() + % obj = cv.FastLineDetector('OptionName',optionValue, ...) % % ## Options % * __LengthThreshold__ Segments shorter than this will be - % discarded. default 10 + % discarded. default 10 % * __DistanceThreshold__ A point placed from a hypothesis line - % segment farther than this will be regarded as an outlier. - % default 1.41421356 + % segment farther than this will be regarded as an outlier. + % default 1.41421356 % * __CannyThreshold1__ First threshold for hysteresis procedure - % in cv.Canny. default 50 + % in cv.Canny. default 50 % * __CannyThreshold2__ Second threshold for hysteresis procedure - % in cv.Canny. default 50 + % in cv.Canny. default 50 % * __CannyApertureSize__ Aperture size for the Sobel operator in - % cv.Canny. default 3 + % cv.Canny. default 3 % * __DoMerge__ If true, incremental merging of segments will be - % perfomred. default false + % performed. default false % % See also: cv.FastLineDetector.detect % @@ -49,7 +50,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.FastLineDetector % @@ -63,28 +64,28 @@ function delete(this) function lines = detect(this, img) %DETECT Finds lines in the input image % - % lines = obj.detect(img) + % lines = obj.detect(img) % % ## Input % * __img__ A grayscale (`uint8`) input image. % % ## Output % * __lines__ A cell array of 4-element vectors of the form - % `{[x1,y1, x2,y2], ..}` specifying the beginning and ending - % point of a line, where point 1 `[x1,y1]` is the start, - % point 2 `[x2,y2]` the end. Returned lines are directed so - % that the brighter side is on their left. + % `{[x1,y1, x2,y2], ..}` specifying the beginning and ending + % point of a line, where point 1 `[x1,y1]` is the start, point 2 + % `[x2,y2]` the end. Returned lines are directed so that the + % brighter side is on their left. % % An example output of the default parameters of the algorithm can % be seen here: % - % <> + % ![image](https://docs.opencv.org/3.3.1/corridor_fld.jpg) % % If only a ROI needs to be selected, use: % - % lines = obj.detect(cv.Rect.crop(image, roi)); - % lines = cat(1, lines{:}); - % lines = bsxfun(@plus, lines, roi); + % lines = obj.detect(cv.Rect.crop(image, roi)); + % lines = cat(1, lines{:}); + % lines = bsxfun(@plus, lines, roi); % % See also: cv.FastLineDetector.drawSegments % @@ -94,12 +95,12 @@ function delete(this) function img = drawSegments(this, img, lines, varargin) %DRAWSEGMENTS Draws the line segments on a given image % - % img = obj.drawSegments(img, lines) - % img = obj.drawSegments(img, lines, 'OptionName',optionValue, ...) + % img = obj.drawSegments(img, lines) + % img = obj.drawSegments(img, lines, 'OptionName',optionValue, ...) % % ## Input % * __img__ The image, where the lines will be drawn. Should be - % bigger or equal to the image, where the lines were found. + % bigger than or equal to the image where the lines were found.
% * __lines__ A vector of the lines that needed to be drawn. % % ## Output @@ -119,7 +120,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.FastLineDetector.empty, cv.FastLineDetector.load % @@ -129,11 +130,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.FastLineDetector.clear, cv.FastLineDetector.load % @@ -143,7 +144,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -159,21 +160,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -187,11 +188,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.FastLineDetector.save, cv.FastLineDetector.load % diff --git a/opencv_contrib/+cv/GPCForest.m b/opencv_contrib/+cv/GPCForest.m index a62d8c84c..409b3e964 100644 --- a/opencv_contrib/+cv/GPCForest.m +++ b/opencv_contrib/+cv/GPCForest.m @@ -4,7 +4,7 @@ % From the following paper: % [PDF](http://research.microsoft.com/en-us/um/people/pkohli/papers/wfrik_cvpr2016.pdf). % - % ## Usage: + % ## Usage % - Train forest for the Global Patch Collider (or load a pretrained one). % - Find correspondences between two images using Global Patch Collider. 
% These can be used to perform optical flow matching and stereo @@ -25,7 +25,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end %% Constructor/destructor @@ -33,7 +34,7 @@ function this = GPCForest() %GPCFOREST Creates an instance of GPCForest % - % obj = cv.GPCForest() + % obj = cv.GPCForest() % % See also: cv.GPCForest.findCorrespondences % @@ -43,7 +44,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.GPCForest % @@ -57,25 +58,25 @@ function delete(this) function train(this, imagesFrom, imagesTo, groundTruths, varargin) %TRAIN Train the forest using individual samples for each tree % - % obj.train(imagesFrom, imagesTo, groundTruths) - % obj.train(..., 'OptionName',optionValue, ...) + % obj.train(imagesFrom, imagesTo, groundTruths) + % obj.train(..., 'OptionName',optionValue, ...) % % ## Input % * __imagesFrom__ First sequence of images, a cell array of - % either filenames or 3-channel color images. + % either filenames or 3-channel color images. % * __imagesTo__ Second sequence of images, same size and format - % as `imagesFrom`. + % as `imagesFrom`. % * __groundTruths__ Ground truth flows, either flow fields or - % filenames (see cv.readOpticalFlow). + % filenames (see cv.readOpticalFlow). % % ## Options % * __MaxTreeDepth__ Maximum tree depth to stop partitioning. - % default 20 + % default 20 % * __MinNumberOfSamples__ Minimum number of samples in the node - % to stop partitioning. default 3 + % to stop partitioning. default 3 % * __DescriptorType__ Type of descriptors to use. One of: - % * __DCT__ (default) Better quality but slow. - % * __WHT__ Worse quality but much faster. + % * __DCT__ (default) Better quality but slow. + % * __WHT__ Worse quality but much faster. % * __PrintProgress__ Print progress to stdout. default false % % Inputs form the training samples (pairs of images and ground @@ -90,8 +91,8 @@ function train(this, imagesFrom, imagesTo, groundTruths, varargin) function corrs = findCorrespondences(this, imgFrom, imgTo, varargin) %FINDCORRESPONDENCES Find correspondences between two images % - % corrs = obj.findCorrespondences(imgFrom, imgTo) - % corrs = obj.findCorrespondences(..., 'OptionName',optionValue, ...) + % corrs = obj.findCorrespondences(imgFrom, imgTo) + % corrs = obj.findCorrespondences(..., 'OptionName',optionValue, ...) % % ## Input % * __imgFrom__ First 3-channel image in a sequence. % * __imgTo__ Second 3-channel image in a sequence. % % ## Output % * __corrs__ Output struct array with pairs of corresponding - % points. + % points. % % ## Options % * __UseOpenCL__ Whether to use OpenCL to speed up the matching. - % default false + % default false % % Options are the additional matching parameters for fine-tuning. % @@ -118,7 +119,7 @@ function train(this, imagesFrom, imagesTo, groundTruths, varargin) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.GPCForest.empty % @@ -128,11 +129,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read).
% % See also: cv.GPCForest.clear % @@ -142,11 +143,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.GPCForest.save, cv.GPCForest.load % @@ -156,7 +157,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -171,21 +172,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from a file storage. % The previous model state is discarded. diff --git a/opencv_contrib/+cv/GradientDeriche.m b/opencv_contrib/+cv/GradientDeriche.m index a59f90cf6..297ad130c 100644 --- a/opencv_contrib/+cv/GradientDeriche.m +++ b/opencv_contrib/+cv/GradientDeriche.m @@ -1,13 +1,13 @@ %GRADIENTDERICHE Applies Deriche filter to an image % -% dst = cv.GradientDeriche(op, dir) -% dst = cv.GradientDeriche(op, dir, 'OptionName',optionValue, ...) +% dst = cv.GradientDeriche(op, dir) +% dst = cv.GradientDeriche(op, dir, 'OptionName',optionValue, ...) % % ## Input % * __op__ Source 8-bit or 16-bit image, 1-channel or 3-channel image. % * __dir__ Filter direction. One of: -% * __X__ -% * __Y__ +% * __X__ +% * __Y__ % % ## Output % * __dst__ result `single` image with the same number of channels as `op`. diff --git a/opencv_contrib/+cv/GradientPaillou.m b/opencv_contrib/+cv/GradientPaillou.m index a1be8ebb7..ffce07ea4 100644 --- a/opencv_contrib/+cv/GradientPaillou.m +++ b/opencv_contrib/+cv/GradientPaillou.m @@ -1,13 +1,13 @@ %GRADIENTPAILLOU Applies Paillou filter to an image % -% dst = cv.GradientPaillou(op, dir) -% dst = cv.GradientPaillou(op, dir, 'OptionName',optionValue, ...) +% dst = cv.GradientPaillou(op, dir) +% dst = cv.GradientPaillou(op, dir, 'OptionName',optionValue, ...) % % ## Input % * __op__ Source 8-bit or 16-bit image, 1-channel or 3-channel image. % * __dir__ Gradient direction. One of: -% * __X__ -% * __Y__ +% * __X__ +% * __Y__ % % ## Output % * __dst__ result `single` image with the same number of channels as `op`.
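Both gradient filters share the same calling pattern, so the X and Y responses can be combined into a gradient magnitude. A minimal sketch (the image name is a placeholder; cv.GradientPaillou is called the same way):

```matlab
% Deriche derivatives along both axes, combined into a magnitude map
img = cv.imread('peppers.png');      % placeholder 8-bit image
gx = cv.GradientDeriche(img, 'X');   % `single` X-derivative, same channels
gy = cv.GradientDeriche(img, 'Y');   % `single` Y-derivative, same channels
mag = hypot(gx, gy);                 % per-pixel, per-channel gradient magnitude
```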
diff --git a/opencv_contrib/+cv/GraphSegmentation.m b/opencv_contrib/+cv/GraphSegmentation.m index 2f3b1a925..69accabf3 100644 --- a/opencv_contrib/+cv/GraphSegmentation.m +++ b/opencv_contrib/+cv/GraphSegmentation.m @@ -12,7 +12,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -28,12 +29,12 @@ function this = GraphSegmentation(varargin) %GRAPHSEGMENTATION Creates a graph based segmentor % - % obj = cv.GraphSegmentation() - % obj = cv.GraphSegmentation('OptionName',optionValue, ...) + % obj = cv.GraphSegmentation() + % obj = cv.GraphSegmentation('OptionName',optionValue, ...) % % ## Options % * __Sigma__ The sigma parameter, used to smooth image. - % default 0.5 + % default 0.5 % * __K__ The k parameter of the algorithm. default 300 % * __MinSize__ The minimum size of segments. default 100 % @@ -45,7 +46,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.GraphSegmentation % @@ -59,7 +60,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.GraphSegmentation.empty, cv.GraphSegmentation.load % @@ -69,11 +70,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.GraphSegmentation.clear, cv.GraphSegmentation.load % @@ -83,7 +84,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -99,21 +100,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -127,11 +128,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. 
% % See also: cv.GraphSegmentation.save, cv.GraphSegmentation.load % @@ -144,16 +145,16 @@ function load(this, fname_or_str, varargin) function dst = processImage(this, src) %PROCESSIMAGE Segment an image and store output in dst % - % dst = obj.processImage(src) + % dst = obj.processImage(src) % % ## Input % * __src__ The input image. Any number of channel (1 (Eg: Gray), - % 3 (Eg: RGB), 4 (Eg: RGB-D)) can be provided. + % 3 (Eg: RGB), 4 (Eg: RGB-D)) can be provided. % % ## Output % * __dst__ The output segmentation. It's a `int32` matrix with - % the same number of cols and rows as input image, with an - % unique, sequential, id for each pixel. + % the same number of cols and rows as input image, with a + % unique sequential id for each pixel. % % See also: cv.GraphSegmentation.GraphSegmentation % diff --git a/opencv_contrib/+cv/GrayworldWB.m b/opencv_contrib/+cv/GrayworldWB.m index 0d915e965..ce7f8a5f5 100644 --- a/opencv_contrib/+cv/GrayworldWB.m +++ b/opencv_contrib/+cv/GrayworldWB.m @@ -12,7 +12,7 @@ % Saturation is calculated using the following for a 3-channel RGB image % per pixel `I` and is in the range `[0,1]`: % - % Saturation[I] = (max(R,G,B) - min(R,G,B)) / max(R,G,B) + % Saturation[I] = (max(R,G,B) - min(R,G,B)) / max(R,G,B) % % A threshold of 1 means that all pixels are used to white-balance, while % a threshold of 0 means no pixels are used. Lower thresholds are useful @@ -24,7 +24,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -38,7 +39,7 @@ function this = GrayworldWB() %GRAYWORLDWB Creates an instance of GrayworldWB % - % obj = cv.GrayworldWB() + % obj = cv.GrayworldWB() % % See also: cv.GrayworldWB.balanceWhite % @@ -48,7 +49,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.GrayworldWB % @@ -62,7 +63,7 @@ function delete(this) function dst = balanceWhite(this, src) %BALANCEWHITE Applies white balancing to the input image % - % dst = obj.balanceWhite(src) + % dst = obj.balanceWhite(src) % % ## Input % * __src__ Input image, `uint8` or `uint16` color image. @@ -81,7 +82,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.GrayworldWB.empty % @@ -91,11 +92,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.GrayworldWB.clear % @@ -105,11 +106,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.GrayworldWB.save, cv.GrayworldWB.load % @@ -119,7 +120,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -134,21 +135,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) 
+ % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from a file storage. % The previous model state is discarded. diff --git a/opencv_contrib/+cv/GuidedFilter.m b/opencv_contrib/+cv/GuidedFilter.m index 9357efeb9..83cbaf83c 100644 --- a/opencv_contrib/+cv/GuidedFilter.m +++ b/opencv_contrib/+cv/GuidedFilter.m @@ -12,25 +12,26 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = GuidedFilter(guide, varargin) %GUIDEDFILTER Factory method, create instance of GuidedFilter and produce initialization routines % - % obj = cv.GuidedFilter(guide) - % obj = cv.GuidedFilter(guide, 'OptionName',optionValue, ...) + % obj = cv.GuidedFilter(guide) + % obj = cv.GuidedFilter(guide, 'OptionName',optionValue, ...) % % ## Input % * __guide__ guided image with up to 3 channels, if it have more - % then 3 channels then only first 3 channels will be used. + % then 3 channels then only first 3 channels will be used. % % ## Options % * __Radius__ radius of Guided Filter. default 7 % * __EPS__ regularization term of Guided Filter. `eps^2` is - % similar to the sigma in the color space into - % cv.bilateralFilter. default 500.0 + % similar to the sigma in the color space into + % cv.bilateralFilter. default 500.0 % % For more details about Guided Filter parameters, see the % original article [Kaiming10]. @@ -43,7 +44,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.GuidedFilter % @@ -57,7 +58,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.GuidedFilter.empty, cv.GuidedFilter.load % @@ -67,11 +68,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.GuidedFilter.clear, cv.GuidedFilter.load % @@ -81,7 +82,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -97,21 +98,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. 
% % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -125,11 +126,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.GuidedFilter.save, cv.GuidedFilter.load % @@ -142,8 +143,8 @@ function load(this, fname_or_str, varargin) function dst = filter(this, src, varargin) %FILTER Apply Guided Filter to the filtering image % - % dst = obj.filter(src) - % dst = obj.filter(src, 'OptionName',optionValue, ...) + % dst = obj.filter(src) + % dst = obj.filter(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ filtering image with any numbers of channels. @@ -153,8 +154,7 @@ function load(this, fname_or_str, varargin) % % ## Options % * __DDepth__ optional depth of the output image. `DDepth` can be - % set to -1, which will be equivalent to `class(src)`. - % Default -1 + % set to -1, which will be equivalent to `class(src)`. Default -1 % % See also: cv.GuidedFilter.GuidedFilter, % cv.GuidedFilter.guidedFilter @@ -167,13 +167,13 @@ function load(this, fname_or_str, varargin) function dst = guidedFilter(src, guide, varargin) %GUIDEDFILTER Simple one-line Guided Filter call % - % dst = cv.GuidedFilter.guidedFilter(src, guide) - % dst = cv.GuidedFilter.guidedFilter(src, guide, 'OptionName',optionValue, ...) + % dst = cv.GuidedFilter.guidedFilter(src, guide) + % dst = cv.GuidedFilter.guidedFilter(src, guide, 'OptionName',optionValue, ...) % % ## Input % * __src__ filtering image with any numbers of channels. % * __guide__ guided image with up to 3 channels, if it have more - % then 3 channels then only first 3 channels will be used. + % then 3 channels then only first 3 channels will be used. % % ## Output % * __dst__ output image. @@ -181,11 +181,10 @@ function load(this, fname_or_str, varargin) % ## Options % * __Radius__ radius of Guided Filter. default 7 % * __EPS__ regularization term of Guided Filter. `eps^2` is - % similar to the sigma in the color space into - % cv.bilateralFilter. default 500.0 + % similar to the sigma in the color space into + % cv.bilateralFilter. default 500.0 % * __DDepth__ optional depth of the output image. `DDepth` can be - % set to -1, which will be equivalent to `class(src)`. - % Default -1 + % set to -1, which will be equivalent to `class(src)`. 
Default -1 % % If you have multiple images to filter with the same guided image % then use GuidedFilter interface to avoid extra computations on diff --git a/opencv_contrib/+cv/HarrisLaplaceFeatureDetector.m b/opencv_contrib/+cv/HarrisLaplaceFeatureDetector.m index 467c49b41..c0cd89b59 100644 --- a/opencv_contrib/+cv/HarrisLaplaceFeatureDetector.m +++ b/opencv_contrib/+cv/HarrisLaplaceFeatureDetector.m @@ -14,27 +14,28 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = HarrisLaplaceFeatureDetector(varargin) %HARRISLAPLACEFEATUREDETECTOR The full constructor % - % obj = cv.HarrisLaplaceFeatureDetector() - % obj = cv.HarrisLaplaceFeatureDetector('OptionName',optionValue, ...) + % obj = cv.HarrisLaplaceFeatureDetector() + % obj = cv.HarrisLaplaceFeatureDetector('OptionName',optionValue, ...) % % ## Options % * __NumOctaves__ the number of octaves in the scale-space - % pyramid. default 6 + % pyramid. default 6 % * __CornThresh__ the threshold for the Harris cornerness - % measure. default 0.01 + % measure. default 0.01 % * __DOGThresh__ the threshold for the Difference-of-Gaussians - % scale selection. default 0.01 + % scale selection. default 0.01 % * __MaxCorners__ the maximum number of corners to consider. - % default 5000 + % default 5000 % * __NumLayers__ the number of intermediate scales per octave. - % default 4 + % default 4 % % See also: cv.HarrisLaplaceFeatureDetector.detect % @@ -44,7 +45,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.HarrisLaplaceFeatureDetector % @@ -55,7 +56,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -69,7 +70,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.HarrisLaplaceFeatureDetector.empty, % cv.HarrisLaplaceFeatureDetector.load @@ -80,11 +81,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.HarrisLaplaceFeatureDetector.clear % @@ -94,7 +95,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -110,21 +111,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). 
default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -138,11 +139,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.HarrisLaplaceFeatureDetector.save, % cv.HarrisLaplaceFeatureDetector.load @@ -156,27 +157,25 @@ function load(this, fname_or_str, varargin) function keypoints = detect(this, img, varargin) %DETECT Detects keypoints in an image or image set % - % keypoints = obj.detect(img) - % keypoints = obj.detect(imgs) - % [...] = obj.detect(..., 'OptionName',optionValue, ...) + % keypoints = obj.detect(img) + % keypoints = obj.detect(imgs) + % [...] = obj.detect(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Image (first variant), 8-bit grayscale image. % * __imgs__ Image set (second variant), cell array of images. % % ## Output - % * __keypoints__ The detected keypoints. In the first variant, - % a 1-by-N structure array. In the second variant of the - % method, `keypoints{i}` is a set of keypoints detected in - % `imgs{i}`. + % * __keypoints__ The detected keypoints. In the first variant, a + % 1-by-N structure array. In the second variant of the method, + % `keypoints{i}` is a set of keypoints detected in `imgs{i}`. % % ## Options % * __Mask__ A mask specifying where to look for keypoints - % (optional). It must be a logical or 8-bit integer matrix - % with non-zero values in the region of interest. In the - % second variant, it is a cell-array of masks for each input - % image, `masks{i}` is a mask for `imgs{i}`. - % Not set by default. + % (optional). It must be a logical or 8-bit integer matrix with + % non-zero values in the region of interest. In the second + % variant, it is a cell-array of masks for each input image, + % `masks{i}` is a mask for `imgs{i}`. Not set by default. % % See also: cv.HarrisLaplaceFeatureDetector.HarrisLaplaceFeatureDetector % diff --git a/opencv_contrib/+cv/HoughPoint2Line.m b/opencv_contrib/+cv/HoughPoint2Line.m index b452ba9db..885a38239 100644 --- a/opencv_contrib/+cv/HoughPoint2Line.m +++ b/opencv_contrib/+cv/HoughPoint2Line.m @@ -1,7 +1,7 @@ %HOUGHPOINT2LINE Calculates coordinates of line segment corresponded by point in Hough space % -% line = cv.HoughPoint2Line(houghPoint, srcImgInfo) -% line = cv.HoughPoint2Line(..., 'OptionName',optionValue, ...) +% line = cv.HoughPoint2Line(houghPoint, srcImgInfo) +% line = cv.HoughPoint2Line(..., 'OptionName',optionValue, ...) % % ## Input % * __houghPoint__ Point in Hough space `[x,y]`. @@ -9,24 +9,23 @@ % % ## Output % * __line__ Coordinates of line segment corresponded by point in Hough space, -% a 4-element integer vector `[vx,vy, ux,uy]`. +% a 4-element integer vector `[vx,vy, ux,uy]`. % % ## Options % * __AngleRange__ The part of Hough space where point is situated. See -% cv.FastHoughTransform, default `ARO_315_135`. +% cv.FastHoughTransform, default `ARO_315_135`. % * __MakeSkew__ Specifies to do or not to do image skewing. See -% cv.FastHoughTransform, default 'Deskew'. 
+% cv.FastHoughTransform, default 'Deskew'. % * __Rules__ Specifies strictness of line segment calculating. This specifies -% the degree of rules validation. This can be used, for example, to -% choose a proper way of input arguments validation. -% Default 'IgnoreBorders'. One of: -% * __Strict__ Validate each rule in a proper way. -% * __IgnoreBorders__ Skip validations of image borders. +% the degree of rules validation. This can be used, for example, to choose +% a proper way of input arguments validation. Default 'IgnoreBorders': +% * __Strict__ Validate each rule in a proper way. +% * __IgnoreBorders__ Skip validations of image borders. % % The function calculates coordinates of line segment corresponded by point in % Hough space. % -% ## Notes +% ### Notes % - If `Rules` parameter set to 'Strict' then returned line cut along the % border of source image. % - If `Rules` parameter set to 'IgnoreBorders' then in case of point, which diff --git a/opencv_contrib/+cv/ImgHash.m b/opencv_contrib/+cv/ImgHash.m index c665012fd..55ccccca5 100644 --- a/opencv_contrib/+cv/ImgHash.m +++ b/opencv_contrib/+cv/ImgHash.m @@ -20,12 +20,12 @@ % % ### Performance under different attacks % - % ![Performance chart](http://docs.opencv.org/3.3.0/attack_performance.JPG) + % ![Performance chart](https://docs.opencv.org/3.3.0/attack_performance.JPG) % % ### Speed comparison with PHash library (100 images from ukbench) % - % ![Hash Computation chart](http://docs.opencv.org/3.3.0/hash_computation_chart.JPG) - % ![Hash comparison chart](http://docs.opencv.org/3.3.0/hash_comparison_chart.JPG) + % ![Hash Computation chart](https://docs.opencv.org/3.3.0/hash_computation_chart.JPG) + % ![Hash comparison chart](https://docs.opencv.org/3.3.0/hash_comparison_chart.JPG) % % As you can see, hash computation speed of img_hash module outperform % [PHash library](http://www.phash.org/) a lot. @@ -54,7 +54,7 @@ % % ### Contributors % - % Tham Ngap Wei, thamngapwei@gmail.com + % [Tham Ngap Wei](mailto:thamngapwei@gmail.com) % % ## References % [lookslikeit]: @@ -71,8 +71,10 @@ % properties (SetAccess = private) - id % Object ID - klass % Object class + % Object ID + id + % Object class + klass end %% Constructor/destructor @@ -80,28 +82,28 @@ function this = ImgHash(alg, varargin) %IMGHASH Constructor % - % obj = cv.ImgHash(alg) - % obj = cv.ImgHash(alg, 'OptionName',optionValue, ...) + % obj = cv.ImgHash(alg) + % obj = cv.ImgHash(alg, 'OptionName',optionValue, ...) % % ## Input % * __alg__ image hash algorithm, one of: - % * __AverageHash__ Computes average hash value of the input - % image. This is a fast image hashing algorithm, but - % only work on simple case. For more details, please - % refer to [lookslikeit]. - % * __BlockMeanHash__ Image hash based on block mean. - % See [zauner2010implementation] for details. - % * __ColorMomentHash__ Image hash based on color moments. - % See [tang2012perceptual] for details. - % * __MarrHildrethHash__ Marr-Hildreth Operator Based Hash, - % slowest but more discriminative. - % See [zauner2010implementation] for details. - % * __PHash__ Slower than average_hash, but tolerant of - % minor modifications. This algorithm can combat more - % variation than AverageHash, for more details please - % refer to [lookslikeit]. - % * __RadialVarianceHash__ Image hash based on Radon - % transform. See [tang2012perceptual] for details. + % * __AverageHash__ Computes average hash value of the input + % image. This is a fast image hashing algorithm, but only works + % on simple cases.
For more details, please refer to + % [lookslikeit]. + % * __BlockMeanHash__ Image hash based on block mean. See + % [zauner2010implementation] for details. + % * __ColorMomentHash__ Image hash based on color moments. See + % [tang2012perceptual] for details. + % * __MarrHildrethHash__ Marr-Hildreth Operator Based Hash, + % slowest but more discriminative. See + % [zauner2010implementation] for details. + % * __PHash__ Slower than average_hash, but tolerant of minor + % modifications. This algorithm can combat more variation than + % AverageHash, for more details please refer to + % [lookslikeit]. + % * __RadialVarianceHash__ Image hash based on Radon transform. + % See [tang2012perceptual] for details. % % ## Options % The following are options for the various algorithms: @@ -126,7 +128,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.ImgHash % @@ -137,7 +139,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -151,7 +153,7 @@ function delete(this) function hash = compute(this, img) %COMPUTE Computes hash of the input image % - % hash = obj.compute(img) + % hash = obj.compute(img) % % ## Input % * __img__ input image want to compute hash value. @@ -167,7 +169,7 @@ function delete(this) function val = compare(this, hashOne, hashTwo) %COMPARE Compare two hash values % - % val = obj.compare(hashOne, hashTwo) + % val = obj.compare(hashOne, hashTwo) % % ## Input % * __hashOne__ Hash value one. @@ -175,7 +177,7 @@ function delete(this) % % ## Output % * __val__ indicate similarity between the two hashes, the - % meaning of the value vary from algorithm to algorithm. + % meaning of the value varies from algorithm to algorithm. % % See also: cv.ImgHash.compute % @@ -188,15 +190,15 @@ function delete(this) function hash = averageHash(img) %AVERAGEHASH Calculates the average hash in one call % - % hash = cv.ImgHash.averageHash(img) + % hash = cv.ImgHash.averageHash(img) % % ## Input % * __img__ input image want to compute hash value, type should be - % `uint8` with 1/3/4 channels. + % `uint8` with 1/3/4 channels. % % ## Output % * __hash__ Hash value of input, it will contain 16 hex decimal - % number, return type is `uint8` + % number, return type is `uint8` % % See also: cv.ImgHash.ImgHash, cv.ImgHash.compute % @@ -206,23 +208,23 @@ function delete(this) function hash = blockMeanHash(img, varargin) %BLOCKMEANHASH Computes block mean hash of the input image % - % hash = cv.ImgHash.blockMeanHash(img) - % hash = cv.ImgHash.blockMeanHash(img, 'OptionName',optionValue, ...) + % hash = cv.ImgHash.blockMeanHash(img) + % hash = cv.ImgHash.blockMeanHash(img, 'OptionName',optionValue, ...) % % ## Input % * __img__ input image want to compute hash value, type should be - % `uint8` with 1/3/4 channels. + % `uint8` with 1/3/4 channels. % % ## Output % * __hash__ Hash value of input, it will contain 16 hex decimal - % number, return type is `uint8`. + % number, return type is `uint8`. % % ## Options % * __Mode__ block mean hash mode, one of: - % * __Mode0__ (default) use fewer blocks and generates - % 16*16/8 `uint8` hash values. - % * __Mode1__ use block blocks (step_sizes/2) and generates - % fix(31*31/8)+1 `uint8` hash values. + % * __Mode0__ (default) use fewer blocks and generates 16*16/8 + % `uint8` hash values.
+ % * __Mode1__ use overlapping blocks (step_sizes/2) and generates + % `fix(31*31/8)+1` `uint8` hash values. % % See also: cv.ImgHash.ImgHash, cv.ImgHash.compute % @@ -232,11 +234,11 @@ function delete(this) function hash = colorMomentHash(img) %COLORMOMENTHASH Computes color moment hash of the input % - % hash = cv.ImgHash.colorMomentHash(img) + % hash = cv.ImgHash.colorMomentHash(img) % % ## Input % * __img__ input image want to compute hash value, type should be - % `uint8` with 1/3/4 channels. + % `uint8` with 1/3/4 channels. % % ## Output % * __hash__ 42 hash values with type `double`. @@ -251,16 +253,16 @@ function delete(this) function hash = marrHildrethHash(img, varargin) %MARRHILDRETHHASH Computes average hash value of the input image % - % hash = cv.ImgHash.marrHildrethHash(img) - % hash = cv.ImgHash.marrHildrethHash(img, 'OptionName',optionValue, ...) + % hash = cv.ImgHash.marrHildrethHash(img) + % hash = cv.ImgHash.marrHildrethHash(img, 'OptionName',optionValue, ...) % % ## Input % * __img__ input image want to compute hash value, type should be - % `uint8` with 1/3/4 channels. + % `uint8` with 1/3/4 channels. % % ## Output % * __hash__ Hash value of input, it will contain 16 hex decimal - % number, return type is `uint8`. + % number, return type is `uint8`. % % ## Options % * __Alpha__ scale factor for marr wavelet. default 2 @@ -274,11 +276,11 @@ function delete(this) function hash = pHash(img) %PHASH Computes pHash value of the input image % - % hash = cv.ImgHash.pHash(img) + % hash = cv.ImgHash.pHash(img) % % ## Input % * __img__ input image want to compute hash value, type should be - % `uint8` with 1/3/4 channels. + % `uint8` with 1/3/4 channels. % % ## Output % * __hash__ Hash value of input, it will contain 8 `uint8` values. @@ -291,12 +293,12 @@ function delete(this) function hash = radialVarianceHash(img, varargin) %RADIALVARIANCEHASH Computes radial variance hash of the input image % - % hash = cv.ImgHash.radialVarianceHash(img) - % hash = cv.ImgHash.radialVarianceHash(img, 'OptionName',optionValue, ...) + % hash = cv.ImgHash.radialVarianceHash(img) + % hash = cv.ImgHash.radialVarianceHash(img, 'OptionName',optionValue, ...) % % ## Input % * __img__ input image want to compute hash value, type should be - % `uint8`, with 1/3/4 channels. + % `uint8`, with 1/3/4 channels. % % ## Output % * __hash__ Hash value of input, contains 40 `uint8` values. @@ -304,7 +306,7 @@ function delete(this) % ## Options % * __Sigma__ Gaussian kernel standard deviation. default 1 % * __NumOfAngleLine__ The number of angles to consider. - % default 180 + % default 180 % % See also: cv.ImgHash.ImgHash, cv.ImgHash.compute % diff --git a/opencv_contrib/+cv/LATCH.m b/opencv_contrib/+cv/LATCH.m index b2ac7957f..5717f7ce5 100644 --- a/opencv_contrib/+cv/LATCH.m +++ b/opencv_contrib/+cv/LATCH.m @@ -19,25 +19,26 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = LATCH(varargin) %LATCH Constructor % - % obj = cv.LATCH() - % obj = cv.LATCH(..., 'OptionName',optionValue, ...) + % obj = cv.LATCH() + % obj = cv.LATCH(..., 'OptionName',optionValue, ...) % % ## Options % * __Bytes__ the size of the descriptor. Can be 64, 32, 16, 8, 4, - % 2 or 1. default 32 + % 2 or 1. default 32 % * __RotationInvariance__ whether or not the descriptor should - % compansate for orientation changes. default true + % compensate for orientation changes. default true % * __HalfSize__ the size of half of the mini-patches size.
For - % example, if we would like to compare triplets of patches of - % size 7x7x then the `half_ssd_size` should be `(7-1)/2 = 3`. - % default 3 + % example, if we would like to compare triplets of patches of + % size 7x7, then the `half_ssd_size` should be `(7-1)/2 = 3`. + % default 3 % % See also: cv.LATCH.compute % @@ -47,7 +48,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.LATCH % @@ -58,7 +59,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -72,7 +73,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.LATCH.empty, cv.LATCH.load % @@ -82,11 +83,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.LATCH.clear, cv.LATCH.load % @@ -96,7 +97,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -112,21 +113,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -140,11 +141,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.LATCH.save, cv.LATCH.load % @@ -157,16 +158,16 @@ function load(this, fname_or_str, varargin) function ntype = defaultNorm(this) %DEFAULTNORM Returns the default norm type % - % ntype = obj.defaultNorm() + % ntype = obj.defaultNorm() % % ## Output % * __ntype__ Norm type. One of `cv::NormTypes`: - % * __Inf__ - % * __L1__ - % * __L2__ - % * __L2Sqr__ - % * __Hamming__ - % * __Hamming2__ + % * __Inf__ + % * __L1__ + % * __L2__ + % * __L2Sqr__ + % * __Hamming__ + % * __Hamming2__ % % Always `Hamming` for LATCH.
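Since LATCH is a pure descriptor extractor, it is typically paired with a separate keypoint detector. A minimal sketch, assuming mexopencv is on the path; the detector choice, parameter values, and image file are illustrative assumptions, not prescribed by the patch:

    % any keypoint detector works; Harris-Laplace is used here for illustration
    detector  = cv.HarrisLaplaceFeatureDetector('MaxCorners',500);
    extractor = cv.LATCH('Bytes',32, 'RotationInvariance',true);
    img  = imread('cameraman.tif');   % placeholder 8-bit grayscale image
    kpts = detector.detect(img);
    % keypoints for which no descriptor can be computed are dropped
    [descs, kpts] = extractor.compute(img, kpts);
    % descs should hold one 32-byte `uint8` row per surviving keypoint,
    % to be matched in Hamming space (see defaultNorm above)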
% @@ -178,11 +179,11 @@ function load(this, fname_or_str, varargin) function sz = descriptorSize(this) %DESCRIPTORSIZE Returns the descriptor size in bytes % - % sz = obj.descriptorSize() + % sz = obj.descriptorSize() % % ## Output % * __sz__ Descriptor size, as specified in `Bytes` argument in - % constructor. + % constructor. % % See also: cv.LATCH.descriptorType, cv.LATCH.compute % @@ -192,7 +193,7 @@ function load(this, fname_or_str, varargin) function dtype = descriptorType(this) %DESCRIPTORTYPE Returns the descriptor type % - % dtype = obj.descriptorType() + % dtype = obj.descriptorType() % % ## Output % * __dtype__ Descriptor type, one of numeric MATLAB class names. @@ -207,26 +208,26 @@ function load(this, fname_or_str, varargin) function [descriptors, keypoints] = compute(this, img, keypoints) %COMPUTE Computes the descriptors for a set of keypoints detected in an image or image set % - % [descriptors, keypoints] = obj.compute(img, keypoints) - % [descriptors, keypoints] = obj.compute(imgs, keypoints) + % [descriptors, keypoints] = obj.compute(img, keypoints) + % [descriptors, keypoints] = obj.compute(imgs, keypoints) % % ## Input % * __img__ Image (first variant), 8-bit grayscale image. % * __imgs__ Image set (second variant), cell array of images. % * __keypoints__ Input collection of keypoints. Keypoints for - % which a descriptor cannot be computed are removed. - % Sometimes new keypoints can be added, for example: cv.SIFT - % duplicates keypoint with several dominant orientations - % (for each orientation). In the first variant, this is a - % struct-array of detected keypoints. In the second variant, - % it is a cell-array, where `keypoints{i}` is a set of keypoints - % detected in `imgs{i}` (a struct-array like before). + % which a descriptor cannot be computed are removed. Sometimes + % new keypoints can be added, for example: cv.SIFT duplicates + % keypoint with several dominant orientations (for each + % orientation). In the first variant, this is a struct-array of + % detected keypoints. In the second variant, it is a cell-array, + % where `keypoints{i}` is a set of keypoints detected in + % `imgs{i}` (a struct-array like before). % % ## Output % * __descriptors__ Computed descriptors. In the second variant of - % the method `descriptors{i}` are descriptors computed for a - % `keypoints{i}`. Row `j` in `descriptors` (or - % `descriptors{i}`) is the descriptor for `j`-th keypoint. + % the method `descriptors{i}` are descriptors computed for a + % `keypoints{i}`. Row `j` in `descriptors` (or `descriptors{i}`) + % is the descriptor for `j`-th keypoint. % * __keypoints__ Optional output with possibly updated keypoints. % % See also: cv.LATCH.LATCH diff --git a/opencv_contrib/+cv/LBPHFaceRecognizer.m b/opencv_contrib/+cv/LBPHFaceRecognizer.m index 4beed8bb8..e25de50ef 100644 --- a/opencv_contrib/+cv/LBPHFaceRecognizer.m +++ b/opencv_contrib/+cv/LBPHFaceRecognizer.m @@ -28,32 +28,32 @@ % Here is an example of setting a threshold for the Eigenfaces method, % when creating the model: % - % % Let's say we want to keep 10 Eigenfaces and have a threshold value of 10.0 - % num_components = 10; - % threshold = 10.0; - % % Then if you want to have a face recognizer with a confidence threshold, - % % create the concrete implementation with the appropiate parameters: - % model = cv.BasicFaceRecognizer('Eigenfaces', ... 
- % 'NumComponents',num_components, 'Threshold',threshold); + % % Let's say we want to keep 10 Eigenfaces and have a threshold value of 10.0 + % num_components = 10; + % threshold = 10.0; + % % Then if you want to have a face recognizer with a confidence threshold, + % % create the concrete implementation with the appropiate parameters: + % model = cv.BasicFaceRecognizer('Eigenfaces', ... + % 'NumComponents',num_components, 'Threshold',threshold); % % Sometimes it's impossible to train the model, just to experiment with % threshold values. It's possible to set model thresholds during runtime. % Let's see how we would set/get the prediction for the Eigenface model, % we've created above: % - % % The following line reads the threshold from the Eigenfaces model: - % current_threshold = model.Threshold; - % % And this line sets the threshold to 0.0: - % model.Threshold = 0.0; + % % The following line reads the threshold from the Eigenfaces model: + % current_threshold = model.Threshold; + % % And this line sets the threshold to 0.0: + % model.Threshold = 0.0; % % If you've set the threshold to 0.0 as we did above, then: % - % img = cv.imread('person1/3.jpg', 'Grayscale',true); - % % Get a prediction from the model. Note: We've set a threshold of 0.0 above, - % % since the distance is almost always larger than 0.0, you'll get -1 as - % % label, which indicates, this face is unknown - % predicted_label = model.predict(img); - % % ... + % img = cv.imread('person1/3.jpg', 'Grayscale',true); + % % Get a prediction from the model. Note: We've set a threshold of 0.0 above, + % % since the distance is almost always larger than 0.0, you'll get -1 as + % % label, which indicates, this face is unknown + % predicted_label = model.predict(img); + % % ... % % is going to yield -1 as predicted label, which states this face is % unknown. @@ -68,7 +68,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -89,26 +90,27 @@ function this = LBPHFaceRecognizer(varargin) %LBPHFACERECOGNIZER Constructor % - % obj = cv.LBPHFaceRecognizer('OptionName',optionValue, ...) + % obj = cv.LBPHFaceRecognizer('OptionName',optionValue, ...) % % ## Options % * __Radius__ The radius used for building the Circular Local - % Binary Pattern. The greater the radius, the.. default 1 + % Binary Pattern. The greater the radius, the smoother the image + % but more spatial information you can get. default 1 % * __Neighbors__ The number of sample points to build a Circular - % Local Binary Pattern from. An appropriate value is to use - % 8 sample points. Keep in mind: the more sample points you - % include, the higher the computational cost. default 8 - % * __GridX__ The number of cells in the horizontal direction, - % 8 is a common value used in publications. The more cells, - % the finer the grid, the higher the dimensionality of the - % resulting feature vector. default 8 + % Local Binary Pattern from. An appropriate value is to use 8 + % sample points. Keep in mind: the more sample points you + % include, the higher the computational cost. default 8 + % * __GridX__ The number of cells in the horizontal direction, 8 + % is a common value used in publications. The more cells, the + % finer the grid, the higher the dimensionality of the resulting + % feature vector. default 8 % * __GridY__ The number of cells in the vertical direction, 8 is - % a common value used in publications. The more cells, the - % finer the grid, the higher the dimensionality of the - % resulting feature vector. 
default 8 + % a common value used in publications. The more cells, the finer + % the grid, the higher the dimensionality of the resulting + % feature vector. default 8 % * __Threshold__ The threshold applied in the prediction. If the - % distance to the nearest neighbor is larger than the - % threshold, the prediction returns -1. default `realmax` + % distance to the nearest neighbor is larger than the threshold, + % the prediction returns -1. default `realmax` % % Initializes this LBPH Model. The current implementation is % rather fixed as it uses the Extended Local Binary Patterns per @@ -119,7 +121,6 @@ % spatial histograms. % % ### Notes - % % - The Circular Local Binary Patterns (used in training and % prediction) expect the data given as grayscale images, use % cv.cvtColor to convert between the color spaces. @@ -133,7 +134,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.LBPHFaceRecognizer % @@ -144,7 +145,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -158,7 +159,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.LBPHFaceRecognizer.empty, cv.LBPHFaceRecognizer.load % @@ -168,11 +169,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.LBPHFaceRecognizer.clear, cv.LBPHFaceRecognizer.load % @@ -182,16 +183,16 @@ function clear(this) function varargout = save(this, filename) %SAVE Saves a FaceRecognizer and its model state % - % obj.save(filename) - % str = obj.save(filename) + % obj.save(filename) + % str = obj.save(filename) % % ## Input % * __filename__ The filename to store this FaceRecognizer to - % (either XML/YAML). + % (either XML/YAML). % % ## Output % * __str__ optional output. If requested, the model is persisted - % to a string in memory instead of writing to disk. + % to a string in memory instead of writing to disk. % % Saves this model to a given filename, either as XML or YAML. % @@ -205,19 +206,19 @@ function clear(this) function load(this, fname_or_str, varargin) %LOAD Loads a FaceRecognizer and its model state % - % obj.load(filename) - % obj.load(str, 'FromString',true) + % obj.load(fname) + % obj.load(str, 'FromString',true) % % ## Input - % * __filename__ The filename to load this FaceRecognizer from - % (either XML/YAML). + % * __fname__ The filename to load this FaceRecognizer from + % (either XML/YAML). % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % Loads a persisted model and state from a given XML or YAML file. 
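The save/load pair above is symmetric, so a short round-trip sketch may be useful; the training data here is synthetic and only meant to exercise the API (the file name and image sizes are arbitrary):

    % train on a tiny synthetic set of two "faces"
    images = {randi(255,[64 64],'uint8'), randi(255,[64 64],'uint8')};
    labels = [0 1];
    model = cv.LBPHFaceRecognizer();
    model.train(images, labels);
    % persist the model state to YAML, then restore it into a new object
    model.save('lbph_model.yml');
    model2 = cv.LBPHFaceRecognizer();
    model2.load('lbph_model.yml');
    [lbl, conf] = model2.predict(images{1});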
% @@ -229,11 +230,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.LBPHFaceRecognizer.save, cv.LBPHFaceRecognizer.load % @@ -246,14 +247,13 @@ function load(this, fname_or_str, varargin) function train(this, src, labels) %TRAIN Trains a FaceRecognizer with given data and associated labels % - % obj.train(src, labels) + % obj.train(src, labels) % % ## Input % * __src__ The training images, that means the faces you want to - % learn. The data has to be given as a cell array of - % matrices. + % learn. The data has to be given as a cell array of matrices. % * __labels__ The labels corresponding to the images. have to be - % given as an integer vector. + % given as an integer vector. % % The following source code snippet shows you how to learn a % Fisherfaces model on a given set of images. The images are read @@ -264,39 +264,39 @@ function train(this, src, labels) % FaceRecognizer you don't have to pay any attention to the order % of the labels, just make sure same persons have the same label: % - % % holds images and labels - % images = {}; - % labels = []; - % % images for first person - % images{end+1} = cv.imread('person0/0.jpg', 'Grayscale',true); - % labels{end+1} = 0; - % images{end+1} = cv.imread('person0/1.jpg', 'Grayscale',true); - % labels{end+1} = 0; - % images{end+1} = cv.imread('person0/2.jpg', 'Grayscale',true); - % labels{end+1} = 0; - % % images for second person - % images{end+1} = cv.imread('person1/0.jpg', 'Grayscale',true); - % labels{end+1} = 1; - % images{end+1} = cv.imread('person1/1.jpg', 'Grayscale',true); - % labels{end+1} = 1; - % images{end+1} = cv.imread('person1/2.jpg', 'Grayscale',true); - % labels{end+1} = 1; + % % holds images and labels + % images = {}; + % labels = []; + % % images for first person + % images{end+1} = cv.imread('person0/0.jpg', 'Grayscale',true); + % labels(end+1) = 0; + % images{end+1} = cv.imread('person0/1.jpg', 'Grayscale',true); + % labels(end+1) = 0; + % images{end+1} = cv.imread('person0/2.jpg', 'Grayscale',true); + % labels(end+1) = 0; + % % images for second person + % images{end+1} = cv.imread('person1/0.jpg', 'Grayscale',true); + % labels(end+1) = 1; + % images{end+1} = cv.imread('person1/1.jpg', 'Grayscale',true); + % labels(end+1) = 1; + % images{end+1} = cv.imread('person1/2.jpg', 'Grayscale',true); + % labels(end+1) = 1; % % Now that you have read some images, we can create a new % FaceRecognizer.
In this example I'll create a Fisherfaces model % and decide to keep all of the possible Fisherfaces: % - % % Create a new Fisherfaces model and retain all available - % % Fisherfaces, this is the most common usage of this specific - % % FaceRecognizer: - % model = cv.BasicFaceRecognizer('Fisherfaces'); + % % Create a new Fisherfaces model and retain all available + % % Fisherfaces, this is the most common usage of this specific + % % FaceRecognizer: + % model = cv.BasicFaceRecognizer('Fisherfaces'); % % And finally train it on the given dataset (the face images and % labels): % - % % This is the common interface to train all of the available FaceRecognizer - % % implementations: - % model.train(images, labels); + % % This is the common interface to train all of the available FaceRecognizer + % % implementations: + % model.train(images, labels); % % See also: cv.LBPHFaceRecognizer.predict % @@ -306,14 +306,13 @@ function train(this, src, labels) function update(this, src, labels) %UPDATE Updates a FaceRecognizer with given data and associated labels % - % obj.update(src, labels) + % obj.update(src, labels) % % ## Input % * __src__ The training images, that means the faces you want to - % learn. The data has to be given as a cell array of - % matrices. + % learn. The data has to be given as a cell array of matrices. % * __labels__ The labels corresponding to the images. have to be - % given as an integer vector. + % given as an integer vector. % % This method updates a (probably trained) FaceRecognizer, but % only if the algorithm supports it. The Local Binary Patterns @@ -324,30 +323,30 @@ function update(this, src, labels) % train empties the existing model and learns a new model, while % update does not delete any model data. % - % % Create a new LBPH model (it can be updated) and use the - % % default parameters, this is the most common usage of this - % % specific FaceRecognizer: - % model = cv.LBPHFaceRecognizer(); - % % This is the common interface to train all of the available - % % FaceRecognizer implementations: - % model.train(images, labels); - % % Some containers to hold new image. - % % You should add some images to the containers: - % newImages = {..}; - % newLabels = [..]; - % % Now updating the model is as easy as calling: - % model.update(newImages,newLabels); - % % This will preserve the old model data and extend the - % % existing model with the new features extracted from - % % newImages! + % % Create a new LBPH model (it can be updated) and use the + % % default parameters, this is the most common usage of this + % % specific FaceRecognizer: + % model = cv.LBPHFaceRecognizer(); + % % This is the common interface to train all of the available + % % FaceRecognizer implementations: + % model.train(images, labels); + % % Some containers to hold new image. + % % You should add some images to the containers: + % newImages = {..}; + % newLabels = [..]; + % % Now updating the model is as easy as calling: + % model.update(newImages,newLabels); + % % This will preserve the old model data and extend the + % % existing model with the new features extracted from + % % newImages! % % Calling update on an Eigenfaces model (see % cv.BasicFaceRecognizer), which doesn't support updating, will % throw an error similar to: % - % OpenCV Error: The function/feature is not implemented (This - % FaceRecognizer (FaceRecognizer.Eigenfaces) does not support - % updating, you have to use FaceRecognizer::train to update it.) 
+ % OpenCV Error: The function/feature is not implemented (This + % FaceRecognizer (FaceRecognizer.Eigenfaces) does not support + % updating, you have to use FaceRecognizer::train to update it.) % % NOTE: The FaceRecognizer does not store your training images, % because this would be very memory intense and it's not the @@ -362,7 +361,7 @@ function update(this, src, labels) function [label, confidence] = predict(this, src) %PREDICT Predicts a label and associated confidence (e.g. distance) for a given input image % - % [label, confidence] = obj.predict(src) + % [label, confidence] = obj.predict(src) % % ## Input % * __src__ Sample image to get a prediction from. @@ -370,24 +369,24 @@ function update(this, src, labels) % ## Output % * __label__ The predicted label for the given image. % * __confidence__ Associated confidence (e.g. distance) for the - % predicted label. + % predicted label. % % The following example shows how to get a prediction from a % trained model: % - % % Do your initialization here (create the FaceRecognizer model) ... - % % Read in a sample image: - % img = cv.imread('person1/3.jpg', 'Grayscale',true); - % % And get a prediction from the FaceRecognizer: - % predicted = model.predict(img); + % % Do your initialization here (create the FaceRecognizer model) ... + % % Read in a sample image: + % img = cv.imread('person1/3.jpg', 'Grayscale',true); + % % And get a prediction from the FaceRecognizer: + % predicted = model.predict(img); % % Or to get a prediction and the associated confidence (e.g. % distance): % - % % Do your initialization here (create the FaceRecognizer model) ... - % img = cv.imread('person1/3.jpg', 'Grayscale',true); - % % Get the prediction and associated confidence from the model - % [predicted_label, predicted_confidence] = model.predict(img); + % % Do your initialization here (create the FaceRecognizer model) ... + % img = cv.imread('person1/3.jpg', 'Grayscale',true); + % % Get the prediction and associated confidence from the model + % [predicted_label, predicted_confidence] = model.predict(img); % % See also: cv.LBPHFaceRecognizer.train % @@ -397,19 +396,19 @@ function update(this, src, labels) function results = predict_collect(this, src, varargin) %PREDICT_COLLECT send all result of prediction to collector for custom result handling % - % results = obj.predict_collect(src) - % results = obj.predict_collect(src, 'OptionName',optionValue, ...) + % results = obj.predict_collect(src) + % results = obj.predict_collect(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ Sample image to get a prediction from. % % ## Output % * __results__ A struct array of all collected predictions labels - % and associated prediction distances for the given image. + % and associated prediction distances for the given image. % % ## Options % * __Sorted__ If set, results will be sorted by distance. Each - % value is a pair of label and distance. default false + % value is a pair of label and distance. default false % % See also: cv.LBPHFaceRecognizer.predict % @@ -419,7 +418,7 @@ function update(this, src, labels) function setLabelInfo(this, label, strInfo) %SETLABELINFO Sets string info for the specified model's label % - % obj.setLabelInfo(label, strInfo) + % obj.setLabelInfo(label, strInfo) % % ## Input % * __label__ label id. 
@@ -436,7 +435,7 @@ function setLabelInfo(this, label, strInfo) function strInfo = getLabelInfo(this, label) %GETLABELINFO Gets string information by label % - % strInfo = obj.getLabelInfo(label) + % strInfo = obj.getLabelInfo(label) % % ## Input % * __label__ label id. @@ -456,7 +455,7 @@ function setLabelInfo(this, label, strInfo) function labels = getLabelsByString(this, str) %GETLABELSBYSTRING Gets vector of labels by string % - % labels = obj.getLabelsByString(str) + % labels = obj.getLabelsByString(str) % % ## Input % * __str__ string information (substring matching). @@ -478,15 +477,14 @@ function setLabelInfo(this, label, strInfo) function hists = getHistograms(this) %GETHISTOGRAMS Get calculated LBP histograms % - % hists = obj.getHistograms() + % hists = obj.getHistograms() % % ## Output % * __hists__ Local Binary Patterns Histograms calculated from the - % given training data (empty if none was given). - % A cell array of length `N` (training set size), each cell - % contains a `single` vector of length `B` representing the - % number of features (it depends on the image and grid sizes - % and number of neighbors). + % given training data (empty if none was given). A cell array of + % length `N` (training set size), each cell contains a `single` + % vector of length `B` representing the number of features (it + % depends on the image and grid sizes and number of neighbors). % % See also: cv.LBPHFaceRecognizer.getLabels % @@ -496,12 +494,12 @@ function setLabelInfo(this, label, strInfo) function labels = getLabels(this) %GETLABELS Get labels % - % labels = obj.getLabels() + % labels = obj.getLabels() % % ## Output % * __labels__ Labels corresponding to the calculated Local Binary - % Patterns Histograms. - % An integer vector of length `N` (training set size). + % Patterns Histograms. An integer vector of length `N` (training + % set size). % % Note: returns an empty mat if the model is not trained. % diff --git a/opencv_contrib/+cv/LSDDetector.m b/opencv_contrib/+cv/LSDDetector.m index 71b75a73f..1e4e9b142 100644 --- a/opencv_contrib/+cv/LSDDetector.m +++ b/opencv_contrib/+cv/LSDDetector.m @@ -33,14 +33,15 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = LSDDetector() %LSDDETECTOR Creates an LSDDetector object % - % obj = cv.LSDDetector() + % obj = cv.LSDDetector() % % See also: cv.LSDDetector.detect % @@ -50,7 +51,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.LSDDetector % @@ -64,7 +65,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.LSDDetector.empty, cv.LSDDetector.load % @@ -74,11 +75,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.LSDDetector.clear % @@ -88,7 +89,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. 
@@ -104,21 +105,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -132,11 +133,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.LSDDetector.save, cv.LSDDetector.load % @@ -149,9 +150,9 @@ function load(this, fname_or_str, varargin) function keylines = detect(this, img, varargin) %DETECT Detect lines inside an image or image set % - % keylines = obj.detect(img) - % keylines = obj.detect(imgs) - % [...] = obj.detect(..., 'OptionName',optionValue, ...) + % keylines = obj.detect(img) + % keylines = obj.detect(imgs) + % [...] = obj.detect(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Input image (first variant), 8-bit grayscale. @@ -159,41 +160,39 @@ function load(this, fname_or_str, varargin) % % ## Output % * __keylines__ Extracted lines for one or more images. In the - % first variant, a 1-by-N structure array. In the second - % variant of the method, `keylines{i}` is a set of keylines - % detected in `imgs{i}`. Each keyline is described with a - % `KeyLine` structure with the following fields: - % * __angle__ orientation of the line. - % * **class_id** object ID, that can be used to cluster - % keylines by the line they represent. - % * __octave__ octave (pyramid layer), from which the - % keyline has been extracted. - % * __pt__ coordinates of the middlepoint `[x,y]`. - % * __response__ the response, by which the strongest - % keylines have been selected. It's represented by the - % ratio between line's length and maximum between - % image's width and height. - % * __size__ minimum area containing line. - % * __startPoint__ the start point of the line in the - % original image `[x,y]`. - % * __endPoint__ the end point of the line in the original - % image `[x,y]`. - % * __startPointInOctave__ the start point of the line in - % the octave it was extracted from `[x,y]`. - % * __endPointInOctave__ the end point of the line in the - % octave it was extracted from `[x,y]`. - % * __lineLength__ the length of line. - % * __numOfPixels__ number of pixels covered by the line. + % first variant, a 1-by-N structure array. 
In the second + % variant of the method, `keylines{i}` is a set of keylines + % detected in `imgs{i}`. Each keyline is described with a + % `KeyLine` structure with the following fields: + % * __angle__ orientation of the line. + % * **class_id** object ID that can be used to cluster keylines + % by the line they represent. + % * __octave__ octave (pyramid layer) from which the keyline + % has been extracted. + % * __pt__ coordinates of the midpoint `[x,y]`. + % * __response__ the response by which the strongest keylines + % have been selected. It is the ratio between the line's + % length and the maximum of the image's width and height. + % * __size__ minimum area containing the line. + % * __startPoint__ the start point of the line in the original + % image `[x,y]`. + % * __endPoint__ the end point of the line in the original image + % `[x,y]`. + % * __startPointInOctave__ the start point of the line in the + % octave it was extracted from `[x,y]`. + % * __endPointInOctave__ the end point of the line in the octave + % it was extracted from `[x,y]`. + % * __lineLength__ the length of the line. + % * __numOfPixels__ number of pixels covered by the line. % % ## Options % * __Scale__ scale factor used in pyramid generation. default 2 % * __NumOctaves__ number of octaves inside the pyramid. default 1 % * __Mask__ optional mask matrix to detect only `KeyLines` of - % interest. It must be a logical or 8-bit integer matrix - % with non-zero values in the region of interest. In the - % second variant, it is a cell-array of masks for each - % input image, `masks{i}` is a mask for `imgs{i}`. - % Not set by default. + % interest. It must be a logical or 8-bit integer matrix with + % non-zero values in the region of interest. In the second + % variant, it is a cell-array of masks for each input image, + % `masks{i}` is a mask for `imgs{i}`. Not set by default. % % `KeyLine` is a struct to represent a line. % diff --git a/opencv_contrib/+cv/LUCID.m b/opencv_contrib/+cv/LUCID.m index 68d38b264..faa93166e 100644 --- a/opencv_contrib/+cv/LUCID.m +++ b/opencv_contrib/+cv/LUCID.m @@ -23,22 +23,23 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = LUCID(varargin) %LUCID Constructor % - % obj = cv.LUCID() - % obj = cv.LUCID(..., 'OptionName',optionValue, ...) + % obj = cv.LUCID() + % obj = cv.LUCID(..., 'OptionName',optionValue, ...) % % ## Options % * __LucidKernel__ kernel for descriptor construction, where - % 1=3x3, 2=5x5, 3=7x7 and so forth. default 1 + % 1=3x3, 2=5x5, 3=7x7 and so forth. default 1 % * __BlurKernel__ kernel for blurring the image prior to descriptor - % construction, where 1=3x3, 2=5x5, 3=7x7 and so forth.
+ % default 2 % % See also: cv.LUCID.compute % @@ -48,7 +49,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.LUCID % @@ -59,7 +60,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -73,7 +74,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.LUCID.empty % @@ -83,11 +84,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.LUCID.clear, cv.LUCID.load % @@ -97,7 +98,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -113,21 +114,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -141,11 +142,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.LUCID.save, cv.LUCID.load % @@ -158,16 +159,16 @@ function load(this, fname_or_str, varargin) function ntype = defaultNorm(this) %DEFAULTNORM Returns the default norm type % - % ntype = obj.defaultNorm() + % ntype = obj.defaultNorm() % % ## Output % * __ntype__ Norm type. One of `cv::NormTypes`: - % * __Inf__ - % * __L1__ - % * __L2__ - % * __L2Sqr__ - % * __Hamming__ - % * __Hamming2__ + % * __Inf__ + % * __L1__ + % * __L2__ + % * __L2Sqr__ + % * __Hamming__ + % * __Hamming2__ % % Always `Hamming` for LUCID. % @@ -179,11 +180,11 @@ function load(this, fname_or_str, varargin) function sz = descriptorSize(this) %DESCRIPTORSIZE Returns the descriptor size in bytes % - % sz = obj.descriptorSize() + % sz = obj.descriptorSize() % % ## Output % * __sz__ Descriptor size. Depends on `LucidKernel` argument in - % constructor. + % constructor. 
% % See also: cv.LUCID.descriptorType, cv.LUCID.compute % @@ -193,7 +194,7 @@ function load(this, fname_or_str, varargin) function dtype = descriptorType(this) %DESCRIPTORTYPE Returns the descriptor type % - % dtype = obj.descriptorType() + % dtype = obj.descriptorType() % % ## Output % * __dtype__ Descriptor type, one of numeric MATLAB class names. @@ -208,26 +209,26 @@ function load(this, fname_or_str, varargin) function [descriptors, keypoints] = compute(this, img, keypoints) %COMPUTE Computes the descriptors for a set of keypoints detected in an image or image set % - % [descriptors, keypoints] = obj.compute(img, keypoints) - % [descriptors, keypoints] = obj.compute(imgs, keypoints) + % [descriptors, keypoints] = obj.compute(img, keypoints) + % [descriptors, keypoints] = obj.compute(imgs, keypoints) % % ## Input % * __img__ Image (first variant), 8-bit 3-channel color image. % * __imgs__ Image set (second variant), cell array of images. % * __keypoints__ Input collection of keypoints. Keypoints for - % which a descriptor cannot be computed are removed. - % Sometimes new keypoints can be added, for example: cv.SIFT - % duplicates keypoint with several dominant orientations - % (for each orientation). In the first variant, this is a - % struct-array of detected keypoints. In the second variant, - % it is a cell-array, where `keypoints{i}` is a set of keypoints - % detected in `imgs{i}` (a struct-array like before). + % which a descriptor cannot be computed are removed. Sometimes + % new keypoints can be added, for example: cv.SIFT duplicates + % keypoint with several dominant orientations (for each + % orientation). In the first variant, this is a struct-array of + % detected keypoints. In the second variant, it is a cell-array, + % where `keypoints{i}` is a set of keypoints detected in + % `imgs{i}` (a struct-array like before). % % ## Output % * __descriptors__ Computed descriptors. In the second variant of - % the method `descriptors{i}` are descriptors computed for a - % `keypoints{i}`. Row `j` in `descriptors` (or - % `descriptors{i}`) is the descriptor for `j`-th keypoint. + % the method `descriptors{i}` are descriptors computed for a + % `keypoints{i}`. Row `j` in `descriptors` (or `descriptors{i}`) + % is the descriptor for `j`-th keypoint. % * __keypoints__ Optional output with possibly updated keypoints. % % See also: cv.LUCID.LUCID diff --git a/opencv_contrib/+cv/LearningBasedWB.m b/opencv_contrib/+cv/LearningBasedWB.m index dfc43d15e..a2b182dd5 100644 --- a/opencv_contrib/+cv/LearningBasedWB.m +++ b/opencv_contrib/+cv/LearningBasedWB.m @@ -9,7 +9,7 @@ % To mask out saturated pixels this function uses only pixels that satisfy % the following condition: % - % max(R,G,B)/RangeMaxVal < SaturationThreshold + % max(R,G,B)/RangeMaxVal < SaturationThreshold % % Currently supports RGB images of type `uint8` and `uint16`. % @@ -24,7 +24,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -47,12 +48,12 @@ function this = LearningBasedWB(varargin) %LEARNINGBASEDWB Creates an instance of LearningBasedWB % - % obj = cv.LearningBasedWB() - % obj = cv.LearningBasedWB('OptionName',optionValue, ...) + % obj = cv.LearningBasedWB() + % obj = cv.LearningBasedWB('OptionName',optionValue, ...) % % ## Options % * __PathToModel__ Path to a .yml file with the model. If not - % specified, the default model is used. Not set by default. + % specified, the default model is used. Not set by default. 
% % See also: cv.LearningBasedWB.balanceWhite % @@ -62,7 +63,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.LearningBasedWB % @@ -76,7 +77,7 @@ function delete(this) function dst = balanceWhite(this, src) %BALANCEWHITE Applies white balancing to the input image % - % dst = obj.balanceWhite(src) + % dst = obj.balanceWhite(src) % % ## Input % * __src__ Input image, `uint8` or `uint16` color image. @@ -95,14 +96,14 @@ function delete(this) function dst = extractSimpleFeatures(this, src) %EXTRACTSIMPLEFEATURES Implements the feature extraction part of the algorithm % - % dst = obj.extractSimpleFeatures(src) + % dst = obj.extractSimpleFeatures(src) % % ## Input % * __src__ Input 3-channel image (BGR color space is assumed). % % ## Output % * __dst__ An array of four (r,g) chromaticity tuples - % corresponding to the features listed below. + % corresponding to the features listed below. % % In accordance with [Cheng2015], computes the following features % for the input image: @@ -130,7 +131,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.LearningBasedWB.empty % @@ -140,11 +141,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.LearningBasedWB.clear % @@ -154,11 +155,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.LearningBasedWB.save, cv.LearningBasedWB.load % @@ -168,7 +169,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -183,21 +184,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from a file storage. % The previous model state is discarded. 
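Taken together, the cv.LearningBasedWB methods above reduce to a construct/apply cycle. A minimal sketch, assuming mexopencv is on the MATLAB path (`peppers.png` is MATLAB's bundled test image, used purely for illustration):

```matlab
% White-balance sketch for cv.LearningBasedWB with the default built-in model.
src = imread('peppers.png');           % uint8 RGB input image
wb  = cv.LearningBasedWB();            % 'PathToModel' not set, default model
dst = wb.balanceWhite(src);            % apply learning-based white balancing
feat = wb.extractSimpleFeatures(src);  % the four (r,g) chromaticity features
imshow([src, dst])                     % compare input and corrected output
```

The handle can be reused for subsequent images; only `balanceWhite` needs to be called per frame.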
diff --git a/opencv_contrib/+cv/MSDDetector.m b/opencv_contrib/+cv/MSDDetector.m index de4ff1cb5..723979bf9 100644 --- a/opencv_contrib/+cv/MSDDetector.m +++ b/opencv_contrib/+cv/MSDDetector.m @@ -26,31 +26,32 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = MSDDetector(varargin) %MSDDETECTOR The full constructor % - % obj = cv.MSDDetector() - % obj = cv.MSDDetector('OptionName',optionValue, ...) + % obj = cv.MSDDetector() + % obj = cv.MSDDetector('OptionName',optionValue, ...) % % ## Options % * __PatchRadius__ Patch radius. default 3 % * __SearchAreaRadius__ Search Area radius. default 5 % * __NMSRadius__ Non Maxima Suppression spatial radius. default 5 % * __NMSScaleRadius__ Non Maxima Suppression scale radius. - % default 0 + % default 0 % * __ThSaliency__ Saliency threshold. default 250.0 % * __KNN__ Number of nearest neighbors. default 4 % * __ScaleFactor__ Scale factor for building up the image - % pyramid. default 1.25 + % pyramid. default 1.25 % * __NScales__ Number of scales for building up - % the image pyramid (if set to -1, this number is - % automatically determined). default -1 + % the image pyramid (if set to -1, this number is automatically + % determined). default -1 % * __ComputeOrientation__ Flag for associating a canonical - % orientation to each keypoint. default false + % orientation to each keypoint. default false % % See also: cv.MSDDetector.detect % @@ -60,7 +61,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.MSDDetector % @@ -71,7 +72,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -85,7 +86,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.MSDDetector.empty, cv.MSDDetector.load % @@ -95,11 +96,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.MSDDetector.clear, cv.MSDDetector.load % @@ -109,7 +110,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -125,21 +126,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used).
default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -153,11 +154,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.MSDDetector.save, cv.MSDDetector.load % @@ -170,27 +171,25 @@ function load(this, fname_or_str, varargin) function keypoints = detect(this, img, varargin) %DETECT Detects keypoints in an image or image set % - % keypoints = obj.detect(img) - % keypoints = obj.detect(imgs) - % [...] = obj.detect(..., 'OptionName',optionValue, ...) + % keypoints = obj.detect(img) + % keypoints = obj.detect(imgs) + % [...] = obj.detect(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Image (first variant), 8-bit grayscale or color image. % * __imgs__ Image set (second variant), cell array of images. % % ## Output - % * __keypoints__ The detected keypoints. In the first variant, - % a 1-by-N structure array. In the second variant of the - % method, `keypoints{i}` is a set of keypoints detected in - % `imgs{i}`. + % * __keypoints__ The detected keypoints. In the first variant, a + % 1-by-N structure array. In the second variant of the method, + % `keypoints{i}` is a set of keypoints detected in `imgs{i}`. % % ## Options % * __Mask__ A mask specifying where to look for keypoints - % (optional). It must be a logical or 8-bit integer matrix - % with non-zero values in the region of interest. In the - % second variant, it is a cell-array of masks for each input - % image, `masks{i}` is a mask for `imgs{i}`. - % Not set by default. + % (optional). It must be a logical or 8-bit integer matrix with + % non-zero values in the region of interest. In the second + % variant, it is a cell-array of masks for each input image, + % `masks{i}` is a mask for `imgs{i}`. Not set by default. % % See also: cv.MSDDetector.MSDDetector % diff --git a/opencv_contrib/+cv/MotionSaliencyBinWangApr2014.m b/opencv_contrib/+cv/MotionSaliencyBinWangApr2014.m index afebfad08..cbcfd6fee 100644 --- a/opencv_contrib/+cv/MotionSaliencyBinWangApr2014.m +++ b/opencv_contrib/+cv/MotionSaliencyBinWangApr2014.m @@ -24,7 +24,7 @@ % % Saliency UML diagram: % - % ![image](http://docs.opencv.org/3.1.0/saliency.png) + % ![image](https://docs.opencv.org/3.3.1/saliency.png) % % To see how API works, try tracker demo: `computeSaliency_demo.m`. 
% @@ -49,7 +49,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -63,7 +64,7 @@ function this = MotionSaliencyBinWangApr2014() %MOTIONSALIENCYBINWANGAPR2014 Constructor, creates a specialized saliency algorithm of this type % - % obj = cv.MotionSaliencyBinWangApr2014() + % obj = cv.MotionSaliencyBinWangApr2014() % % See also: cv.MotionSaliencyBinWangApr2014.computeSaliency % @@ -73,7 +74,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.MotionSaliencyBinWangApr2014 % @@ -84,7 +85,7 @@ function delete(this) function setImagesize(this, W, H) %SETIMAGESIZE This is a utility function that allows to set the correct size (taken from the input image) in the corresponding variables that will be used to size the data structures of the algorithm % - % obj.setImagesize(W, H) + % obj.setImagesize(W, H) % % ## Input % * __W__ width of input image. @@ -98,7 +99,7 @@ function setImagesize(this, W, H) function init(this) %INIT This function allows the correct initialization of all data structures that will be used by the algorithm % - % obj.init() + % obj.init() % % See also: cv.MotionSaliencyBinWangApr2014.setImagesize % @@ -111,7 +112,7 @@ function init(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.MotionSaliencyBinWangApr2014.empty, % cv.MotionSaliencyBinWangApr2014.load @@ -122,11 +123,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.MotionSaliencyBinWangApr2014.clear, % cv.MotionSaliencyBinWangApr2014.load @@ -137,7 +138,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -153,21 +154,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). 
The previous @@ -181,11 +182,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.MotionSaliencyBinWangApr2014.save, % cv.MotionSaliencyBinWangApr2014.load @@ -196,33 +197,21 @@ function load(this, fname_or_str, varargin) %% Saliency, MotionSaliency methods - function className = getClassName(this) - %GETCLASSNAME Get the name of the specific saliency type - % - % className = obj.getClassName() - % - % ## Output - % * __className__ The name of the tracker initializer. - % - % See also: cv.MotionSaliencyBinWangApr2014.MotionSaliencyBinWangApr2014 - % - className = MotionSaliencyBinWangApr2014_(this.id, 'getClassName'); - end - function saliencyMap = computeSaliency(this, img) %COMPUTESALIENCY Compute the saliency % - % saliencyMap = obj.computeSaliency(img) + % saliencyMap = obj.computeSaliency(img) % % ## Input - % * __img__ The input image, 8-bit. + % * __img__ The input image, 8-bit grayscale. % % ## Output - % * __saliencyMap__ The computed saliency map. Is a binarized map - % that, in accordance with the nature of the algorithm, - % highlights the moving objects or areas of change in the - % scene. The saliency map is given by a matrix (one for each - % frame of an hypothetical video stream). + % * __saliencyMap__ The computed saliency map + % (background-foreground mask). Is a binarized map that, in + % accordance with the nature of the algorithm, highlights the + % moving objects or areas of change in the scene. The saliency + % map is given by a matrix (one for each frame of an + % hypothetical video stream). % % Performs all the operations and calls all internal functions % necessary for the accomplishment of the Fast Self-tuning diff --git a/opencv_contrib/+cv/ObjectnessBING.m b/opencv_contrib/+cv/ObjectnessBING.m index 87dc7fbea..1c3f9470f 100644 --- a/opencv_contrib/+cv/ObjectnessBING.m +++ b/opencv_contrib/+cv/ObjectnessBING.m @@ -24,7 +24,7 @@ % % Saliency UML diagram: % - % ![image](http://docs.opencv.org/3.1.0/saliency.png) + % ![image](https://docs.opencv.org/3.3.1/saliency.png) % % To see how API works, try tracker demo: `computeSaliency_demo.m`. % @@ -50,7 +50,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -66,7 +67,7 @@ function this = ObjectnessBING() %OBJECTNESSBING Constructor, creates a specialized saliency algorithm of this type % - % obj = cv.ObjectnessBING() + % obj = cv.ObjectnessBING() % % See also: cv.ObjectnessBING.computeSaliency % @@ -76,7 +77,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.ObjectnessBING % @@ -87,13 +88,13 @@ function delete(this) function objectnessValues = getObjectnessValues(this) %GETOBJECTNESSVALUES Return the list of the rectangles' objectness value % - % objectnessValues = obj.getObjectnessValues() + % objectnessValues = obj.getObjectnessValues() % % ## Output % * __objectnessValues__ vector of floats in the same order as - % `objectnessBoundingBox` returned by the algorithm (in - % `computeSaliency` function). The bigger value these scores - % are, it is more likely to be an object window. + % `objectnessBoundingBox` returned by the algorithm (in + % `computeSaliency` function). 
The higher these scores + % are, the more likely the rectangle is to contain an object. % % See also: cv.ObjectnessBING.computeSaliency % @@ -103,7 +104,7 @@ function delete(this) function setTrainingPath(this, trainingPath) %SETTRAININGPATH This is a utility function that allows to set the correct path from which the algorithm will load the trained model % - % obj.setTrainingPath(trainingPath) + % obj.setTrainingPath(trainingPath) % % ## Input % * __trainingPath__ trained model path. @@ -116,7 +117,7 @@ function setTrainingPath(this, trainingPath) function setBBResDir(this, resultsDir) %SETBBRESDIR This is a utility function that allows to set an arbitrary path in which the algorithm will save the optional results % - % obj.setBBResDir(resultsDir) + % obj.setBBResDir(resultsDir) % % ## Input % * __resultsDir__ results' folder path. @@ -135,7 +136,7 @@ function setBBResDir(this, resultsDir) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.ObjectnessBING.empty, cv.ObjectnessBING.load % @@ -145,11 +146,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.ObjectnessBING.clear, cv.ObjectnessBING.load % @@ -159,7 +160,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -175,21 +176,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -203,11 +204,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string.
% % See also: cv.ObjectnessBING.save, cv.ObjectnessBING.load % @@ -217,33 +218,20 @@ function load(this, fname_or_str, varargin) %% Saliency, Objectness methods - function className = getClassName(this) - %GETCLASSNAME Get the name of the specific saliency type - % - % className = obj.getClassName() - % - % ## Output - % * __className__ The name of the tracker initializer. - % - % See also: cv.ObjectnessBING.ObjectnessBING - % - className = ObjectnessBING_(this.id, 'getClassName'); - end - function objectnessBoundingBox = computeSaliency(this, img) %COMPUTESALIENCY Compute the saliency % - % objectnessBoundingBox = obj.computeSaliency(img) + % objectnessBoundingBox = obj.computeSaliency(img) % % ## Input - % * __img__ The input image, 8-bit. + % * __img__ The input image, 8-bit color. % % ## Output % * __objectnessBoundingBox__ objectness Bounding Box vector. - % According to the result given by this specialized - % algorithm, the `objectnessBoundingBox` is a cell array of - % 4-element vectors. Each bounding box is represented by - % `[minX, minY, maxX, maxY]`. + % According to the result given by this specialized algorithm, + % the `objectnessBoundingBox` is a cell array of 4-element + % vectors. Each bounding box is represented by + % `[minX, minY, maxX, maxY]`. % % Performs all the operations and calls all internal functions % necessary for the accomplishment of the Binarized normed diff --git a/opencv_contrib/+cv/OpticalFlowPCAFlow.m b/opencv_contrib/+cv/OpticalFlowPCAFlow.m index 22c7d8483..4b0a9f38c 100644 --- a/opencv_contrib/+cv/OpticalFlowPCAFlow.m +++ b/opencv_contrib/+cv/OpticalFlowPCAFlow.m @@ -23,7 +23,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end %% Constructor/destructor @@ -31,25 +32,24 @@ function this = OpticalFlowPCAFlow() %OPTICALFLOWPCAFLOW Creates an instance of PCAFlow % - % obj = cv.OpticalFlowPCAFlow() - % obj = cv.OpticalFlowPCAFlow('OptionName',optionValue, ...) + % obj = cv.OpticalFlowPCAFlow() + % obj = cv.OpticalFlowPCAFlow('OptionName',optionValue, ...) % % ## Options % * __Prior__ Learned prior or no prior (default). Specified as a - % path to prior. This instantiates a class used for imposing - % a learned prior on the resulting optical flow. Solution - % will be regularized according to this prior. You need to - % generate appropriate prior file with "learn_prior.py" - % script beforehand. + % path to prior. This instantiates a class used for imposing a + % learned prior on the resulting optical flow. Solution will be + % regularized according to this prior. You need to generate + % appropriate prior file with "learn_prior.py" script beforehand. % * __BasisSize__ Number of basis vectors. default [18,14] % * __SparseRate__ Controls density of sparse matches. - % default 0.024 + % default 0.024 % * __RetainedCornersFraction__ Retained corners fraction. - % default 0.2 + % default 0.2 % * __OcclusionsThreshold__ Occlusion threshold. default 0.0003 % * __DampingFactor__ Regularization term for solving - % least-squares. It is not related to the prior - % regularization. default 0.00002 + % least-squares. It is not related to the prior regularization. + % default 0.00002 % * __ClaheClip__ Clip parameter for CLAHE. 
default 14 % % See also: cv.OpticalFlowPCAFlow.calc @@ -60,7 +60,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.OpticalFlowPCAFlow % @@ -74,17 +74,17 @@ function delete(this) function flow = calc(this, I0, I1, varargin) %CALC Calculates an optical flow % - % flow = obj.calc(I0, I1) - % flow = obj.calc(I0, I1, 'OptionName',optionValue, ...) + % flow = obj.calc(I0, I1) + % flow = obj.calc(I0, I1, 'OptionName',optionValue, ...) % % ## Input % * __I0__ first 8-bit single-channel input image. % * __I1__ second input image of the same size and the same type - % as `I0`. + % as `I0`. % % ## Output % * __flow__ computed flow image that has the same size as `I0` - % and type `single` (2-channels). + % and type `single` (2-channels). % % ## Options % * __InitialFlow__ specify the initial flow. Not set by default. @@ -97,7 +97,7 @@ function delete(this) function collectGarbage(this) %COLLECTGARBAGE Releases all inner buffers % - % obj.collectGarbage() + % obj.collectGarbage() % OpticalFlowPCAFlow_(this.id, 'collectGarbage'); end @@ -108,7 +108,7 @@ function collectGarbage(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.OpticalFlowPCAFlow.empty % @@ -118,11 +118,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.OpticalFlowPCAFlow.clear % @@ -132,11 +132,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.OpticalFlowPCAFlow.save, cv.OpticalFlowPCAFlow.load % @@ -146,7 +146,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -161,21 +161,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from a file storage. % The previous model state is discarded. 
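Since cv.OpticalFlowPCAFlow.calc above takes two same-size 8-bit single-channel frames and returns a 2-channel `single` flow field, a typical call reduces to the sketch below (the frame file names are hypothetical placeholders, not part of the API):

```matlab
% Two-frame PCAFlow sketch using the default options (no learned prior).
I0 = rgb2gray(imread('frame1.png'));   % first 8-bit single-channel frame
I1 = rgb2gray(imread('frame2.png'));   % second frame, same size/type as I0
obj = cv.OpticalFlowPCAFlow();
flow = obj.calc(I0, I1);               % HxWx2 single, per-pixel (dx,dy)
obj.collectGarbage();                  % release inner buffers when done
```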
diff --git a/opencv_contrib/+cv/PCTSignatures.m b/opencv_contrib/+cv/PCTSignatures.m index 2f58ecd6b..0986b37b2 100644 --- a/opencv_contrib/+cv/PCTSignatures.m +++ b/opencv_contrib/+cv/PCTSignatures.m @@ -9,7 +9,7 @@ % set of clusters is the signature of the input image. % % A signature is an array of 8-dimensional points. Used dimensions are: - % weight, x/y position; lab color L/a/b, contrast, entropy. + % weight, x/y position; Lab color L/a/b, contrast, entropy. % % ## References % [KrulisLS16]: @@ -25,7 +25,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end % sampler (PCTSampler) @@ -94,32 +95,32 @@ function this = PCTSignatures(varargin) %PCTSIGNATURES Creates PCTSignatures algorithm % - % obj = cv.PCTSignatures() - % obj = cv.PCTSignatures('OptionName',optionValue, ...) + % obj = cv.PCTSignatures() + % obj = cv.PCTSignatures('OptionName',optionValue, ...) % - % obj = cv.PCTSignatures(initSamplingPoints, initSeedCount) + % obj = cv.PCTSignatures(initSamplingPoints, initSeedCount) % - % obj = cv.PCTSignatures(initSamplingPoints, initClusterSeedIndexes) + % obj = cv.PCTSignatures(initSamplingPoints, initClusterSeedIndexes) % % ## Input % * __initSamplingPoints__ Sampling points used in image sampling. % * __initSeedCount__ Number of initial clusterization seeds. Must - % be lower or equal to `length(initSamplingPoints)`. + % be lower or equal to `length(initSamplingPoints)`. % * __initClusterSeedIndexes__ Indexes of initial clusterization - % seeds. Its size must be lower or equal to - % `length(initSamplingPoints)`. + % seeds. Its size must be lower or equal to + % `length(initSamplingPoints)`. % % ## Options (first variant) % * __InitSampleCount__ Number of points used for image sampling. - % default 2000 + % default 2000 % * __InitSeedCount__ Number of initial clusterization seeds. Must - % be lower or equal to `InitSampleCount`. default 400 + % be lower or equal to `InitSampleCount`. default 400 % * __PointDistribution__ Distribution of generated points. - % Available: - % * __Uniform__ (default) Generate numbers uniformly. - % * __Regular__ Generate points in a regular grid. - % * __Normal__ Generate points with normal (gaussian) - % distribution. + % Available: + % * __Uniform__ (default) Generate numbers uniformly. + % * __Regular__ Generate points in a regular grid. + % * __Normal__ Generate points with normal (gaussian) + % distribution. % % In the first variant, it creates PCTSignatures algorithm using % sample and seed count. It generates its own sets of sampling @@ -141,7 +142,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.PCTSignatures % @@ -154,15 +155,15 @@ function delete(this) function initPoints = generateInitPoints(count, pointDistribution) %GENERATEINITPOINTS Generates initial sampling points according to selected point distribution % - % initPoints = cv.PCTSignatures.generateInitPoints(count, pointDistribution) + % initPoints = cv.PCTSignatures.generateInitPoints(count, pointDistribution) % % ## Input % * __count__ Number of points to generate. % * __pointDistribution__ Point distribution selector. Available: - % * __Uniform__ Generate numbers uniformly. - % * __Regular__ Generate points in a regular grid. - % * __Normal__ Generate points with normal (gaussian) - % distribution. + % * __Uniform__ Generate numbers uniformly. + % * __Regular__ Generate points in a regular grid. + % * __Normal__ Generate points with normal (gaussian) + % distribution. 
% % ## Output % * __initPoints__ Output vector of generated points. @@ -177,8 +178,8 @@ function delete(this) function result = drawSignature(source, signature, varargin) %DRAWSIGNATURE Draws signature in the source image and outputs the result % - % result = cv.PCTSignatures.drawSignature(source, signature) - % result = cv.PCTSignatures.drawSignature(..., 'OptionName',optionValue, ...) + % result = cv.PCTSignatures.drawSignature(source, signature) + % result = cv.PCTSignatures.drawSignature(..., 'OptionName',optionValue, ...) % % ## Input % * __source__ Source image. @@ -189,9 +190,9 @@ function delete(this) % % ## Options % * __RadiusToShorterSideRatio__ Determines maximal radius of - % signature in the output image. default 1/8 + % signature in the output image. default 1/8 % * __BorderThickness__ Border thickness of the visualized - % signature. default 1 + % signature. default 1 % % Signatures are visualized as a circle with radius based on % signature weight and color based on signature color. Contrast @@ -208,7 +209,7 @@ function delete(this) function signature = computeSignature(this, img) %COMPUTESIGNATURE Computes signature of given image % - % signature = obj.computeSignature(img) + % signature = obj.computeSignature(img) % % ## Input % * __img__ Input color image of `uint8` type. @@ -224,7 +225,7 @@ function delete(this) function signatures = computeSignatures(this, imgs) %COMPUTESIGNATURES Computes signatures for multiple images in parallel % - % signatures = obj.computeSignatures(imgs) + % signatures = obj.computeSignatures(imgs) % % ## Input % * __imgs__ Vector of input images of `uint8` type. @@ -243,7 +244,7 @@ function delete(this) function sampleCount = getSampleCount(this) %GETSAMPLECOUNT Number of initial samples taken from the image % - % sampleCount = obj.getSampleCount() + % sampleCount = obj.getSampleCount() % % ## Output % * __sampleCount__ Number of initial samples taken from the image. @@ -256,7 +257,7 @@ function delete(this) function samplingPoints = getSamplingPoints(this) %GETSAMPLINGPOINTS Initial samples taken from the image % - % samplingPoints = obj.getSamplingPoints() + % samplingPoints = obj.getSamplingPoints() % % ## Output % * __samplingPoints__ Initial samples taken from the image. @@ -271,7 +272,7 @@ function delete(this) function setSamplingPoints(this, samplingPoints) %SETSAMPLINGPOINTS Sets sampling points used to sample the input image % - % obj.setSamplingPoints(samplingPoints) + % obj.setSamplingPoints(samplingPoints) % % ## Input % * __samplingPoints__ Vector of sampling points in range [0..1). @@ -284,18 +285,18 @@ function setSamplingPoints(this, samplingPoints) function setWeight(this, idx, value) %SETWEIGHT Sets weight (multiplicative constant) that linearly stretch individual axis of the feature space % - % obj.setWeight(idx, value) + % obj.setWeight(idx, value) % % ## Input % * __idx__ ID of the weight (0-based index). One of: - % * __0__ weight. - % * __1__ X. - % * __2__ Y. - % * __3__ L. - % * __4__ A. - % * __5__ B. - % * __6__ Contrast. - % * __7__ Entropy. + % * __0__ weight. + % * __1__ X. + % * __2__ Y. + % * __3__ L. + % * __4__ A. + % * __5__ B. + % * __6__ Contrast. + % * __7__ Entropy. % * __value__ Value of the weight. 
% % See also: cv.PCTSignatures.setWeights @@ -306,11 +307,11 @@ function setWeight(this, idx, value) function setWeights(this, weights) %SETWEIGHTS Sets weights (multiplicative constants) that linearly stretch individual axes of the feature space % - % obj.setWeights(weights) + % obj.setWeights(weights) % % ## Input % * __weights__ Values of all weights. A float vector of the form - % `[weight, X, Y, L, A, B, Contrast, Entropy]`. + % `[weight, X, Y, L, A, B, Contrast, Entropy]`. % % See also: cv.PCTSignatures.setWeight % @@ -320,18 +321,18 @@ function setWeights(this, weights) function setTranslation(this, idx, value) %SETRANSLATION Sets translation of the individual axis of the feature space % - % obj.setTranslation(idx, value) + % obj.setTranslation(idx, value) % % ## Input % * __idx__ ID of the translation (0-based index). One of: - % * __0__ weight. - % * __1__ X. - % * __2__ Y. - % * __3__ L. - % * __4__ A. - % * __5__ B. - % * __6__ Contrast. - % * __7__ Entropy. + % * __0__ weight. + % * __1__ X. + % * __2__ Y. + % * __3__ L. + % * __4__ A. + % * __5__ B. + % * __6__ Contrast. + % * __7__ Entropy. % * __value__ Value of the translation. % % See also: cv.PCTSignatures.setTranslations @@ -342,11 +343,11 @@ function setTranslation(this, idx, value) function setTranslations(this, translations) %SETRANSLATIONS Sets translations of the individual axes of the feature space % - % obj.setTranslations(translations) + % obj.setTranslations(translations) % % ## Input % * __translations__ Values of all translations. A float vector - % of the form `[weight, X, Y, L, A, B, Contrast, Entropy]`. + % of the form `[weight, X, Y, L, A, B, Contrast, Entropy]`. % % See also: cv.PCTSignatures.setTranslation % @@ -359,11 +360,11 @@ function setTranslations(this, translations) function initSeedCount = getInitSeedCount(this) %GETINITSEEDCOUNT Number of initial seeds for the k-means algorithm % - % initSeedCount = obj.getInitSeedCount() + % initSeedCount = obj.getInitSeedCount() % % ## Output % * __initSeedCount__ Number of initial seeds (initial number of - % clusters) for the k-means algorithm. + % clusters) for the k-means algorithm. % % See also: cv.PCTSignatures.getInitSeedIndexes % @@ -373,11 +374,11 @@ function setTranslations(this, translations) function initSeedIndexes = getInitSeedIndexes(this) %GETINITSEEDINDEXES Initial seeds for the k-means algorithm % - % initSeedIndexes = obj.getInitSeedIndexes() + % initSeedIndexes = obj.getInitSeedIndexes() % % ## Output % * __initSeedIndexes__ Initial seeds (initial number of clusters) - % for the k-means algorithm (0-based indices). + % for the k-means algorithm (0-based indices). % % See also: cv.PCTSignatures.setInitSeedIndexes % @@ -387,11 +388,11 @@ function setTranslations(this, translations) function setInitSeedIndexes(this, initSeedIndexes) %SETINITSEEDINDEXES Sets initial seed indexes for the k-means algorithm % - % obj.setInitSeedIndexes(initSeedIndexes) + % obj.setInitSeedIndexes(initSeedIndexes) % % ## Input % * __initSeedIndexes__ Vector of initial seed indexes for the - % k-means algorithm (0-based indices). + % k-means algorithm (0-based indices). 
% % See also: cv.PCTSignatures.getInitSeedIndexes % @@ -404,7 +405,7 @@ function setInitSeedIndexes(this, initSeedIndexes) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.PCTSignatures.empty % @@ -414,11 +415,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.PCTSignatures.clear % @@ -428,11 +429,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.PCTSignatures.save, cv.PCTSignatures.load % @@ -442,7 +443,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -457,21 +458,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from a file storage. % The previous model state is discarded. diff --git a/opencv_contrib/+cv/PCTSignaturesSQFD.m b/opencv_contrib/+cv/PCTSignaturesSQFD.m index 653334434..9b0eb5fd8 100644 --- a/opencv_contrib/+cv/PCTSignaturesSQFD.m +++ b/opencv_contrib/+cv/PCTSignaturesSQFD.m @@ -13,7 +13,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end %% Constructor/destructor @@ -21,26 +22,26 @@ function this = PCTSignaturesSQFD(varargin) %PCTSIGNATURESSQFD Creates the algorithm instance using selected distance function, similarity function and similarity function parameter % - % obj = cv.PCTSignaturesSQFD() - % obj = cv.PCTSignaturesSQFD('OptionName',optionValue, ...) + % obj = cv.PCTSignaturesSQFD() + % obj = cv.PCTSignaturesSQFD('OptionName',optionValue, ...) % % ## Options % * __DistanceFunction__ Lp Distance function selector. Available: - % * **L0_25** - % * **L0_5** - % * __L1__ - % * __L2__ (default) - % * __L2Squared__ - % * __L5__ - % * **L_Inf** + % * **L0_25** + % * **L0_5** + % * __L1__ + % * __L2__ (default) + % * __L2Squared__ + % * __L5__ + % * **L_Inf** % * __SimilarityFunction__ Similarity function selector, for - % selected distance function `d(c_i, c_j)` and parameter - % `alpha`. 
Available: - % * __Minus__ `-d(c_i, c_j)` - % * __Gaussian__ `exp(-alpha * d^2(c_i, c_j))` - % * __Heuristic__ (default) `1/(alpha + d(c_i, c_j))` + % selected distance function `d(c_i, c_j)` and parameter + % `alpha`. Available: + % * __Minus__ `-d(c_i, c_j)` + % * __Gaussian__ `exp(-alpha * d^2(c_i, c_j))` + % * __Heuristic__ (default) `1/(alpha + d(c_i, c_j))` % * __SimilarityParameter__ Parameter of the similarity function. - % default 1.0 + % default 1.0 % % See also: cv.PCTSignaturesSQFD.computeQuadraticFormDistance % @@ -50,7 +51,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.PCTSignaturesSQFD % @@ -64,7 +65,7 @@ function delete(this) function dist = computeQuadraticFormDistance(this, signature0, signature1) %COMPUTEQUADRATICFORMDISTANCE Computes Signature Quadratic Form Distance of two signatures % - % dist = obj.computeQuadraticFormDistance(signature0, signature1) + % dist = obj.computeQuadraticFormDistance(signature0, signature1) % % ## Input % * __signature0__ The first signature. @@ -82,13 +83,13 @@ function delete(this) function distances = computeQuadraticFormDistances(this, sourceSignature, imageSignatures) %COMPUTEQUADRATICFORMDISTANCES Computes Signature Quadratic Form Distance between the reference signature and each of the other image signatures % - % distances = obj.computeQuadraticFormDistances(sourceSignature, imageSignatures) + % distances = obj.computeQuadraticFormDistances(sourceSignature, imageSignatures) % % ## Input % * __sourceSignature__ The signature to measure distance of other - % signatures from. + % signatures from. % * __imageSignatures__ Vector of signatures to measure distance - % from the source signature. + % from the source signature. % % ## Output % * __distances__ Output vector of measured distances. @@ -105,7 +106,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.PCTSignaturesSQFD.empty % @@ -115,11 +116,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.PCTSignaturesSQFD.clear % @@ -129,11 +130,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.PCTSignaturesSQFD.save, cv.PCTSignaturesSQFD.load % @@ -143,7 +144,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -158,21 +159,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. 
% % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from a file storage. % The previous model state is discarded. diff --git a/opencv_contrib/+cv/PeiLinNormalization.m b/opencv_contrib/+cv/PeiLinNormalization.m new file mode 100644 index 000000000..6e5934202 --- /dev/null +++ b/opencv_contrib/+cv/PeiLinNormalization.m @@ -0,0 +1,24 @@ +%PEILINNORMALIZATION Calculates an affine transformation that normalizes the given image using Pei/Lin normalization % +% T = cv.PeiLinNormalization(I) +% +% ## Input +% * __I__ Given transformed image. +% +% ## Output +% * __T__ 2x3 transformation matrix corresponding to the inverse image +% transformation. +% +% Assume the given image `I = T(Ibar)` where `Ibar` is a normalized image and `T` +% is an affine transformation distorting this image by translation, rotation, +% scaling and skew. The function returns an affine transformation matrix +% corresponding to the transformation `T_inv` described in [PeiLin95]. For +% more details about this implementation, please see [PeiLin95]. +% +% ## References +% [PeiLin95]: +% > Soo-Chang Pei and Chao-Nan Lin. "Image normalization for pattern +% > recognition". Image and Vision Computing, Vol. 13, No. 10, pp. 711-723, 1995. +% +% See also: +% diff --git a/opencv_contrib/+cv/Plot2d.m b/opencv_contrib/+cv/Plot2d.m index c48342205..097bce3f6 100644 --- a/opencv_contrib/+cv/Plot2d.m +++ b/opencv_contrib/+cv/Plot2d.m @@ -8,18 +8,19 @@ % % ## Example % - % x = linspace(-2*pi, 2*pi, 100); - % y = sin(x) + randn(size(x))*0.1; - % p = cv.Plot2d(x, y); - % p.PlotSize = [640 480]; - % img = p.render(); - % imshow(img) + % x = linspace(-2*pi, 2*pi, 100); + % y = sin(x) + randn(size(x))*0.1; + % p = cv.Plot2d(x, y); + % p.PlotSize = [640 480]; + % img = p.render(); + % imshow(img) % % See also: cv.Plot2d.Plot2d, plot % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent, GetAccess = private) @@ -49,20 +50,32 @@ PlotTextColor % plot dimensions `[w,h]` ([400,300] minimum size). default [600,400] PlotSize + % whether grid lines are drawn. default true + ShowGrid + % whether text is drawn. default true + ShowText + % number of grid lines. default 10 + GridLinesNumber + % invert plot orientation. default false + InvertOrientation + % the index of the point in the data array whose coordinates will be + % printed in the top-left corner of the plot (if ShowText is true). + % defaults to last point + PointIdxToPrint end methods function this = Plot2d(varargin) %PLOT2D Creates Plot2d object % - % obj = cv.Plot2d(dataY) - % obj = cv.Plot2d(dataX, dataY) + % obj = cv.Plot2d(dataY) + % obj = cv.Plot2d(dataX, dataY) % % ## Input % * __dataY__ 1xN or Nx1 matrix containing `Y` values of points to - % plot. In the first variant, `X` values will be equal to - % indexes of corresponding elements in data matrix, i.e - % `x = 0:(numel(y)-1)`. + % plot. In the first variant, `X` values will be equal to + % indexes of corresponding elements in data matrix, i.e + % `x = 0:(numel(y)-1)`. % * __dataX__ 1xN or Nx1 matrix, `X` values of points to plot.
% % See also: cv.Plot2d.render @@ -73,7 +86,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.Plot2d % @@ -84,16 +97,16 @@ function delete(this) function plotResult = render(this, varargin) %RENDER Renders the plot to a matrix % - % plotResult = obj.render() - % plotResult = obj.render('OptionName',optionValue, ...) + % plotResult = obj.render() + % plotResult = obj.render('OptionName',optionValue, ...) % % ## Output % * __plotResult__ Plot result, 8-bit 3-channel image. % % ## Options % * __FlipChannels__ whether to flip the order of color channels - % in output, from OpenCV's BGR to between MATLAB's RGB. - % default true + % in output, from OpenCV's BGR to MATLAB's RGB. + % default true % % See also: plot, getframe, print % @@ -106,7 +119,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.Plot2d.empty, cv.Plot2d.load % @@ -116,11 +129,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.Plot2d.clear, cv.Plot2d.load % @@ -130,7 +143,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -146,21 +159,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -174,11 +187,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string.
% % See also: cv.Plot2d.save, cv.Plot2d.load % @@ -235,6 +248,27 @@ function load(this, fname_or_str, varargin) function set.PlotSize(this, value) Plot2d_(this.id, 'set', 'PlotSize', value); end + + function set.ShowGrid(this, value) + Plot2d_(this.id, 'set', 'ShowGrid', value); + end + + function set.ShowText(this, value) + Plot2d_(this.id, 'set', 'ShowText', value); + end + + function set.GridLinesNumber(this, value) + Plot2d_(this.id, 'set', 'GridLinesNumber', value); + end + + function set.InvertOrientation(this, value) + Plot2d_(this.id, 'set', 'InvertOrientation', value); + end + + function set.PointIdxToPrint(this, value) + Plot2d_(this.id, 'set', 'PointIdxToPrint', value); + end + end end diff --git a/opencv_contrib/+cv/Retina.m b/opencv_contrib/+cv/Retina.m index 3f7d74a2c..cd4cc673c 100644 --- a/opencv_contrib/+cv/Retina.m +++ b/opencv_contrib/+cv/Retina.m @@ -114,7 +114,7 @@ % backlight problems. Here is the considered input... % *"Well, I could see more with my eyes than what I captured with my camera..."* % - % ![image](http://docs.opencv.org/3.1.0/retinaInput.jpg) + % ![image](https://docs.opencv.org/3.3.1/retinaInput.jpg) % % Below, the retina foveal model applied on the entire image with default % parameters. Details are enforced whatever the local luminance is. Here % parameters discussion below and increase `HorizontalCellsGain` near 1 to % remove them. % - % ![image](http://docs.opencv.org/3.1.0/retinaOutput_default.jpg) + % ![image](https://docs.opencv.org/3.3.1/retinaOutput_default.jpg) % % Below, a second retina foveal model output applied on the entire image % with a parameters setup focused on naturalness perception. @@ -140,7 +140,7 @@ % - `HorizontalCellsGain = 0.3` % - `PhotoreceptorsLocalAdaptationSensitivity = GanglioncellsSensitivity = 0.89`. % - % ![image](http://docs.opencv.org/3.1.0/retinaOutput_realistic.jpg) + % ![image](https://docs.opencv.org/3.3.1/retinaOutput_realistic.jpg) % % As observed in this preliminary demo, the retina can be set up with % various parameters, by default, as shown on the figure above, the retina % @@ -171,12 +171,12 @@ % * apply retina with default parameters along with the following changes % (generic parameters used for the presented illustrations of the % section): - % * `HorizontalCellsGain=0.4` (the main change compared to the - % default configuration: it strongly reduces halo effects) - % * `PhotoreceptorsLocalAdaptationSensitivity=0.99` (a little - % higher than default value to enforce local adaptation) - % * `GanglionCellsSensitivity=0.95` (also slightly higher than - % default for local adaptation enforcement) + % * `HorizontalCellsGain=0.4` (the main change compared to the default + % configuration: it strongly reduces halo effects) + % * `PhotoreceptorsLocalAdaptationSensitivity=0.99` (a little higher + % than default value to enforce local adaptation) + % * `GanglionCellsSensitivity=0.95` (also slightly higher than default + % for local adaptation enforcement) % * get the parvo output using the cv.Retina.getParvo method. % % Have a look at the end of this page to see how to specify these @@ -189,13 +189,13 @@ % Original image comes from [OpenEXR](http://openexr.com/) samples % (`openexr-images-1.7.0/ScanLines/CandleGlass.exr`) % - % ![image](http://docs.opencv.org/3.1.0/HDRtoneMapping_candleSample.jpg) + % ![image](https://docs.opencv.org/3.3.1/HDRtoneMapping_candleSample.jpg) % % - HDR image tone mapping example with the same generic parameters.
% Original image comes from % [memorial.exr](http://www.pauldebevec.com/Research/HDR/memorial.exr) % - % ![image](http://docs.opencv.org/3.1.0/HDRtoneMapping_memorialSample.jpg) + % ![image](https://docs.opencv.org/3.3.1/HDRtoneMapping_memorialSample.jpg) % % #### Motion and event detection using the Magnocellular pathway (magno retina output) % @@ -232,14 +232,14 @@ % Magno channel highlights moving persons, observe the energy mapping on % the one on top, partly behind a dark glass. % - % ![image](http://docs.opencv.org/3.1.0/VideoDemo_RGB_PETS2006.jpg) + % ![image](https://docs.opencv.org/3.3.1/VideoDemo_RGB_PETS2006.jpg) % % - Retina processing on gray levels image sequence: example from % [CDNET](http://changedetection.net/) (thermal/park). % On such grayscale images, parvo channel enforces contrasts while magno % strongly reacts to moving pedestrians % - % ![image](http://docs.opencv.org/3.1.0/VideoDemo_thermal_park.jpg) + % ![image](https://docs.opencv.org/3.3.1/VideoDemo_thermal_park.jpg) % % ### Literature % @@ -263,8 +263,8 @@ % originates from Jeanny's discussions. More information in the above % cited Jeanny Herault's book. % - % - Meylan&al work on HDR tone mapping that is implemented as a specific - % method within the model [Meylan2007] + % - Meylan et al. work on HDR tone mapping that is implemented as a + % specific method within the model [Meylan2007] % % ## Retina programming interfaces % @@ -282,31 +282,32 @@ % % The default configuration is presented below. % - % <?xml version="1.0"?> - % <opencv_storage> - % <OPLandIPLparvo> - % <colorMode>1</colorMode> - % <normaliseOutput>1</normaliseOutput> - % <photoreceptorsLocalAdaptationSensitivity>7.5e-01</photoreceptorsLocalAdaptationSensitivity> - % <photoreceptorsTemporalConstant>9.0e-01</photoreceptorsTemporalConstant> - % <photoreceptorsSpatialConstant>5.7e-01</photoreceptorsSpatialConstant> - % <horizontalCellsGain>0.01</horizontalCellsGain> - % <hcellsTemporalConstant>0.5</hcellsTemporalConstant> - % <hcellsSpatialConstant>7.</hcellsSpatialConstant> - % <ganglionCellsSensitivity>7.5e-01</ganglionCellsSensitivity> - % </OPLandIPLparvo> - % <IPLmagno> - % <normaliseOutput>1</normaliseOutput> - % <parasolCells_beta>0.</parasolCells_beta> - % <parasolCells_tau>0.</parasolCells_tau> - % <parasolCells_k>7.</parasolCells_k> - % <amacrinCellsTemporalCutFrequency>2.0e+00</amacrinCellsTemporalCutFrequency> - % <V0CompressionParameter>9.5e-01</V0CompressionParameter> - % <localAdaptintegration_tau>0.</localAdaptintegration_tau> - % <localAdaptintegration_k>7.</localAdaptintegration_k> - % </IPLmagno> - % </opencv_storage> + % ```xml + % <?xml version="1.0"?> + % <opencv_storage> + % <OPLandIPLparvo> + % <colorMode>1</colorMode> + % <normaliseOutput>1</normaliseOutput> + % <photoreceptorsLocalAdaptationSensitivity>7.5e-01</photoreceptorsLocalAdaptationSensitivity> + % <photoreceptorsTemporalConstant>9.0e-01</photoreceptorsTemporalConstant> + % <photoreceptorsSpatialConstant>5.7e-01</photoreceptorsSpatialConstant> + % <horizontalCellsGain>0.01</horizontalCellsGain> + % <hcellsTemporalConstant>0.5</hcellsTemporalConstant> + % <hcellsSpatialConstant>7.</hcellsSpatialConstant> + % <ganglionCellsSensitivity>7.5e-01</ganglionCellsSensitivity> + % </OPLandIPLparvo> + % <IPLmagno> + % <normaliseOutput>1</normaliseOutput> + % <parasolCells_beta>0.</parasolCells_beta> + % <parasolCells_tau>0.</parasolCells_tau> + % <parasolCells_k>7.</parasolCells_k> + % <amacrinCellsTemporalCutFrequency>2.0e+00</amacrinCellsTemporalCutFrequency> + % <V0CompressionParameter>9.5e-01</V0CompressionParameter> + % <localAdaptintegration_tau>0.</localAdaptintegration_tau> + % <localAdaptintegration_k>7.</localAdaptintegration_k> + % </IPLmagno> + % </opencv_storage> + % ``` % % Here are some words about all those parameters, tweak them as you wish % to amplify or moderate retina effects (contours enforcement, halos % @@ -448,10 +449,10 @@ % - load a first input image to get its size % - allocate a retina instance with appropriate input size % - loop over grabbed frames: - % - grab a new frame - % - run on a frame - % - call the two output getters - % - display retina outputs + % - grab a new frame + % - run on a frame + % - call the two output getters + % - display retina outputs % % See `retina_demo.m` MATLAB sample. % @@ -492,40 +493,40 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = Retina(inputSize, varargin) %RETINA Constructor from standardized interface to create a Retina instance % - % obj = cv.Retina(inputSize) - % obj = cv.Retina(inputSize, 'OptionName',optionValue, ...) + % obj = cv.Retina(inputSize) + % obj = cv.Retina(inputSize, 'OptionName',optionValue, ...) % % ## Input % * __inputSize__ the input frame size `[w,h]`. % % ## Options % * __ColorMode__ the chosen processing mode: with or without - % color processing. default true + % color processing. default true % * __ColorSamplingMethod__ specifies which kind of color sampling - % will be used, default 'Bayer'. One of: - % * __Random__ each pixel position is either R, G or B in a - % random choice - % * __Diagonal__ color sampling is RGBRGBRGB..., - % line 2 BRGBRGBRG..., line 3, GBRGBRGBR... - % * __Bayer__ standard bayer sampling + % will be used, default 'Bayer'.
One of: + % * __Random__ each pixel position is either R, G or B in a + % random choice + % * __Diagonal__ color sampling is RGBRGBRGB..., + % line 2 BRGBRGBRG..., line 3 GBRGBRGBR... + % * __Bayer__ standard bayer sampling % * __UseRetinaLogSampling__ activate retina log sampling. If true, - % the 2 following parameters can be used. default false + % the 2 following parameters can be used. default false % * __ReductionFactor__ only useful if param - % `UseRetinaLogSampling=true`, specifies the reduction factor - % of the output frame (as the center (fovea) is high - % resolution and corners can be underscaled, then a reduction - % of the output is allowed without precision leak). - % default 1.0 + % `UseRetinaLogSampling=true`, specifies the reduction factor of + % the output frame (as the center (fovea) is high resolution and + % corners can be underscaled, then a reduction of the output is + % allowed without precision leak). default 1.0 % * __SamplingStrength__ only useful if param - % `UseRetinaLogSampling=true`, specifies the strength of the - % log scale that is applied. default 10.0 + % `UseRetinaLogSampling=true`, specifies the strength of the log + % scale that is applied. default 10.0 % % See also: cv.Retina.run % @@ -535,7 +536,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.Retina % @@ -549,7 +550,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.Retina.empty, cv.Retina.load % @@ -559,11 +560,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.Retina.clear, cv.Retina.load % @@ -573,7 +574,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -589,21 +590,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). 
The previous @@ -617,11 +618,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.Retina.save, cv.Retina.load % @@ -634,7 +635,7 @@ function load(this, fname_or_str, varargin) function sz = getInputSize(this) %GETINPUTSIZE Retreive retina input buffer size % - % sz = obj.getInputSize() + % sz = obj.getInputSize() % % ## Output % * __sz__ the retina input buffer size @@ -647,7 +648,7 @@ function load(this, fname_or_str, varargin) function sz = getOutputSize(this) %GETOUTPUTSIZE Retreive retina output buffer size that can be different from the input if a spatial log transformation is applied % - % sz = obj.getOutputSize() + % sz = obj.getOutputSize() % % ## Output % * __sz__ the retina output buffer size @@ -660,15 +661,15 @@ function load(this, fname_or_str, varargin) function setup(this, retinaParameterFile, varargin) %SETUP Try to open an XML retina parameters file to adjust current retina instance setup % - % obj.setup(retinaParameterFile) - % obj.setup(retinaParameterFile, 'OptionName',optionValue, ...) + % obj.setup(retinaParameterFile) + % obj.setup(retinaParameterFile, 'OptionName',optionValue, ...) % % ## Input % * __retinaParameterFile__ the parameters filename. % % ## Options % * __ApplyDefaultSetupOnFailure__ set to true if an error must be - % thrown on error. default true + % thrown on error. default true % % If the xml file does not exist, then default setup is applied. % Warning: Exceptions are thrown if read XML file is not valid. @@ -685,13 +686,13 @@ function setup(this, retinaParameterFile, varargin) function setupParameters(this, varargin) %SETUPPARAMETERS Pass retina parameters to adjust current retina instance setup % - % obj.setupParameters('OptionName',optionValue, ...) + % obj.setupParameters('OptionName',optionValue, ...) % % ## Options % * __OPLandIplParvo__ Outer Plexiform Layer (OPL) and Inner - % Plexiform Layer Parvocellular (IplParvo) parameters. + % Plexiform Layer Parvocellular (IplParvo) parameters. % * __IplMagno__ Inner Plexiform Layer Magnocellular channel - % (IplMagno). + % (IplMagno). % % ### `OPLandIplParvo` options % See cv.Retina.setupOPLandIPLParvoChannel options. @@ -709,58 +710,62 @@ function setupParameters(this, varargin) % gives results such as the first retina output shown on the top % of this page. % - % - % - % - % 1 - % 1 - % 7.5e-01 - % 9.0e-01 - % 5.3e-01 - % 0.01 - % 0.5 - % 7. - % 7.5e-01 - % - % - % 1 - % 0. - % 0. - % 7. - % 2.0e+00 - % 9.5e-01 - % 0. - % 7. - % - % + % ```xml + % + % + % + % 1 + % 1 + % 7.5e-01 + % 9.0e-01 + % 5.3e-01 + % 0.01 + % 0.5 + % 7. + % 7.5e-01 + % + % + % 1 + % 0. + % 0. + % 7. + % 2.0e+00 + % 9.5e-01 + % 0. + % 7. + % + % + % ``` % % Here is the "realistic" setup used to obtain the second retina % output shown on the top of this page. % - % - % - % - % 1 - % 1 - % 8.9e-01 - % 9.0e-01 - % 5.3e-01 - % 0.3 - % 0.5 - % 7. - % 8.9e-01 - % - % - % 1 - % 0. - % 0. - % 7. - % 2.0e+00 - % 9.5e-01 - % 0. - % 7. - % - % + % ```xml + % + % + % + % 1 + % 1 + % 8.9e-01 + % 9.0e-01 + % 5.3e-01 + % 0.3 + % 0.5 + % 7. + % 8.9e-01 + % + % + % 1 + % 0. + % 0. + % 7. + % 2.0e+00 + % 9.5e-01 + % 0. + % 7. 
+ % </IPLmagno> + % </opencv_storage> + % ``` % % See also: cv.Retina.setup, cv.Retina.setupOPLandIPLParvoChannel, % cv.Retina.setupIPLMagnoChannel @@ -771,46 +776,46 @@ function setupParameters(this, varargin) function setupOPLandIPLParvoChannel(this, varargin) %SETUPOPLANDIPLPARVOCHANNEL Setup the OPL and IPL parvo channels (see biological model) % - % obj.setupOPLandIPLParvoChannel('OptionName',optionValue, ...) + % obj.setupOPLandIPLParvoChannel('OptionName',optionValue, ...) % % ## Options % * __ColorMode__ specifies if (true) color is processed or not - % (false) to then processing gray level image. default true + % (false), in which case gray level images are processed. default true % * __NormaliseOutput__ specifies if (true) output is rescaled - % between 0 and 255 of not (false). default true + % between 0 and 255 or not (false). default true % * __PhotoreceptorsLocalAdaptationSensitivity__ the photoreceptors - % sensitivity renage is 0-1 (more log compression effect when - % value increases). default 0.7 + % sensitivity range is 0-1 (more log compression effect when + % value increases). default 0.7 % * __PhotoreceptorsTemporalConstant__ the time constant of the - % first order low pass filter of the photoreceptors, use it - % to cut high temporal frequencies (noise or fast motion), - % unit is frames, typical value is 1 frame. default 0.5 + % first order low pass filter of the photoreceptors, use it to + % cut high temporal frequencies (noise or fast motion), unit is + % frames, typical value is 1 frame. default 0.5 % * __PhotoreceptorsSpatialConstant__ the spatial constant of the - % first order low pass filter of the photoreceptors, use it - % to cut high spatial frequencies (noise or thick contours), - % unit is pixels, typical value is 1 pixel. default 0.53 + % first order low pass filter of the photoreceptors, use it to + % cut high spatial frequencies (noise or thick contours), unit + % is pixels, typical value is 1 pixel. default 0.53 % * __HorizontalCellsGain__ gain of the horizontal cells network, - % if 0, then the mean value of the output is zero, if the - % parameter is near 1, then, the luminance is not filtered - % and is still reachable at the output, typicall value is 0. - % default 0.0 + % if 0, then the mean value of the output is zero, if the + % parameter is near 1, then, the luminance is not filtered and + % is still reachable at the output, typical value is 0. + % default 0.0 % * __HCellsTemporalConstant__ the time constant of the first - % order low pass filter of the horizontal cells, use it to - % cut low temporal frequencies (local luminance variations), - % unit is frames, typical value is 1 frame, as the - % photoreceptors. default 1.0 + % order low pass filter of the horizontal cells, use it to cut + % low temporal frequencies (local luminance variations), unit is + % frames, typical value is 1 frame, as the photoreceptors. + % default 1.0 % * __HCellsSpatialConstant__ the spatial constant of the first - % order low pass filter of the horizontal cells, use it to - % cut low spatial frequencies (local luminance), unit is - % pixels, typical value is 5 pixel, this value is also used - % for local contrast computing when computing the local - % contrast adaptation at the ganglion cells level (Inner - % Plexiform Layer parvocellular channel model).
default 7.0 + % order low pass filter of the horizontal cells, use it to cut + % low spatial frequencies (local luminance), unit is pixels, + % typical value is 5 pixel, this value is also used for local + % contrast computing when computing the local contrast + % adaptation at the ganglion cells level (Inner Plexiform Layer + % parvocellular channel model). default 7.0 % * __GanglionCellsSensitivity__ the compression strength of the - % ganglion cells local adaptation output, set a value - % between 0.6 and 1 for best results, a high value increases - % more the low value sensitivity and the output saturates - % faster, recommended value: 0.7. default 0.7 + % ganglion cells local adaptation output, set a value between + % 0.6 and 1 for best results, a high value increases more the + % low value sensitivity and the output saturates faster, + % recommended value: 0.7. default 0.7 % % OPL is referred to as Outer Plexiform Layer of the retina, it % allows the spatio-temporal filtering which whitens the spectrum @@ -829,40 +834,38 @@ function setupOPLandIPLParvoChannel(this, varargin) function setupIPLMagnoChannel(this, varargin) %SETUPIPLMAGNOCHANNEL Set parameter values for the Inner Plexiform Layer (IPL) magnocellular channel % - % obj.setupIPLMagnoChannel('OptionName',optionValue, ...) + % obj.setupIPLMagnoChannel('OptionName',optionValue, ...) % % ## Options % * __NormaliseOutput__ specifies if (true) output is rescaled - % between 0 and 255 of not (false). default true + % between 0 and 255 or not (false). default true % * __ParasolCellsBeta__ the low pass filter gain used for local - % contrast adaptation at the IPL level of the retina (for - % ganglion cells local adaptation), typical value is 0. - % default 0.0 + % contrast adaptation at the IPL level of the retina (for + % ganglion cells local adaptation), typical value is 0. + % default 0.0 % * __ParasolCellsTau__ the low pass filter time constant used for - % local contrast adaptation at the IPL level of the retina - % (for ganglion cells local adaptation), unit is frame, - % typical value is 0 (immediate response). default 0.0 + % local contrast adaptation at the IPL level of the retina (for + % ganglion cells local adaptation), unit is frame, typical value + % is 0 (immediate response). default 0.0 % * __ParasolCellsK__ the low pass filter spatial constant used - % for local contrast adaptation at the IPL level of the - % retina (for ganglion cells local adaptation), unit is - % pixels, typical value is 5. default 7.0 + % for local contrast adaptation at the IPL level of the retina + % (for ganglion cells local adaptation), unit is pixels, typical + % value is 5. default 7.0 % * __AmacrinCellsTemporalCutFrequency__ the time constant of the - % first order high pass fiter of the magnocellular way - % (motion information channel), unit is frames, typical - % value is 1.2. default 1.2 + % first order high pass filter of the magnocellular way (motion + % information channel), unit is frames, typical value is 1.2. + % default 1.2 % * __V0CompressionParameter__ the compression strength of the - % ganglion cells local adaptation output, set a value - % between 0.6 and 1 for best results, a high value increases - % more the low value sensitivity and the output saturates - % faster, recommended value: 0.95. + % ganglion cells local adaptation output, set a value between + % 0.6 and 1 for best results, a high value increases more the + % low value sensitivity and the output saturates faster, + % recommended value: 0.95.
default 0.95 % * __LocalAdaptintegrationTau__ specifies the temporal constant - % of the low pas filter involved in the computation of the - % local "motion mean" for the local adaptation computation. - % default 0.0 + % of the low pass filter involved in the computation of the local + % "motion mean" for the local adaptation computation. default 0.0 % * __LocalAdaptintegrationK__ specifies the spatial constant of - % the low pas filter involved in the computation of the - % local "motion mean" for the local adaptation computation. - % default 7.0 + % the low pass filter involved in the computation of the local + % "motion mean" for the local adaptation computation. default 7.0 % % This channel processes signals output from OPL processing stage % in peripheral vision, it allows motion information enhancement. @@ -877,7 +880,7 @@ function setupIPLMagnoChannel(this, varargin) function params = getParameters(this) %GETPARAMETERS Retrieve the current retina parameters values in a structure % - % params = obj.getParameters() + % params = obj.getParameters() % % ## Output % * __params__ the current parameters setup. @@ -890,11 +893,11 @@ function setupIPLMagnoChannel(this, varargin) function str = printSetup(this) %PRINTSETUP Outputs a string showing the used parameters setup % - % str = obj.printSetup() + % str = obj.printSetup() % % ## Output % * __str__ a string which contains formatted parameters - % information. + % information. % % See also: cv.Retina.getParameters % @@ -904,17 +907,16 @@ function setupIPLMagnoChannel(this, varargin) function varargout = write(this, fs) %WRITE Write xml/yml formatted parameters information % - % obj.write(fs) - % str = obj.write(fs) + % obj.write(fs) + % str = obj.write(fs) % % ## Input % * __fs__ the filename of the xml file that will be opened and - % writen with formatted parameters information. + % written with formatted parameters information. % % ## Output % * __str__ optional output. If requested, the parameters are - % persisted to a string in memory instead of writing to - % disk. + % persisted to a string in memory instead of writing to disk. % % See also: cv.Retina.setup % @@ -924,11 +926,11 @@ function setupIPLMagnoChannel(this, varargin) function run(this, inputImage) %RUN Method which allows retina to be applied on an input image % - % obj.run(inputImage) + % obj.run(inputImage) % % ## Input % * __inputImage__ the input image to be processed, can be gray - % level or BGR coded in any format (from 8bit to 16bits). + % level or BGR coded in any format (from 8bit to 16bits). % % After run, encapsulated retina module is ready to deliver its % outputs using dedicated accessors, see `getParvo` and @@ -936,22 +938,28 @@ function run(this, inputImage) % % See also: cv.Retina.getParvo, cv.Retina.getMagno % + if true + %HACK: temp fix to get around an issue when OpenCL is enabled + val = cv.Utils.useOptimized(); + cv.Utils.setUseOptimized(false); + cObj = onCleanup(@() cv.Utils.setUseOptimized(val)); + end Retina_(this.id, 'run', inputImage); end function outputToneMappedImage = applyFastToneMapping(this, inputImage) %APPLYFASTTONEMAPPING Method which processes an image in the aim to correct its luminance: correct backlight problems, enhance details in shadows % - % outputToneMappedImage = obj.applyFastToneMapping(inputImage) + % outputToneMappedImage = obj.applyFastToneMapping(inputImage) % % ## Input % * __inputImage__ the input image to process (should be coded in - % float format `single`: 1/3/4-channels, the 4th channel - % won't be considered).
+ % float format `single`: 1/3/4-channels, the 4th channel won't + % be considered). % % ## Output % * __outputToneMappedImage__ the output 8bit/channel tone mapped - % image (`uint8` with 1/3-channels format). + % image (`uint8` with 1/3-channels format). % % This method is designed to perform High Dynamic Range image tone % mapping (compress >8bit/pixel images to 8bit/pixel). This is a @@ -974,16 +982,16 @@ function run(this, inputImage) function parvo = getParvo(this) %GETPARVO Accessor of the details channel of the retina (models foveal vision) % - % parvo = obj.getParvo() + % parvo = obj.getParvo() % % ## Output % * __parvo__ the output buffer, format can be: - % * a matrix, this output is rescaled for standard 8bits - % image processing use in OpenCV - % * RAW methods actually return a 1D matrix (encoding is - % R1, R2, ... Rn, G1, G2, ..., Gn, B1, B2, ...Bn), this - % output is the original retina filter model output, - % without any quantification or rescaling. + % * a matrix, this output is rescaled for standard 8bits image + % processing use in OpenCV + % * RAW methods actually return a 1D matrix (encoding is + % `R1, R2, ..., Rn, G1, G2, ..., Gn, B1, B2, ..., Bn`), this + % output is the original retina filter model output, without + % any quantification or rescaling. % % Warning, `getParvoRAW` methods return buffers that are not % rescaled within range [0;255] while the non RAW method allows a @@ -997,16 +1005,16 @@ function run(this, inputImage) function parvo = getParvoRAW(this) %GETPARVORAW Accessor of the details channel of the retina (models foveal vision) % - % parvo = obj.getParvoRAW() + % parvo = obj.getParvoRAW() % % ## Output % * __parvo__ the output buffer, format can be: - % * a matrix, this output is rescaled for standard 8bits - % image processing use in OpenCV - % * RAW methods actually return a 1D matrix (encoding is - % R1, R2, ... Rn, G1, G2, ..., Gn, B1, B2, ...Bn), this - % output is the original retina filter model output, - % without any quantification or rescaling. + % * a matrix, this output is rescaled for standard 8bits image + % processing use in OpenCV + % * RAW methods actually return a 1D matrix (encoding is + % `R1, R2, ..., Rn, G1, G2, ..., Gn, B1, B2, ..., Bn`), this + % output is the original retina filter model output, without + % any quantification or rescaling. % % Warning, `getParvoRAW` methods return buffers that are not % rescaled within range [0;255] while the non RAW method allows a @@ -1020,16 +1028,15 @@ function run(this, inputImage) function magno = getMagno(this) %GETMAGNO Accessor of the motion channel of the retina (models peripheral vision) % - % magno = obj.getMagno() + % magno = obj.getMagno() % % ## Output % * __magno__ the output buffer, format can be: - % * a matrix, this output is rescaled for standard 8bits - % image processing use in OpenCV - % * RAW methods actually return a 1D matrix (encoding is - % M1, M2,... Mn), this output is the original retina - % filter model output, without any quantification or - % rescaling. + % * a matrix, this output is rescaled for standard 8bits image + % processing use in OpenCV + % * RAW methods actually return a 1D matrix (encoding is + % `M1, M2, ..., Mn`), this output is the original retina filter + % model output, without any quantification or rescaling. 
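Putting cv.Retina.run together with the getParvo/getMagno accessors documented above, a minimal frame-loop sketch in the spirit of the `retina_demo.m` sample referenced earlier (the video filename is hypothetical; cv.VideoCapture is the standard mexopencv video reader):

```matlab
cap = cv.VideoCapture('pedestrians.avi');  % hypothetical input video
frame = cap.read();                        % first frame fixes the buffer size
retina = cv.Retina([size(frame,2) size(frame,1)], 'ColorMode',true);
while ~isempty(frame)
    retina.run(frame);              % feed the current frame
    parvo = retina.getParvo();      % details channel (foveal vision)
    magno = retina.getMagno();      % motion channel (peripheral vision)
    subplot(121), imshow(parvo), title('parvo')
    subplot(122), imshow(magno), title('magno')
    drawnow
    frame = cap.read();
end
```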
% % Warning, `getMagnoRAW` methods return buffers that are not % rescaled within range [0;255] while the non RAW method allows a @@ -1043,16 +1050,15 @@ function run(this, inputImage) function magno = getMagnoRAW(this) %GETMAGNORAW Accessor of the motion channel of the retina (models peripheral vision) % - % magno = obj.getMagnoRAW() + % magno = obj.getMagnoRAW() % % ## Output % * __magno__ the output buffer, format can be: - % * a matrix, this output is rescaled for standard 8bits - % image processing use in OpenCV - % * RAW methods actually return a 1D matrix (encoding is - % M1, M2,... Mn), this output is the original retina - % filter model output, without any quantification or - % rescaling. + % * a matrix, this output is rescaled for standard 8bits image + % processing use in OpenCV + % * RAW methods actually return a 1D matrix (encoding is + % `M1, M2, ..., Mn`), this output is the original retina filter + % model output, without any quantification or rescaling. % % Warning, `getMagnoRAW` methods return buffers that are not % rescaled within range [0;255] while the non RAW method allows a @@ -1066,13 +1072,13 @@ function run(this, inputImage) function setColorSaturation(this, varargin) %SETCOLORSATURATION Activate color saturation as the final step of the color demultiplexing process % - % obj.setColorSaturation('OptionName',optionValue, ...) + % obj.setColorSaturation('OptionName',optionValue, ...) % % ## Options % * __SaturateColors__ boolean that activates color saturation (if - % true) or desactivate (if false). default true + % true) or deactivates it (if false). default true % * __ColorSaturationValue__ the saturation factor: a simple - % factor applied on the chrominance buffers. default 4.0 + % factor applied on the chrominance buffers. default 4.0 % % This saturation is a sigmoid function applied to each channel % of the demultiplexed image. @@ -1085,7 +1091,7 @@ function setColorSaturation(this, varargin) function clearBuffers(this) %CLEARBUFFERS Clears all retina buffers % - % obj.clearBuffers() + % obj.clearBuffers() % % (equivalent to opening the eyes after a long period of eye % closure); watch out for the temporal transition occurring just after % this method call. @@ -1099,12 +1105,12 @@ function clearBuffers(this) function activateMovingContoursProcessing(this, activate) %ACTIVATEMOVINGCONTOURSPROCESSING Activate/deactivate the Magnocellular pathway processing (motion information extraction) % - % obj.activateMovingContoursProcessing(activate) + % obj.activateMovingContoursProcessing(activate) % % ## Input % * __activate__ true if Magnocellular output should be activated, - % false if not. If activated, the Magnocellular output can - % be retrieved using the cv.Retina.getMagno method. + % false if not. If activated, the Magnocellular output can be + % retrieved using the cv.Retina.getMagno method. % % By default, it is activated. % @@ -1116,13 +1122,13 @@ function activateMovingContoursProcessing(this, activate) function activateContoursProcessing(this, activate) %ACTIVATECONTOURSPROCESSING Activate/deactivate the Parvocellular pathway processing (contours information extraction) % - % obj.activateContoursProcessing(activate) + % obj.activateContoursProcessing(activate) % % ## Input % * __activate__ true if Parvocellular (contours information - % extraction) output should be activated, false if not. If
If + % activated, the Parvocellular output can be retrieved using the + % cv.Retina.getParvo method. % % By default, it is activated. % diff --git a/opencv_contrib/+cv/RetinaFastToneMapping.m b/opencv_contrib/+cv/RetinaFastToneMapping.m index 8a2352d21..5ec2fcd39 100644 --- a/opencv_contrib/+cv/RetinaFastToneMapping.m +++ b/opencv_contrib/+cv/RetinaFastToneMapping.m @@ -1,5 +1,5 @@ classdef RetinaFastToneMapping < handle - %RETINAFASTTONEMAPPING Class with tone mapping algorithm of Meylan&al(2007) + %RETINAFASTTONEMAPPING Class with tone mapping algorithm of Meylan et al. (2007) % % High Dynamic Range (HDR >8bit images) tone mapping to (conversion to % 8bit) use case of the retina. @@ -44,14 +44,15 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = RetinaFastToneMapping(inputSize) %RETINAFASTTONEMAPPING Constructor % - % obj = cv.RetinaFastToneMapping(inputSize) + % obj = cv.RetinaFastToneMapping(inputSize) % % ## Input % * __inputSize__ input image size `[w,h]`. @@ -64,7 +65,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.RetinaFastToneMapping % @@ -78,9 +79,10 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % - % See also: cv.RetinaFastToneMapping.empty, cv.RetinaFastToneMapping.load + % See also: cv.RetinaFastToneMapping.empty, + % cv.RetinaFastToneMapping.load % RetinaFastToneMapping_(this.id, 'clear'); end @@ -88,13 +90,14 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % - % See also: cv.RetinaFastToneMapping.clear, cv.RetinaFastToneMapping.load + % See also: cv.RetinaFastToneMapping.clear, + % cv.RetinaFastToneMapping.load % b = RetinaFastToneMapping_(this.id, 'empty'); end @@ -102,7 +105,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -118,21 +121,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). 
The previous @@ -146,13 +149,14 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % - % See also: cv.RetinaFastToneMapping.save, cv.RetinaFastToneMapping.load + % See also: cv.RetinaFastToneMapping.save, + % cv.RetinaFastToneMapping.load % name = RetinaFastToneMapping_(this.id, 'getDefaultName'); end @@ -163,16 +167,16 @@ function load(this, fname_or_str, varargin) function setup(this, varargin) %SETUP Updates tone mapping behaviors by adjusting the local luminance computation area % - % obj.setup('OptionName',optionValue, ...) + % obj.setup('OptionName',optionValue, ...) % % ## Options % * __PhotoreceptorsNeighborhoodRadius__ the first stage local - % adaptation area. default 3.0 + % adaptation area. default 3.0 % * __GanglioncellsNeighborhoodRadius__ the second stage local - % adaptation area. default 1.0 + % adaptation area. default 1.0 % * __MeanLuminanceModulatorK__ the factor applied to modulate the - % mean Luminance information (default is 1, see reference - % paper). default 1.0 + % mean Luminance information (default is 1, see reference paper). + % default 1.0 % % See also: cv.RetinaFastToneMapping.applyFastToneMapping % @@ -182,7 +186,7 @@ function setup(this, varargin) function outputToneMappedImage = applyFastToneMapping(this, inputImage) %APPLYFASTTONEMAPPING Applies a luminance correction (initially High Dynamic Range (HDR) tone mapping) % - % outputToneMappedImage = obj.applyFastToneMapping(inputImage) + % outputToneMappedImage = obj.applyFastToneMapping(inputImage) % % ## Input % * __inputImage__ the input image to process, RGB or gray levels. diff --git a/opencv_contrib/+cv/SIFT.m b/opencv_contrib/+cv/SIFT.m index efd9570e9..34c302a44 100644 --- a/opencv_contrib/+cv/SIFT.m +++ b/opencv_contrib/+cv/SIFT.m @@ -13,36 +13,37 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = SIFT(varargin) %SIFT Constructor % - % obj = cv.SIFT() - % obj = cv.SIFT(..., 'OptionName',optionValue, ...) + % obj = cv.SIFT() + % obj = cv.SIFT(..., 'OptionName',optionValue, ...) % % ## Options % * __NFeatures__ The number of best features to retain. The - % features are ranked by their scores (measured in SIFT - % algorithm as the local contrast). default 0 + % features are ranked by their scores (measured in SIFT + % algorithm as the local contrast). default 0 % * __NOctaveLayers__ The number of layers in each octave. 3 is - % the value used in D. Lowe paper. The number of octaves is - % computed automatically from the image resolution. default 3 + % the value used in D. Lowe's paper. The number of octaves is + % computed automatically from the image resolution. default 3 % * __ConstrastThreshold__ The contrast threshold used to filter - % out weak features in semi-uniform (low-contrast) regions. - % The larger the threshold, the less features are produced - % by the detector. default 0.04 + % out weak features in semi-uniform (low-contrast) regions. The + % larger the threshold, the less features are produced by the + % detector. default 0.04 % * __EdgeThreshold__ The threshold used to filter out edge-like - % features. Note that the its meaning is different from the - % `ContrastThreshold`, i.e.
the larger the `EdgeThreshold`, - % the less features are filtered out (more features are - % retained). default 10 + % features. Note that its meaning is different from the + % `ContrastThreshold`, i.e. the larger the `EdgeThreshold`, the + % less features are filtered out (more features are retained). + % default 10 % * __Sigma__ The sigma of the Gaussian applied to the input image - % at the octave #0. If your image is captured with a weak - % camera with soft lenses, you might want to reduce the - % number. default 1.6 + % at the octave #0. If your image is captured with a weak camera + % with soft lenses, you might want to reduce the number. + % default 1.6 % % See also: cv.SIFT.detectAndCompute % @@ -52,7 +53,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.SIFT % @@ -63,7 +64,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -77,7 +78,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.SIFT.empty, cv.SIFT.load % @@ -87,11 +88,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.SIFT.clear, cv.SIFT.load % @@ -101,7 +102,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -117,21 +118,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -145,11 +146,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string.
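A short sketch exercising the cv.SIFT constructor options documented above together with the detectAndCompute method described further below; the image filename is hypothetical, and cv.drawKeypoints is assumed as the standard mexopencv helper for visualizing keypoints:

```matlab
img = imread('scene.jpg');                    % hypothetical test image
if size(img,3) > 1, img = rgb2gray(img); end  % detector expects 8-bit grayscale
sift = cv.SIFT('NFeatures',500, 'Sigma',1.6);
[kpts, descs] = sift.detectAndCompute(img);   % N keypoints, N-by-128 single
out = cv.drawKeypoints(img, kpts);            % assumed mexopencv drawing helper
imshow(out)
```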
% % See also: cv.SIFT.save, cv.SIFT.load % @@ -162,16 +163,16 @@ function load(this, fname_or_str, varargin) function ntype = defaultNorm(this) %DEFAULTNORM Returns the default norm type % - % ntype = obj.defaultNorm() + % ntype = obj.defaultNorm() % % ## Output % * __ntype__ Norm type. One of `cv::NormTypes`: - % * __Inf__ - % * __L1__ - % * __L2__ - % * __L2Sqr__ - % * __Hamming__ - % * __Hamming2__ + % * __Inf__ + % * __L1__ + % * __L2__ + % * __L2Sqr__ + % * __Hamming__ + % * __Hamming2__ % % Always `L2` for SIFT. % @@ -183,7 +184,7 @@ function load(this, fname_or_str, varargin) function sz = descriptorSize(this) %DESCRIPTORSIZE Returns the descriptor size in floats % - % sz = obj.descriptorSize() + % sz = obj.descriptorSize() % % ## Output % * __sz__ Descriptor size. @@ -198,7 +199,7 @@ function load(this, fname_or_str, varargin) function dtype = descriptorType(this) %DESCRIPTORTYPE Returns the descriptor type % - % dtype = obj.descriptorType() + % dtype = obj.descriptorType() % % ## Output % * __dtype__ Descriptor type, one of numeric MATLAB class names. @@ -213,27 +214,25 @@ function load(this, fname_or_str, varargin) function keypoints = detect(this, img, varargin) %DETECT Detects keypoints in an image or image set % - % keypoints = obj.detect(img) - % keypoints = obj.detect(imgs) - % [...] = obj.detect(..., 'OptionName',optionValue, ...) + % keypoints = obj.detect(img) + % keypoints = obj.detect(imgs) + % [...] = obj.detect(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Image (first variant), 8-bit grayscale image. % * __imgs__ Image set (second variant), cell array of images. % % ## Output - % * __keypoints__ The detected keypoints. In the first variant, - % a 1-by-N structure array. In the second variant of the - % method, `keypoints{i}` is a set of keypoints detected in - % `imgs{i}`. + % * __keypoints__ The detected keypoints. In the first variant, a + % 1-by-N structure array. In the second variant of the method, + % `keypoints{i}` is a set of keypoints detected in `imgs{i}`. % % ## Options % * __Mask__ A mask specifying where to look for keypoints - % (optional). It must be a logical or 8-bit integer matrix - % with non-zero values in the region of interest. In the - % second variant, it is a cell-array of masks for each input - % image, `masks{i}` is a mask for `imgs{i}`. - % Not set by default. + % (optional). It must be a logical or 8-bit integer matrix with + % non-zero values in the region of interest. In the second + % variant, it is a cell-array of masks for each input image, + % `masks{i}` is a mask for `imgs{i}`. Not set by default. % % See also: cv.SIFT.compute, cv.SIFT.detectAndCompute % @@ -243,26 +242,26 @@ function load(this, fname_or_str, varargin) function [descriptors, keypoints] = compute(this, img, keypoints) %COMPUTE Computes the descriptors for a set of keypoints detected in an image or image set % - % [descriptors, keypoints] = obj.compute(img, keypoints) - % [descriptors, keypoints] = obj.compute(imgs, keypoints) + % [descriptors, keypoints] = obj.compute(img, keypoints) + % [descriptors, keypoints] = obj.compute(imgs, keypoints) % % ## Input % * __img__ Image (first variant), 8-bit grayscale image. % * __imgs__ Image set (second variant), cell array of images. % * __keypoints__ Input collection of keypoints. Keypoints for - % which a descriptor cannot be computed are removed. - % Sometimes new keypoints can be added, for example: cv.SIFT - % duplicates keypoint with several dominant orientations - % (for each orientation). 
In the first variant, this is a - % struct-array of detected keypoints. In the second variant, - % it is a cell-array, where `keypoints{i}` is a set of keypoints - % detected in `imgs{i}` (a struct-array like before). + % which a descriptor cannot be computed are removed. Sometimes + % new keypoints can be added, for example: cv.SIFT duplicates + % keypoint with several dominant orientations (for each + % orientation). In the first variant, this is a struct-array of + % detected keypoints. In the second variant, it is a cell-array, + % where `keypoints{i}` is a set of keypoints detected in + % `imgs{i}` (a struct-array like before). % % ## Output % * __descriptors__ Computed descriptors. In the second variant of - % the method `descriptors{i}` are descriptors computed for a - % `keypoints{i}`. Row `j` in `descriptors` (or - % `descriptors{i}`) is the descriptor for `j`-th keypoint. + % the method `descriptors{i}` are descriptors computed for a + % `keypoints{i}`. Row `j` in `descriptors` (or `descriptors{i}`) + % is the descriptor for `j`-th keypoint. % * __keypoints__ Optional output with possibly updated keypoints. % % See also: cv.SIFT.detect, cv.SIFT.detectAndCompute @@ -273,41 +272,41 @@ function load(this, fname_or_str, varargin) function [keypoints, descriptors] = detectAndCompute(this, img, varargin) %DETECTANDCOMPUTE Detects keypoints and computes their descriptors % - % [keypoints, descriptors] = obj.detectAndCompute(img) - % [...] = obj.detectAndCompute(..., 'OptionName',optionValue, ...) + % [keypoints, descriptors] = obj.detectAndCompute(img) + % [...] = obj.detectAndCompute(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Image, input 8-bit grayscale image. % % ## Output % * __keypoints__ The detected keypoints. A 1-by-N structure array - % with the following fields: - % * __pt__ coordinates of the keypoint `[x,y]` - % * __size__ diameter of the meaningful keypoint neighborhood - % * __angle__ computed orientation of the keypoint (-1 if not - % applicable); it's in [0,360) degrees and measured - % relative to image coordinate system (y-axis is - % directed downward), i.e in clockwise. - % * __response__ the response by which the most strong - % keypoints have been selected. Can be used for further - % sorting or subsampling. - % * __octave__ octave (pyramid layer) from which the keypoint - % has been extracted. - % * **class_id** object class (if the keypoints need to be - % clustered by an object they belong to). + % with the following fields: + % * __pt__ coordinates of the keypoint `[x,y]` + % * __size__ diameter of the meaningful keypoint neighborhood + % * __angle__ computed orientation of the keypoint (-1 if not + % applicable); it's in [0,360) degrees and measured relative + % to image coordinate system (y-axis is directed downward), + % i.e in clockwise. + % * __response__ the response by which the most strong keypoints + % have been selected. Can be used for further sorting or + % subsampling. + % * __octave__ octave (pyramid layer) from which the keypoint + % has been extracted. + % * **class_id** object class (if the keypoints need to be + % clustered by an object they belong to). % * __descriptors__ Computed descriptors. Output concatenated - % vectors of descriptors. Each descriptor is a 128-element - % vector, as returned by cv.SIFT.descriptorSize, so the - % total size of descriptors will be - % `numel(keypoints) * obj.descriptorSize()`. A matrix of - % size N-by-128 of class `single`, one row per keypoint. + % vectors of descriptors. 
Each descriptor is a 128-element + % vector, as returned by cv.SIFT.descriptorSize, so the total + % size of descriptors will be + % `numel(keypoints) * obj.descriptorSize()`. A matrix of size + % N-by-128 of class `single`, one row per keypoint. % % ## Options % * __Mask__ optional mask specifying where to look for keypoints. - % Not set by default. + % Not set by default. % * __Keypoints__ If passed, then the method will use the provided - % vector of keypoints instead of detecting them, and the - % algorithm just computes their descriptors. + % vector of keypoints instead of detecting them, and the + % algorithm just computes their descriptors. % % See also: cv.SIFT.detect, cv.SIFT.compute % diff --git a/opencv_contrib/+cv/SURF.m b/opencv_contrib/+cv/SURF.m index d82797734..50af70cc7 100644 --- a/opencv_contrib/+cv/SURF.m +++ b/opencv_contrib/+cv/SURF.m @@ -13,7 +13,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -36,19 +37,19 @@ % Basic or Extended descriptors. % % * __false__ means that the basic descriptors (64 elements each) - % shall be computed. This is the default. + % shall be computed. This is the default. % * __true__ means that the extended descriptors (128 elements each) - % shall be computed. + % shall be computed. Extended % Up-right or rotated features. % % * __false__ means that detector computes orientation of each - % feature. This is the default. + % feature. This is the default. % * __true__ means that the orientation is not computed (which is much - % much faster). For example, if you match images from a stereo - % pair, or do image stitching, the matched features likely have - % very similar angles, and you can speed up feature extraction - % by setting `Upright=true`. + % much faster). For example, if you match images from a stereo pair, + % or do image stitching, the matched features likely have very + % similar angles, and you can speed up feature extraction by setting + % `Upright=true`. Upright end @@ -56,22 +57,22 @@ function this = SURF(varargin) %SURF Constructor % - % obj = cv.SURF() - % obj = cv.SURF(..., 'OptionName',optionValue, ...) + % obj = cv.SURF() + % obj = cv.SURF(..., 'OptionName',optionValue, ...) % % ## Options % * __HessianThreshold__ Threshold for hessian keypoint detector - % used in SURF. default 100 + % used in SURF. default 100 % * __NOctaves__ Number of pyramid octaves the keypoint detector - % will use. default 4 + % will use. default 4 % * __NOctaveLayers__ Number of octave layers within each octave. - % default 3 + % default 3 % * __Extended__ Extended descriptor flag (true: use extended - % 128-element descriptors; false: use 64-element - % descriptors). default false + % 128-element descriptors; false: use 64-element descriptors). + % default false % * __Upright__ Up-right or rotated features flag (true: do not - % compute orientation of features; false: compute - % orientation). default false + % compute orientation of features; false: compute orientation). 
+ % default false % % See also: cv.SURF.detectAndCompute % @@ -81,7 +82,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.SURF % @@ -92,7 +93,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -106,7 +107,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.SURF.empty, cv.SURF.load % @@ -116,11 +117,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.SURF.clear, cv.SURF.load % @@ -130,7 +131,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -146,21 +147,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -174,11 +175,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.SURF.save, cv.SURF.load % @@ -191,16 +192,16 @@ function load(this, fname_or_str, varargin) function ntype = defaultNorm(this) %DEFAULTNORM Returns the default norm type % - % ntype = obj.defaultNorm() + % ntype = obj.defaultNorm() % % ## Output % * __ntype__ Norm type. One of `cv::NormTypes`: - % * __Inf__ - % * __L1__ - % * __L2__ - % * __L2Sqr__ - % * __Hamming__ - % * __Hamming2__ + % * __Inf__ + % * __L1__ + % * __L2__ + % * __L2Sqr__ + % * __Hamming__ + % * __Hamming2__ % % Always `L2` for SURF. % @@ -212,11 +213,11 @@ function load(this, fname_or_str, varargin) function sz = descriptorSize(this) %DESCRIPTORSIZE Returns the descriptor size in floats % - % sz = obj.descriptorSize() + % sz = obj.descriptorSize() % % ## Output % * __sz__ Descriptor size, either 64 or 128 (see the - % cv.SURF.Extended property). 
+ % cv.SURF.Extended property). % % See also: cv.SURF.descriptorType, cv.SURF.compute % @@ -226,7 +227,7 @@ function load(this, fname_or_str, varargin) function dtype = descriptorType(this) %DESCRIPTORTYPE Returns the descriptor type % - % dtype = obj.descriptorType() + % dtype = obj.descriptorType() % % ## Output % * __dtype__ Descriptor type, one of numeric MATLAB class names. @@ -241,27 +242,25 @@ function load(this, fname_or_str, varargin) function keypoints = detect(this, img, varargin) %DETECT Detects keypoints in an image or image set % - % keypoints = obj.detect(img) - % keypoints = obj.detect(imgs) - % [...] = obj.detect(..., 'OptionName',optionValue, ...) + % keypoints = obj.detect(img) + % keypoints = obj.detect(imgs) + % [...] = obj.detect(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Image (first variant), 8-bit grayscale image. % * __imgs__ Image set (second variant), cell array of images. % % ## Output - % * __keypoints__ The detected keypoints. In the first variant, - % a 1-by-N structure array. In the second variant of the - % method, `keypoints{i}` is a set of keypoints detected in - % `imgs{i}`. + % * __keypoints__ The detected keypoints. In the first variant, a + % 1-by-N structure array. In the second variant of the method, + % `keypoints{i}` is a set of keypoints detected in `imgs{i}`. % % ## Options % * __Mask__ A mask specifying where to look for keypoints - % (optional). It must be a logical or 8-bit integer matrix - % with non-zero values in the region of interest. In the - % second variant, it is a cell-array of masks for each input - % image, `masks{i}` is a mask for `imgs{i}`. - % Not set by default. + % (optional). It must be a logical or 8-bit integer matrix with + % non-zero values in the region of interest. In the second + % variant, it is a cell-array of masks for each input image, + % `masks{i}` is a mask for `imgs{i}`. Not set by default. % % See also: cv.SURF.compute, cv.SURF.detectAndCompute % @@ -271,26 +270,26 @@ function load(this, fname_or_str, varargin) function [descriptors, keypoints] = compute(this, img, keypoints) %COMPUTE Computes the descriptors for a set of keypoints detected in an image or image set % - % [descriptors, keypoints] = obj.compute(img, keypoints) - % [descriptors, keypoints] = obj.compute(imgs, keypoints) + % [descriptors, keypoints] = obj.compute(img, keypoints) + % [descriptors, keypoints] = obj.compute(imgs, keypoints) % % ## Input % * __img__ Image (first variant), 8-bit grayscale image. % * __imgs__ Image set (second variant), cell array of images. % * __keypoints__ Input collection of keypoints. Keypoints for - % which a descriptor cannot be computed are removed. - % Sometimes new keypoints can be added, for example: cv.SIFT - % duplicates keypoint with several dominant orientations - % (for each orientation). In the first variant, this is a - % struct-array of detected keypoints. In the second variant, - % it is a cell-array, where `keypoints{i}` is a set of keypoints - % detected in `imgs{i}` (a struct-array like before). + % which a descriptor cannot be computed are removed. Sometimes + % new keypoints can be added, for example: cv.SIFT duplicates + % keypoint with several dominant orientations (for each + % orientation). In the first variant, this is a struct-array of + % detected keypoints. In the second variant, it is a cell-array, + % where `keypoints{i}` is a set of keypoints detected in + % `imgs{i}` (a struct-array like before). % % ## Output % * __descriptors__ Computed descriptors. 
In the second variant of - % the method `descriptors{i}` are descriptors computed for a - % `keypoints{i}`. Row `j` in `descriptors` (or - % `descriptors{i}`) is the descriptor for `j`-th keypoint. + % the method `descriptors{i}` are descriptors computed for a + % `keypoints{i}`. Row `j` in `descriptors` (or `descriptors{i}`) + % is the descriptor for `j`-th keypoint. % * __keypoints__ Optional output with possibly updated keypoints. % % See also: cv.SURF.detect, cv.SURF.detectAndCompute @@ -301,41 +300,41 @@ function load(this, fname_or_str, varargin) function [keypoints, descriptors] = detectAndCompute(this, img, varargin) %DETECTANDCOMPUTE Detects keypoints and computes their descriptors % - % [keypoints, descriptors] = obj.detectAndCompute(img) - % [...] = obj.detectAndCompute(..., 'OptionName',optionValue, ...) + % [keypoints, descriptors] = obj.detectAndCompute(img) + % [...] = obj.detectAndCompute(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Image, input 8-bit grayscale image. % % ## Output % * __keypoints__ The detected keypoints. A 1-by-N structure array - % with the following fields: - % * __pt__ coordinates of the keypoint `[x,y]` - % * __size__ diameter of the meaningful keypoint neighborhood - % * __angle__ computed orientation of the keypoint (-1 if not - % applicable); it's in [0,360) degrees and measured - % relative to image coordinate system (y-axis is - % directed downward), i.e in clockwise. - % * __response__ the response by which the most strong - % keypoints have been selected. Can be used for further - % sorting or subsampling. - % * __octave__ octave (pyramid layer) from which the keypoint - % has been extracted. - % * **class_id** object class (if the keypoints need to be - % clustered by an object they belong to). + % with the following fields: + % * __pt__ coordinates of the keypoint `[x,y]` + % * __size__ diameter of the meaningful keypoint neighborhood + % * __angle__ computed orientation of the keypoint (-1 if not + % applicable); it's in [0,360) degrees and measured relative + % to image coordinate system (y-axis is directed downward), + % i.e. clockwise. + % * __response__ the response by which the most strong keypoints + % have been selected. Can be used for further sorting or + % subsampling. + % * __octave__ octave (pyramid layer) from which the keypoint + % has been extracted. + % * **class_id** object class (if the keypoints need to be + % clustered by an object they belong to). % * __descriptors__ Computed descriptors. Output concatenated - % vectors of descriptors. Each descriptor is a 64- or - % 128-element vector, as returned by cv.SURF.descriptorSize, - % so the total size of descriptors will be - % `numel(keypoints) * obj.descriptorSize()`. A matrix of - % size N-by-(64/128) of class `single`, one row per keypoint. + % vectors of descriptors. Each descriptor is a 64- or 128-element + % vector, as returned by cv.SURF.descriptorSize, so the total + % size of descriptors will be + % `numel(keypoints) * obj.descriptorSize()`. A matrix of size + % N-by-(64/128) of class `single`, one row per keypoint. % % ## Options % * __Mask__ optional mask specifying where to look for keypoints. - % Not set by default. + % Not set by default. % * __Keypoints__ If passed, then the method will use the provided - % vector of keypoints instead of detecting them, and the - % algorithm just computes their descriptors. + % vector of keypoints instead of detecting them, and the + % algorithm just computes their descriptors.
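As a quick illustration of the SURF workflow documented above, a minimal sketch (not part of the patch), assuming `img` is an 8-bit grayscale image already loaded, e.g. with cv.imread:

    surf = cv.SURF('Extended',true);   % use 128-element descriptors
    [kpts, descs] = surf.detectAndCompute(img);
    % one descriptor row per keypoint, row width matches descriptorSize()
    assert(isequal(size(descs), [numel(kpts), surf.descriptorSize()]))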
% % The function is parallelized with the TBB library. % diff --git a/opencv_contrib/+cv/SelectiveSearchSegmentation.m b/opencv_contrib/+cv/SelectiveSearchSegmentation.m index 2d2bef6e3..89ab12f68 100644 --- a/opencv_contrib/+cv/SelectiveSearchSegmentation.m +++ b/opencv_contrib/+cv/SelectiveSearchSegmentation.m @@ -13,7 +13,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end %% Constructor/destructor @@ -21,7 +22,7 @@ function this = SelectiveSearchSegmentation() %SELECTIVESEARCHSEGMENTATION Constructor % - % obj = cv.SelectiveSearchSegmentation() + % obj = cv.SelectiveSearchSegmentation() % % See also: cv.SelectiveSearchSegmentation.process % @@ -31,7 +32,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.SelectiveSearchSegmentation % @@ -45,7 +46,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.SelectiveSearchSegmentation.empty, % cv.SelectiveSearchSegmentation.load @@ -56,11 +57,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.SelectiveSearchSegmentation.clear, % cv.SelectiveSearchSegmentation.load @@ -71,7 +72,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -87,21 +88,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -115,11 +116,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.SelectiveSearchSegmentation.save, % cv.SelectiveSearchSegmentation.load @@ -133,7 +134,7 @@ function load(this, fname_or_str, varargin) function setBaseImage(this, img) %SETBASEIMAGE Set a image used by switch functions to initialize the class % - % obj.setBaseImage(img) + % obj.setBaseImage(img) % % ## Input % * __img__ The image. 
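For context, a minimal end-to-end sketch of this class (not part of the patch), assuming `img` is an RGB image already in the workspace; the switch and process methods used here are documented in the hunks below:

    ss = cv.SelectiveSearchSegmentation();
    ss.setBaseImage(img);               % image used by the switch functions
    ss.switchToSelectiveSearchFast();   % 'Selective search fast' preset
    rects = ss.process();               % Nx4 [x,y,w,h], most relevant first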
@@ -146,13 +147,13 @@ function setBaseImage(this, img) function switchToSingleStrategy(this, varargin) %SWITCHTOSINGLESTRATEGY Initialize the class with the 'Single strategy' parameters % - % obj.switchToSingleStrategy() - % obj.switchToSingleStrategy('OptionName',optionValue, ...) + % obj.switchToSingleStrategy() + % obj.switchToSingleStrategy('OptionName',optionValue, ...) % % ## Options % * __K__ The k parameter for the graph segmentation. default 200 % * __Sigma__ The sigma parameter for the graph segmentation. - % default 0.8 + % default 0.8 % % As described in [uijlings2013selective]. % @@ -164,16 +165,16 @@ function switchToSingleStrategy(this, varargin) function switchToSelectiveSearchFast(this, varargin) %SWITCHTOSELECTIVESEARCHFAST Initialize the class with the 'Selective search fast' parameters % - % obj.switchToSelectiveSearchFast() - % obj.switchToSelectiveSearchFast('OptionName',optionValue, ...) + % obj.switchToSelectiveSearchFast() + % obj.switchToSelectiveSearchFast('OptionName',optionValue, ...) % % ## Options % * __BaseK__ The k parameter for the first graph segmentation. - % default 150 + % default 150 % * __IncK__ The increment of the k parameter for all graph - % segmentations. default 150 + % segmentations. default 150 % * __Sigma__ The sigma parameter for the graph segmentation. - % default 0.8 + % default 0.8 % % As described in [uijlings2013selective]. % @@ -185,16 +186,16 @@ function switchToSelectiveSearchFast(this, varargin) function switchToSelectiveSearchQuality(this, varargin) %SWITCHTOSELECTIVESEARCHQUALITY Initialize the class with the 'Selective search fast' parameters % - % obj.switchToSelectiveSearchQuality() - % obj.switchToSelectiveSearchQuality('OptionName',optionValue, ...) + % obj.switchToSelectiveSearchQuality() + % obj.switchToSelectiveSearchQuality('OptionName',optionValue, ...) % % ## Options % * __BaseK__ The k parameter for the first graph segmentation. - % default 150 + % default 150 % * __IncK__ The increment of the k parameter for all graph - % segmentations. default 150 + % segmentations. default 150 % * __Sigma__ The sigma parameter for the graph segmentation. - % default 0.8 + % default 0.8 % % As described in [uijlings2013selective]. % @@ -206,7 +207,7 @@ function switchToSelectiveSearchQuality(this, varargin) function addImage(this, img) %ADDIMAGE Add a new image in the list of images to process % - % obj.addImage(img) + % obj.addImage(img) % % ## Input % * __img__ The image. @@ -219,7 +220,7 @@ function addImage(this, img) function clearImages(this) %CLEARIMAGES Clear the list of images to process % - % obj.clearImages() + % obj.clearImages() % % See also: cv.SelectiveSearchSegmentation.addImage % @@ -229,12 +230,12 @@ function clearImages(this) function addGraphSegmentation(this, varargin) %ADDGRAPHSEGMENTATION Add a new graph segmentation in the list of graph segementations to process % - % obj.addGraphSegmentation() - % obj.addGraphSegmentation('OptionName',optionValue, ...) + % obj.addGraphSegmentation() + % obj.addGraphSegmentation('OptionName',optionValue, ...) % % ## Options % * __Sigma__ The sigma parameter, used to smooth image. - % default 0.5 + % default 0.5 % * __K__ The k parameter of the algorithm. default 300 % * __MinSize__ The minimum size of segments. 
default 100 % @@ -247,7 +248,7 @@ function addGraphSegmentation(this, varargin) function clearGraphSegmentations(this) %CLEARGRAPHSEGMENTATIONS Clear the list of graph segmentations to process % - % obj.clearGraphSegmentations() + % obj.clearGraphSegmentations() % % See also: cv.SelectiveSearchSegmentation.addGraphSegmentation, % cv.GraphSegmentation @@ -258,18 +259,18 @@ function clearGraphSegmentations(this) function addStrategy(this, stype, varargin) %ADDSTRATEGY Add a new strategy in the list of strategy to process % - % obj.addStrategy(stype) - % obj.addStrategy('Multiple', stype, stype, ...) + % obj.addStrategy(stype) + % obj.addStrategy('Multiple', stype, stype, ...) % % ## Input % * __stype__ The strategy type for the selective search - % segmentation algorithm, one of: - % * __Color__ Color-based strategy. - % * __Size__ Size-based strategy. - % * __Texture__ Texture-based strategy. - % * __Fill__ Fill-based strategy. - % * __Multiple__ Regroup multiple strategies, where all - % sub-strategies have equal weights. + % segmentation algorithm, one of: + % * __Color__ Color-based strategy. + % * __Size__ Size-based strategy. + % * __Texture__ Texture-based strategy. + % * __Fill__ Fill-based strategy. + % * __Multiple__ Regroup multiple strategies, where all + % sub-strategies have equal weights. % % The classes are implemented from the algorithm described in % [uijlings2013selective]. @@ -282,7 +283,7 @@ function addStrategy(this, stype, varargin) function clearStrategies(this) %CLEARSTRATEGIES Clear the list of strategy to process % - % obj.clearStrategies() + % obj.clearStrategies() % % See also: cv.SelectiveSearchSegmentation.addStrategy % @@ -292,12 +293,12 @@ function clearStrategies(this) function rects = process(this) %PROCESSIMAGE Based on all images, graph segmentations and stragies, computes all possible rects and return them % - % rects = obj.process() + % rects = obj.process() % % ## Output % * __rects__ The list of rects as a Nx4 numeric matrix - % `[x,y,w,h; ...]`. The first ones are more relevents than - % the lasts ones. + % `[x,y,w,h; ...]`. The first ones are more relevant than the + % last ones. % % See also: cv.SelectiveSearchSegmentation.setBaseImage % diff --git a/opencv_contrib/+cv/SimpleWB.m b/opencv_contrib/+cv/SimpleWB.m index 9efa2d43c..596ac4b73 100644 --- a/opencv_contrib/+cv/SimpleWB.m +++ b/opencv_contrib/+cv/SimpleWB.m @@ -10,7 +10,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -31,7 +32,7 @@ function this = SimpleWB() %SIMPLEWB Creates an instance of SimpleWB % - % obj = cv.SimpleWB() + % obj = cv.SimpleWB() % % See also: cv.SimpleWB.balanceWhite % @@ -41,7 +42,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.SimpleWB % @@ -55,7 +56,7 @@ function delete(this) function dst = balanceWhite(this, src) %BALANCEWHITE Applies white balancing to the input image % - % dst = obj.balanceWhite(src) + % dst = obj.balanceWhite(src) % % ## Input % * __src__ Input image. @@ -74,7 +75,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.SimpleWB.empty % @@ -84,11 +85,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read).
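A quick usage sketch of the white-balance method above (not part of the patch; assumes `src` is a color image already in the workspace):

    wb = cv.SimpleWB();
    dst = wb.balanceWhite(src);   % white-balanced output, same size as src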
% % See also: cv.SimpleWB.clear % @@ -98,11 +99,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.SimpleWB.save, cv.SimpleWB.load % @@ -112,7 +113,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -127,21 +128,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from a file storage. % The previous model state is discarded. diff --git a/opencv_contrib/+cv/StarDetector.m b/opencv_contrib/+cv/StarDetector.m index 21dc84f49..07c3af38e 100644 --- a/opencv_contrib/+cv/StarDetector.m +++ b/opencv_contrib/+cv/StarDetector.m @@ -11,29 +11,30 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = StarDetector(varargin) %STARDETECTOR The full constructor % - % obj = cv.StarDetector() - % obj = cv.StarDetector(..., 'OptionName',optionValue, ...) + % obj = cv.StarDetector() + % obj = cv.StarDetector(..., 'OptionName',optionValue, ...) % % ## Options % * __MaxSize__ maximum size of the features. The following values - % are supported: 4, 6, 8, 11, 12, 16, 22, 23, 32, 45, 46, 64, - % 90, 128. In the case of a different value the result is - % undefined. default 45 + % are supported: 4, 6, 8, 11, 12, 16, 22, 23, 32, 45, 46, 64, + % 90, 128. In the case of a different value the result is + % undefined. default 45 % * __ResponseThreshold__ threshold for the approximated laplacian, - % used to eliminate weak features. The larger it is, the less - % features will be retrieved. default 30 + % used to eliminate weak features. The larger it is, the fewer + % features will be retrieved. default 30 % * __LineThresholdProjected__ another threshold for the laplacian - % to eliminate edges. default 10 + % to eliminate edges. default 10 % * __LineThresholdBinarized__ yet another threshold for the - % feature size to eliminate edges. The larger the 2nd - % threshold, the more points you get. + % feature size to eliminate edges. The larger the 2nd threshold, + % the more points you get.
default 8 % * __SuppressNonmaxSize__ default 5 % % See also: cv.StarDetector.detect @@ -44,7 +45,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.StarDetector % @@ -55,7 +56,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -69,7 +70,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.StarDetector.empty, cv.StarDetector.load % @@ -79,11 +80,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.StarDetector.clear % @@ -93,7 +94,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -109,21 +110,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -137,11 +138,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.StarDetector.save, cv.StarDetector.load % @@ -154,27 +155,25 @@ function load(this, fname_or_str, varargin) function keypoints = detect(this, img, varargin) %DETECT Detects keypoints in an image or image set % - % keypoints = obj.detect(img) - % keypoints = obj.detect(imgs) - % [...] = obj.detect(..., 'OptionName',optionValue, ...) + % keypoints = obj.detect(img) + % keypoints = obj.detect(imgs) + % [...] = obj.detect(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ Image (first variant), 8-bit/16-bit grayscale image. % * __imgs__ Image set (second variant), cell array of images. % % ## Output - % * __keypoints__ The detected keypoints. In the first variant, - % a 1-by-N structure array. In the second variant of the - % method, `keypoints{i}` is a set of keypoints detected in - % `imgs{i}`. 
+ % * __keypoints__ The detected keypoints. In the first variant, a + % 1-by-N structure array. In the second variant of the method, + % `keypoints{i}` is a set of keypoints detected in `imgs{i}`. % % ## Options % * __Mask__ A mask specifying where to look for keypoints - % (optional). It must be a logical or 8-bit integer matrix - % with non-zero values in the region of interest. In the - % second variant, it is a cell-array of masks for each input - % image, `masks{i}` is a mask for `imgs{i}`. - % Not set by default. + % (optional). It must be a logical or 8-bit integer matrix with + % non-zero values in the region of interest. In the second + % variant, it is a cell-array of masks for each input image, + % `masks{i}` is a mask for `imgs{i}`. Not set by default. % % See also: cv.StarDetector.StarDetector % diff --git a/opencv_contrib/+cv/StaticSaliencyFineGrained.m b/opencv_contrib/+cv/StaticSaliencyFineGrained.m index 6aecce63c..ec1c949e6 100644 --- a/opencv_contrib/+cv/StaticSaliencyFineGrained.m +++ b/opencv_contrib/+cv/StaticSaliencyFineGrained.m @@ -24,7 +24,7 @@ % % Saliency UML diagram: % - % ![image](http://docs.opencv.org/3.1.0/saliency.png) + % ![image](https://docs.opencv.org/3.3.1/saliency.png) % % To see how API works, try tracker demo: `computeSaliency_demo.m`. % @@ -50,14 +50,15 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = StaticSaliencyFineGrained() %STATICSALIENCYFINEGRAINED Constructor, creates a specialized saliency algorithm of this type % - % obj = cv.StaticSaliencyFineGrained() + % obj = cv.StaticSaliencyFineGrained() % % See also: cv.StaticSaliencyFineGrained.computeSaliency % @@ -67,7 +68,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.StaticSaliencyFineGrained % @@ -81,7 +82,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.StaticSaliencyFineGrained.empty, % cv.StaticSaliencyFineGrained.load @@ -92,11 +93,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.StaticSaliencyFineGrained.clear, % cv.StaticSaliencyFineGrained.load @@ -107,7 +108,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -123,21 +124,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). 
default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -151,11 +152,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.StaticSaliencyFineGrained.save, % cv.StaticSaliencyFineGrained.load @@ -166,27 +167,14 @@ function load(this, fname_or_str, varargin) %% Saliency methods - function className = getClassName(this) - %GETCLASSNAME Get the name of the specific saliency type - % - % className = obj.getClassName() - % - % ## Output - % * __className__ The name of the tracker initializer. - % - % See also: cv.StaticSaliencyFineGrained.StaticSaliencyFineGrained - % - className = StaticSaliencyFineGrained_(this.id, 'getClassName'); - end - function saliencyMap = computeSaliency(this, img) %COMPUTESALIENCY Compute the saliency % - % saliencyMap = obj.computeSaliency(img) + % saliencyMap = obj.computeSaliency(img) % % ## Input % * __img__ The input image, 8-bit 1 or 3-channel (internally - % converted to grayscale). + % converted to grayscale). % % ## Output % * __saliencyMap__ The computed saliency map, `uint8` matrix. @@ -202,15 +190,15 @@ function load(this, fname_or_str, varargin) function binaryMap = computeBinaryMap(this, saliencyMap) %COMPUTEBINARYMAP This function perform a binary map of given saliency map % - % binaryMap = obj.computeBinaryMap(saliencyMap) + % binaryMap = obj.computeBinaryMap(saliencyMap) % % ## Input % * __saliencyMap__ the saliency map obtained through one of the - % specialized algorithms, `single` matrix. + % specialized algorithms, `single` matrix. % % ## Output % * __binaryMap__ the binary map, `uint8` matrix with either 0 or - % 255 values. + % 255 values. % % This is obtained in this way: % In a first step, to improve the definition of interest areas and diff --git a/opencv_contrib/+cv/StaticSaliencySpectralResidual.m b/opencv_contrib/+cv/StaticSaliencySpectralResidual.m index dcc33fc43..321b4949f 100644 --- a/opencv_contrib/+cv/StaticSaliencySpectralResidual.m +++ b/opencv_contrib/+cv/StaticSaliencySpectralResidual.m @@ -24,7 +24,7 @@ % % Saliency UML diagram: % - % ![image](http://docs.opencv.org/3.1.0/saliency.png) + % ![image](https://docs.opencv.org/3.3.1/saliency.png) % % To see how API works, try tracker demo: `computeSaliency_demo.m`. 
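A brief sketch of the saliency API just documented (not part of the patch; assumes `img` is an 8-bit 1- or 3-channel image in the workspace):

    sal = cv.StaticSaliencyFineGrained();
    saliencyMap = sal.computeSaliency(img);    % uint8 saliency map
    % the spectral-residual variant returns a single-precision map,
    % which computeBinaryMap can threshold into 0/255 values:
    sal2 = cv.StaticSaliencySpectralResidual();
    binaryMap = sal2.computeBinaryMap(sal2.computeSaliency(img));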
% @@ -53,7 +53,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -73,7 +74,7 @@ function this = StaticSaliencySpectralResidual() %STATICSALIENCYSPECTRALRESIDUAL Constructor, creates a specialized saliency algorithm of this type % - % obj = cv.StaticSaliencySpectralResidual() + % obj = cv.StaticSaliencySpectralResidual() % % See also: cv.StaticSaliencySpectralResidual.computeSaliency % @@ -83,7 +84,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.StaticSaliencySpectralResidual % @@ -97,7 +98,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.StaticSaliencySpectralResidual.empty, % cv.StaticSaliencySpectralResidual.load @@ -108,11 +109,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.StaticSaliencySpectralResidual.clear, % cv.StaticSaliencySpectralResidual.load @@ -123,7 +124,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -139,21 +140,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -167,11 +168,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.StaticSaliencySpectralResidual.save, % cv.StaticSaliencySpectralResidual.load @@ -182,27 +183,14 @@ function load(this, fname_or_str, varargin) %% Saliency methods - function className = getClassName(this) - %GETCLASSNAME Get the name of the specific saliency type - % - % className = obj.getClassName() - % - % ## Output - % * __className__ The name of the tracker initializer. 
- % - % See also: cv.StaticSaliencySpectralResidual.StaticSaliencySpectralResidual - % - className = StaticSaliencySpectralResidual_(this.id, 'getClassName'); - end - function saliencyMap = computeSaliency(this, img) %COMPUTESALIENCY Compute the saliency % - % saliencyMap = obj.computeSaliency(img) + % saliencyMap = obj.computeSaliency(img) % % ## Input % * __img__ The input image, 1 or 3-channel (internally converted - % to grayscale). + % to grayscale). % % ## Output % * __saliencyMap__ The computed saliency map, `single` matrix. @@ -222,15 +210,15 @@ function load(this, fname_or_str, varargin) function binaryMap = computeBinaryMap(this, saliencyMap) %COMPUTEBINARYMAP This function perform a binary map of given saliency map % - % binaryMap = obj.computeBinaryMap(saliencyMap) + % binaryMap = obj.computeBinaryMap(saliencyMap) % % ## Input % * __saliencyMap__ the saliency map obtained through one of the - % specialized algorithms, `single` matrix. + % specialized algorithms, `single` matrix. % % ## Output % * __binaryMap__ the binary map, `uint8` matrix with either 0 or - % 255 values. + % 255 values. % % This is obtained in this way: % In a first step, to improve the definition of interest areas and diff --git a/opencv_contrib/+cv/StructuredEdgeDetection.m b/opencv_contrib/+cv/StructuredEdgeDetection.m index 3cdc347fc..86810a1cb 100644 --- a/opencv_contrib/+cv/StructuredEdgeDetection.m +++ b/opencv_contrib/+cv/StructuredEdgeDetection.m @@ -9,18 +9,18 @@ % algorithms, i.e. algorithms which somehow takes into account pixel % affinities in natural images. % - % ![image01](http://docs.opencv.org/3.1.0/01.jpg) - % ![image02](http://docs.opencv.org/3.1.0/02.jpg) - % ![image03](http://docs.opencv.org/3.1.0/03.jpg) - % ![image04](http://docs.opencv.org/3.1.0/04.jpg) - % ![image05](http://docs.opencv.org/3.1.0/05.jpg) - % ![image06](http://docs.opencv.org/3.1.0/06.jpg) - % ![image07](http://docs.opencv.org/3.1.0/07.jpg) - % ![image08](http://docs.opencv.org/3.1.0/08.jpg) - % ![image09](http://docs.opencv.org/3.1.0/09.jpg) - % ![image10](http://docs.opencv.org/3.1.0/10.jpg) - % ![image11](http://docs.opencv.org/3.1.0/11.jpg) - % ![image12](http://docs.opencv.org/3.1.0/12.jpg) + % ![image01](https://docs.opencv.org/3.3.1/01.jpg) + % ![image02](https://docs.opencv.org/3.3.1/02.jpg) + % ![image03](https://docs.opencv.org/3.3.1/03.jpg) + % ![image04](https://docs.opencv.org/3.3.1/04.jpg) + % ![image05](https://docs.opencv.org/3.3.1/05.jpg) + % ![image06](https://docs.opencv.org/3.3.1/06.jpg) + % ![image07](https://docs.opencv.org/3.3.1/07.jpg) + % ![image08](https://docs.opencv.org/3.3.1/08.jpg) + % ![image09](https://docs.opencv.org/3.3.1/09.jpg) + % ![image10](https://docs.opencv.org/3.3.1/10.jpg) + % ![image11](https://docs.opencv.org/3.3.1/11.jpg) + % ![image12](https://docs.opencv.org/3.3.1/12.jpg) % % ## References % [Dollar2013]: @@ -38,51 +38,51 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = StructuredEdgeDetection(model, varargin) %STRUCTUREDEDGEDETECTION The only constructor % - % obj = cv.StructuredEdgeDetection(model) - % obj = cv.StructuredEdgeDetection(model, howToGetFeatures) + % obj = cv.StructuredEdgeDetection(model) + % obj = cv.StructuredEdgeDetection(model, howToGetFeatures) % % ## Input % * __model__ name of the file where the model is stored. % * __howToGetFeatures__ optional name of MATLAB M-function that - % implements custom feature extractor. A helper function for - % training part of: "P. Dollar and C. L. Zitnick. 
Structured - % Forests for Fast Edge Detection, 2013". You need it only - % if you would like to train your own forest, otherwise - % leave it unspecified for the default implementation. See - % example below. + % implements custom feature extractor. A helper function for + % training part of: "P. Dollar and C. L. Zitnick. Structured + % Forests for Fast Edge Detection, 2013". You need it only if + % you would like to train your own forest, otherwise leave it + % unspecified for the default implementation. See example below. % % ## Example % The following is an example of a custom feature extractor % MATLAB function: % - % % This function extracts feature channels from src. The - % % StructureEdgeDetection uses this feature space to detect - % % edges. - % function features = myRFFeatureGetter(src, opts) - % % src: source image to extract features - % % features: output n-channel floating-point feature matrix - % % opts: struct of options - % gnrmRad = opts.gradientNormalizationRadius; - % gsmthRad = opts.gradientSmoothingRadius; - % shrink = opts.shrinkNumber; - % outNum = opts.numberOfOutputChannels; - % gradNum = opts.numberOfGradientOrientations; - % - % nsize = [size(src,1) size(src,2)] ./ shrink; - % features = zeros([nsize outNum], 'single'); - % % ... here your feature extraction code - % end + % % This function extracts feature channels from src. The + % % StructureEdgeDetection uses this feature space to detect + % % edges. + % function features = myRFFeatureGetter(src, opts) + % % src: source image to extract features + % % features: output n-channel floating-point feature matrix + % % opts: struct of options + % gnrmRad = opts.gradientNormalizationRadius; + % gsmthRad = opts.gradientSmoothingRadius; + % shrink = opts.shrinkNumber; + % outNum = opts.numberOfOutputChannels; + % gradNum = opts.numberOfGradientOrientations; + % + % nsize = [size(src,1) size(src,2)] ./ shrink; + % features = zeros([nsize outNum], 'single'); + % % ... here your feature extraction code + % end % % TODO: Custom extractor is not internally used in the current % cv.StructuredEdgeDetection implementation. See this - % [tutorial](http://docs.opencv.org/3.1.0/d2/d59/tutorial_ximgproc_training.html) + % [tutorial](https://docs.opencv.org/3.3.1/d2/d59/tutorial_ximgproc_training.html) % for more information about training your own structured forest % (it uses an external MATLAB toolbox for the training part). % @@ -94,7 +94,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.StructuredEdgeDetection % @@ -108,7 +108,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.StructuredEdgeDetection.empty, % cv.StructuredEdgeDetection.load @@ -119,11 +119,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.StructuredEdgeDetection.clear, % cv.StructuredEdgeDetection.load @@ -134,7 +134,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. 
@@ -150,21 +150,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -178,11 +178,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.StructuredEdgeDetection.save, % cv.StructuredEdgeDetection.load @@ -196,14 +196,14 @@ function load(this, fname_or_str, varargin) function dst = detectEdges(this, src) %DETECTEDGES The function detects edges in src and draw them to dst % - % dst = obj.detectEdges(src) + % dst = obj.detectEdges(src) % % ## Input % * __src__ source image (RGB, float, in [0;1]) to detect edges. % % ## Output % * __dst__ destination image (grayscale, float, in [0;1]) where - % edges are drawn. + % edges are drawn. % % The algorithm underlies this function is much more robust to % texture presence, than common approaches, e.g. cv.Sobel. @@ -216,7 +216,7 @@ function load(this, fname_or_str, varargin) function orientation_image = computeOrientation(this, edge_image) %COMPUTEORIENTATION Computes orientation map from edge image % - % orientation_image = obj.computeOrientation(edge_image) + % orientation_image = obj.computeOrientation(edge_image) % % ## Input % * **edge_image** edge image from `detectEdges` function. @@ -230,10 +230,10 @@ function load(this, fname_or_str, varargin) end function dst = edgesNms(this, edge_image, orientation_image, varargin) - %EDGESNMS Suppress edges + %EDGESNMS Suppress edges (nonmaximum suppression) % - % dst = obj.edgesNms(edge_image, orientation_image) - % dst = obj.edgesNms(..., 'OptionName',optionValue, ...) + % dst = obj.edgesNms(edge_image, orientation_image) + % dst = obj.edgesNms(..., 'OptionName',optionValue, ...) % % ## Input % * **edge_image** edge image from `detectEdges` function. @@ -247,7 +247,7 @@ function load(this, fname_or_str, varargin) % * __S__ radius for boundary suppression. default 0 % * __M__ multiplier for conservative suppression. default 1.0 % * __IsParallel__ enables/disables parallel computing. - % default true + % default true % % The function suppresses edges where edge is stronger in % orthogonal direction. 
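Since the methods in this file form a pipeline (detectEdges, then computeOrientation, then edgesNms), a combined usage sketch may help (not part of the patch; the model filename below is hypothetical, a pretrained structured forest model is available from the Structured Forests project, and `img` is assumed to be an RGB image in the workspace):

    pDollar = cv.StructuredEdgeDetection('model.yml.gz');  % hypothetical model path
    src = single(img) / 255;              % RGB float image in [0,1]
    E = pDollar.detectEdges(src);         % edge probability map in [0,1]
    O = pDollar.computeOrientation(E);    % per-pixel edge orientation
    E = pDollar.edgesNms(E, O);           % thin edges via non-maximum suppression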
diff --git a/opencv_contrib/+cv/SuperpixelLSC.m b/opencv_contrib/+cv/SuperpixelLSC.m index cf710ebc6..0369ce3e9 100644 --- a/opencv_contrib/+cv/SuperpixelLSC.m +++ b/opencv_contrib/+cv/SuperpixelLSC.m @@ -21,24 +21,25 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = SuperpixelLSC(img, varargin) %SUPERPIXELLSC Class implementing the LSC (Linear Spectral Clustering) superpixels % - % obj = cv.SuperpixelLSC(img) - % obj = cv.SuperpixelLSC(img, 'OptionName',optionValue, ...) + % obj = cv.SuperpixelLSC(img) + % obj = cv.SuperpixelLSC(img, 'OptionName',optionValue, ...) % % ## Input % * __img__ Image to segment. % % ## Options % * __RegionSize__ Chooses an average superpixel size measured in - % pixels. default 10 + % pixels. default 10 % * __Ratio__ Chooses the enforcement of superpixel compactness - % factor of superpixel. default 0.075 + % factor of superpixel. default 0.075 % % The function initializes a SuperpixelLSC object for the input % image. It sets the parameters of superpixel algorithm, which @@ -49,7 +50,7 @@ % preprocess image with little gaussian blur with a small 3x3 % kernel and additional conversion into CieLAB color space. % - % ![image](http://docs.opencv.org/3.1.0/superpixels_lsc.png) + % ![image](https://docs.opencv.org/3.3.1/superpixels_lsc.png) % % See also: cv.SuperpixelLSC.iterate % @@ -59,7 +60,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.SuperpixelLSC % @@ -73,7 +74,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.SuperpixelLSC.empty, cv.SuperpixelLSC.load % @@ -83,11 +84,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.SuperpixelLSC.clear, cv.SuperpixelLSC.load % @@ -97,7 +98,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -113,21 +114,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). 
The previous @@ -141,11 +142,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.SuperpixelLSC.save, cv.SuperpixelLSC.load % @@ -158,7 +159,7 @@ function load(this, fname_or_str, varargin) function num = getNumberOfSuperpixels(this) %GETNUMBEROFSUPERPIXELS Calculates the actual amount of superpixels on a given segmentation computed and stored in object % - % num = obj.getNumberOfSuperpixels() + % num = obj.getNumberOfSuperpixels() % % ## Output % * __num__ actual amount of superpixels. @@ -171,12 +172,12 @@ function load(this, fname_or_str, varargin) function iterate(this, varargin) %ITERATE Calculates the superpixel segmentation on a given image with the initialized parameters in the object % - % obj.iterate() - % obj.iterate('OptionName',optionValue, ...) + % obj.iterate() + % obj.iterate('OptionName',optionValue, ...) % % ## Options % * __NumIterations__ Number of iterations. Higher number improves - % the result. default 10 + % the result. default 10 % % The function computes the superpixels segmentation of an image % with the parameters initialized with the constructor. The @@ -196,12 +197,12 @@ function iterate(this, varargin) function labels = getLabels(this) %GETLABELS Returns the segmentation labeling of the image % - % labels = obj.getLabels() + % labels = obj.getLabels() % % ## Output % * __labels__ Return a `int32` integer array containing the - % labels of the superpixel segmentation. The labels are in - % the range `[0, obj.getNumberOfSuperpixels()]`. + % labels of the superpixel segmentation. The labels are in the + % range `[0, obj.getNumberOfSuperpixels()]`. % % The function returns an image with the labels of the superpixel % segmentation. The labels are in the range @@ -218,17 +219,16 @@ function iterate(this, varargin) function img = getLabelContourMask(this, varargin) %GETLABELCONTOURMASK Returns the mask of the superpixel segmentation stored in object % - % img = obj.getLabelContourMask() - % img = obj.getLabelContourMask('OptionName',optionValue, ...) + % img = obj.getLabelContourMask() + % img = obj.getLabelContourMask('OptionName',optionValue, ...) % % ## Output % * __img__ Return `logical` image mask where 1 indicates that the - % pixel is a superpixel border, and 0 otherwise. + % pixel is a superpixel border, and 0 otherwise. % % ## Options % * __ThickLine__ If false, the border is only one pixel wide, - % otherwise all pixels at the border are masked. - % default true + % otherwise all pixels at the border are masked. default true % % The function return the boundaries of the superpixel % segmentation. @@ -242,15 +242,15 @@ function iterate(this, varargin) function enforceLabelConnectivity(this, varargin) %ENFORCELABELCONNECTIVITY Enforce label connectivity % - % obj.enforceLabelConnectivity() - % obj.enforceLabelConnectivity('OptionName',optionValue, ...) + % obj.enforceLabelConnectivity() + % obj.enforceLabelConnectivity('OptionName',optionValue, ...) % % ## Options % * __MinElementSize__ The minimum element size in percents that - % should be absorbed into a bigger superpixel. 
Given - % resulted average superpixel size valid value should be in - % 0-100 range, 25 means that less then a quarter sized - % superpixel should be absorbed, this is default. default 20 + % should be absorbed into a bigger superpixel. Given resulted + % average superpixel size valid value should be in 0-100 range, + % 25 means that less then a quarter sized superpixel should be + % absorbed, this is default. default 20 % % The function merge component that is too small, assigning the % previously found adjacent label to this component. Calling this diff --git a/opencv_contrib/+cv/SuperpixelSEEDS.m b/opencv_contrib/+cv/SuperpixelSEEDS.m index 8a0f05dd9..42ffd319e 100644 --- a/opencv_contrib/+cv/SuperpixelSEEDS.m +++ b/opencv_contrib/+cv/SuperpixelSEEDS.m @@ -23,35 +23,36 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = SuperpixelSEEDS(image_size, num_superpixels, num_levels, varargin) %SUPERPIXELSEEDS Initializes a SuperpixelSEEDS object % - % obj = cv.SuperpixelSEEDS(image_size, num_superpixels, num_levels) - % obj = cv.SuperpixelSEEDS(..., 'OptionName',optionValue, ...) + % obj = cv.SuperpixelSEEDS(image_size, num_superpixels, num_levels) + % obj = cv.SuperpixelSEEDS(..., 'OptionName',optionValue, ...) % % ## Input % * **image_size** Image size specified as `[height,width]` or - % `[height,width,number_of_channels]`. + % `[height,width,number_of_channels]`. % * **num_superpixels** Desired number of superpixels. Note that - % the actual number may be smaller due to restrictions - % (depending on the image size and `num_levels`). Use - % cv.SuperpixelSEEDS.getNumberOfSuperpixels to get the - % actual number. + % the actual number may be smaller due to restrictions + % (depending on the image size and `num_levels`). Use + % cv.SuperpixelSEEDS.getNumberOfSuperpixels to get the actual + % number. % * **num_levels** Number of block levels. The more levels, the - % more accurate is the segmentation, but needs more memory - % and CPU time. Minimum is 2. + % more accurate is the segmentation, but needs more memory and + % CPU time. Minimum is 2. % % ## Options % * __Prior__ enable 3x3 shape smoothing term if `> 0`. A larger - % value leads to smoother shapes. Prior must be in the range - % [0, 5]. default 2 + % value leads to smoother shapes. Prior must be in the range + % [0, 5]. default 2 % * __HistogramBins__ Number of histogram bins. default 5 % * __DoubleStep__ If true, iterate each block level twice for - % higher accuracy. default false + % higher accuracy. default false % % The function initializes a SuperpixelSEEDS object for the input % image. It stores the parameters of the image: `image_width`, @@ -69,7 +70,7 @@ % level. An example of initialization of 4 block levels is % illustrated in the following figure. 
% - % ![image](http://docs.opencv.org/3.1.0/superpixels_blocks.png) + % ![image](https://docs.opencv.org/3.3.1/superpixels_blocks.png) % % See also: cv.SuperpixelSEEDS.iterate % @@ -80,7 +81,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.SuperpixelSEEDS % @@ -94,7 +95,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.SuperpixelSEEDS.empty, cv.SuperpixelSEEDS.load % @@ -104,11 +105,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.SuperpixelSEEDS.clear, cv.SuperpixelSEEDS.load % @@ -118,7 +119,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -134,21 +135,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -162,11 +163,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.SuperpixelSEEDS.save, cv.SuperpixelSEEDS.load % @@ -179,7 +180,7 @@ function load(this, fname_or_str, varargin) function num = getNumberOfSuperpixels(this) %GETNUMBEROFSUPERPIXELS Calculates the superpixel segmentation on a given image stored in object % - % num = obj.getNumberOfSuperpixels() + % num = obj.getNumberOfSuperpixels() % % ## Output % * __num__ actual amount of superpixels. @@ -195,19 +196,19 @@ function load(this, fname_or_str, varargin) function iterate(this, img, varargin) %ITERATE Calculates the superpixel segmentation on a given image with the initialized parameters in the object % - % obj.iterate(img) - % obj.iterate(img, 'OptionName',optionValue, ...) + % obj.iterate(img) + % obj.iterate(img, 'OptionName',optionValue, ...) % % ## Input % * __img__ Input image. Supported formats: `uint8`, `uint16`, - % `single`. 
Image size & number of channels must match with - % the initialized image size & channels with the - % constructor. It should be in HSV or Lab color space. Lab - % is a bit better, but also slower. + % `single`. Image size & number of channels must match with the + % initialized image size & channels with the constructor. It + % should be in HSV or Lab color space. Lab is a bit better, but + % also slower. % % ## Options % * __NumIterations__ Number of iterations. Higher number improves - % the result. default 4 + % the result. default 4 % % The function computes the superpixels segmentation of an image % with the parameters initialized with the constructor. The @@ -217,7 +218,7 @@ function iterate(this, img, varargin) % proposing pixel updates. An illustrative example can be seen % below. % - % ![image](http://docs.opencv.org/3.1.0/superpixels_blocks2.png) + % ![image](https://docs.opencv.org/3.3.1/superpixels_blocks2.png) % % This function can be called again for other images without the % need of initializing the algorithm with constructor. This save @@ -232,12 +233,12 @@ function iterate(this, img, varargin) function labels = getLabels(this) %GETLABELS Returns the segmentation labeling of the image % - % labels = obj.getLabels() + % labels = obj.getLabels() % % ## Output % * __labels__ Return a `int32` integer array containing the - % labels of the superpixel segmentation. The labels are in - % the range `[0, obj.getNumberOfSuperpixels()]`. + % labels of the superpixel segmentation. The labels are in the + % range `[0, obj.getNumberOfSuperpixels()]`. % % The function returns an image with the labels of the superpixel % segmentation. The labels are in the range @@ -254,22 +255,21 @@ function iterate(this, img, varargin) function img = getLabelContourMask(this, varargin) %GETLABELCONTOURMASK Returns the mask of the superpixel segmentation stored in object % - % img = obj.getLabelContourMask() - % img = obj.getLabelContourMask('OptionName',optionValue, ...) + % img = obj.getLabelContourMask() + % img = obj.getLabelContourMask('OptionName',optionValue, ...) % % ## Output % * __img__ Return `logical` image mask where 1 indicates that the - % pixel is a superpixel border, and 0 otherwise. + % pixel is a superpixel border, and 0 otherwise. % % ## Options % * __ThickLine__ If false, the border is only one pixel wide, - % otherwise all pixels at the border are masked. - % default false + % otherwise all pixels at the border are masked. default false % % The function return the boundaries of the superpixel % segmentation. % - % ![image](http://docs.opencv.org/3.1.0/superpixels_demo.png) + % ![image](https://docs.opencv.org/3.3.1/superpixels_demo.png) % % See also: cv.SuperpixelSEEDS.iterate, boundarymask % diff --git a/opencv_contrib/+cv/SuperpixelSLIC.m b/opencv_contrib/+cv/SuperpixelSLIC.m index b9a895728..56087e3fd 100644 --- a/opencv_contrib/+cv/SuperpixelSLIC.m +++ b/opencv_contrib/+cv/SuperpixelSLIC.m @@ -39,35 +39,35 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = SuperpixelSLIC(img, varargin) %SUPERPIXELSLIC Initialize a SuperpixelSLIC object % - % obj = cv.SuperpixelSLIC(img) - % obj = cv.SuperpixelSLIC(img, 'OptionName',optionValue, ...) + % obj = cv.SuperpixelSLIC(img) + % obj = cv.SuperpixelSLIC(img, 'OptionName',optionValue, ...) % % ## Input % * __img__ Image to segment. 
% % ## Options % * __Algorithm__ Chooses the algorithm variant to use: - * __SLIC__segments image using a desired `RegionSize` - and compactness factor `Ruler` (the same compactnes - for all superpixels in the image). - * __SLICO__ segments image using a desired `RegionSize`, - and in addition will choose an adaptive compactness - factor for each superpixel differently. This is the - default. - * __MSLIC__ optimize using manifold methods resulting in - more content-sensitive superpixels. + * __SLIC__ segments image using a desired `RegionSize` and + compactness factor `Ruler` (the same compactness for all + superpixels in the image). + * __SLICO__ segments image using a desired `RegionSize`, and + in addition will choose an adaptive compactness factor for + each superpixel differently. This is the default. + * __MSLIC__ optimizes using manifold methods resulting in more + content-sensitive superpixels. % * __RegionSize__ Chooses an average superpixel size measured in - pixels. default 10 + pixels. default 10 % * __Ruler__ Chooses the enforcement of superpixel smoothness - factor of superpixel. Only considered for SLIC, has no - effect on SLICO. default 10.0 + factor of superpixel. Only considered for SLIC, has no effect + on SLICO. default 10.0 % % The function initializes a SuperpixelSLIC object for the input % image. It sets the parameters of the chosen superpixel algorithm, @@ -91,7 +91,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.SuperpixelSLIC % @@ -105,7 +105,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.SuperpixelSLIC.empty, cv.SuperpixelSLIC.load % @@ -115,11 +115,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.SuperpixelSLIC.clear, cv.SuperpixelSLIC.load % @@ -129,7 +129,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -145,21 +145,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string).
The previous @@ -173,11 +173,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.SuperpixelSLIC.save, cv.SuperpixelSLIC.load % @@ -190,7 +190,7 @@ function load(this, fname_or_str, varargin) function num = getNumberOfSuperpixels(this) %GETNUMBEROFSUPERPIXELS Calculates the actual amount of superpixels on a given segmentation computed and stored in object % - % num = obj.getNumberOfSuperpixels() + % num = obj.getNumberOfSuperpixels() % % ## Output % * __num__ actual amount of superpixels. @@ -203,12 +203,12 @@ function load(this, fname_or_str, varargin) function iterate(this, varargin) %ITERATE Calculates the superpixel segmentation on a given image with the initialized parameters in the object % - % obj.iterate() - % obj.iterate('OptionName',optionValue, ...) + % obj.iterate() + % obj.iterate('OptionName',optionValue, ...) % % ## Options % * __NumIterations__ Number of iterations. Higher number improves - % the result. default 10 + % the result. default 10 % % The function computes the superpixels segmentation of an image % with the parameters initialized with the constructor. The @@ -228,12 +228,12 @@ function iterate(this, varargin) function labels = getLabels(this) %GETLABELS Returns the segmentation labeling of the image % - % labels = obj.getLabels() + % labels = obj.getLabels() % % ## Output % * __labels__ Return a `int32` integer array containing the - % labels of the superpixel segmentation. The labels are in - % the range `[0, obj.getNumberOfSuperpixels()]`. + % labels of the superpixel segmentation. The labels are in the + % range `[0, obj.getNumberOfSuperpixels()]`. % % The function returns an image with the labels of the superpixel % segmentation. The labels are in the range @@ -250,17 +250,16 @@ function iterate(this, varargin) function img = getLabelContourMask(this, varargin) %GETLABELCONTOURMASK Returns the mask of the superpixel segmentation stored in object % - % img = obj.getLabelContourMask() - % img = obj.getLabelContourMask('OptionName',optionValue, ...) + % img = obj.getLabelContourMask() + % img = obj.getLabelContourMask('OptionName',optionValue, ...) % % ## Output % * __img__ Return `logical` image mask where 1 indicates that the - % pixel is a superpixel border, and 0 otherwise. + % pixel is a superpixel border, and 0 otherwise. % % ## Options % * __ThickLine__ If false, the border is only one pixel wide, - % otherwise all pixels at the border are masked. - % default true + % otherwise all pixels at the border are masked. default true % % The function return the boundaries of the superpixel % segmentation. @@ -274,15 +273,15 @@ function iterate(this, varargin) function enforceLabelConnectivity(this, varargin) %ENFORCELABELCONNECTIVITY Enforce label connectivity % - % obj.enforceLabelConnectivity() - % obj.enforceLabelConnectivity('OptionName',optionValue, ...) + % obj.enforceLabelConnectivity() + % obj.enforceLabelConnectivity('OptionName',optionValue, ...) % % ## Options % * __MinElementSize__ The minimum element size in percents that - % should be absorbed into a bigger superpixel. 
Given - resulted average superpixel size valid value should be in - 0-100 range, 25 means that less then a quarter sized - superpixel should be absorbed, this is default. default 25 + should be absorbed into a bigger superpixel. Given the resulting + average superpixel size, valid values are in the 0-100 range; + 25 means that a superpixel less than a quarter of the average + size should be absorbed, this is the default. default 25 % % The function merges components that are too small, assigning the % previously found adjacent label to each such component. Calling this diff --git a/opencv_contrib/+cv/TransientAreasSegmentationModule.m b/opencv_contrib/+cv/TransientAreasSegmentationModule.m index 86fa4ce7f..5223f53f2 100644 --- a/opencv_contrib/+cv/TransientAreasSegmentationModule.m +++ b/opencv_contrib/+cv/TransientAreasSegmentationModule.m @@ -8,7 +8,7 @@ % data, based on Alexandre BENOIT's thesis: % "Le systeme visuel humain au secours de la vision par ordinateur" % - % 3 spatio temporal filters are used: + % Three spatio-temporal filters are used: % % - a first one which filters the noise and local variations of the input % motion energy @@ -28,18 +28,19 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = TransientAreasSegmentationModule(inputSize) %TRANSIENTAREASSEGMENTATIONMODULE Allocator % - % obj = cv.TransientAreasSegmentationModule(inputSize) + % obj = cv.TransientAreasSegmentationModule(inputSize) % % ## Input % * __inputSize__ size of the images input to segment `[w,h]` - % (output will be the same size). + % (output will be the same size). % % See also: cv.TransientAreasSegmentationModule.run % @@ -49,7 +50,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.TransientAreasSegmentationModule % @@ -63,7 +64,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.TransientAreasSegmentationModule.empty, % cv.TransientAreasSegmentationModule.load @@ -74,11 +75,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.TransientAreasSegmentationModule.clear, % cv.TransientAreasSegmentationModule.load @@ -89,7 +90,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -105,21 +106,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used).
default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -133,11 +134,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.TransientAreasSegmentationModule.save, % cv.TransientAreasSegmentationModule.load @@ -151,7 +152,7 @@ function load(this, fname_or_str, varargin) function sz = getSize(this) %GETSIZE Return the size of the manage input and output images % - % sz = obj.getSize() + % sz = obj.getSize() % % ## Output % * __sz__ image size `[w,h]`. @@ -164,15 +165,15 @@ function load(this, fname_or_str, varargin) function setup(this, segmentationParameterFile, varargin) %SETUP Try to open an XML segmentation parameters file to adjust current segmentation instance setup % - % obj.setup(segmentationParameterFile) - % obj.setup(segmentationParameterFile, 'OptionName',optionValue, ...) + % obj.setup(segmentationParameterFile) + % obj.setup(segmentationParameterFile, 'OptionName',optionValue, ...) % % ## Input % * __segmentationParameterFile__ the parameters filename. % % ## Options % * __ApplyDefaultSetupOnFailure__ set to true if an error must be - % thrown on error. default true + % thrown on error. default true % % If the xml file does not exist, then default setup is applied. % Warning: Exceptions are thrown if read XML file is not valid. @@ -186,31 +187,31 @@ function setup(this, segmentationParameterFile, varargin) function setupParameters(this, varargin) %SETUPPARAMETERS Pass segmentation parameters to adjust current segmentation instance setup % - % obj.setupParameters('OptionName',optionValue, ...) + % obj.setupParameters('OptionName',optionValue, ...) % % ## Options % * __ThresholdON__ default 100 % * __ThresholdOFF__ default 100 % * __LocalEnergyTemporalConstant__ the time constant of the first - % order low pass filter, use it to cut high temporal - % frequencies (noise or fast motion), unit is frames, - % typical value is 0.5 frame. default 0.5 + % order low pass filter, use it to cut high temporal frequencies + % (noise or fast motion), unit is frames, typical value is 0.5 + % frame. default 0.5 % * __LocalEnergySpatialConstant__ the spatial constant of the - % first order low pass filter, use it to cut high spatial - % frequencies (noise or thick contours), unit is pixels, - % typical value is 5 pixel. default 5 + % first order low pass filter, use it to cut high spatial + % frequencies (noise or thick contours), unit is pixels, typical + % value is 5 pixel. default 5 % * __NeighborhoodEnergyTemporalConstant__ local neighborhood - % energy filtering parameters: the aim is to get information - % about the energy neighborhood to perform a center surround - % energy analysis. default 1 + % energy filtering parameters: the aim is to get information + % about the energy neighborhood to perform a center surround + % energy analysis. default 1 % * __NeighborhoodEnergySpatialConstant__ see - % `NeighborhoodEnergyTemporalConstant`. default 15 + % `NeighborhoodEnergyTemporalConstant`. 
default 15 % * __ContextEnergyTemporalConstant__ context neighborhood energy - filtering parameters: the aim is to get information about - the energy on a wide neighborhood area to filtered out - local effects. default 1 + filtering parameters: the aim is to get information about the + energy on a wide neighborhood area to filter out local + effects. default 1 % * __ContextEnergySpatialConstant__ see - `ContextEnergyTemporalConstant`. default 75 + `ContextEnergyTemporalConstant`. default 75 % % Sets new parameter structure that stores the transient events % detector setup parameters. @@ -223,7 +224,7 @@ function setupParameters(this, varargin) function params = getParameters(this) %GETPARAMETERS Return the current parameters setup % - % params = obj.getParameters() + % params = obj.getParameters() % % ## Output % * __params__ the current parameters setup. @@ -237,11 +238,11 @@ function setupParameters(this, varargin) function str = printSetup(this) %PRINTSETUP Parameters setup display method % - % str = obj.printSetup() + % str = obj.printSetup() % % ## Output % * __str__ a string which contains formatted parameters - % information. + % information. % % See also: cv.TransientAreasSegmentationModule.getParameters % @@ -251,17 +252,16 @@ function setupParameters(this, varargin) function varargout = write(this, fs) %WRITE Write xml/yml formatted parameters information % - % obj.write(fs) - % str = obj.write(fs) + % obj.write(fs) + % str = obj.write(fs) % % ## Input % * __fs__ the filename of the xml file that will be opened and - % writen with formatted parameters information. + % written with formatted parameters information. % % ## Output % * __str__ optional output. If requested, the parameters are - % persisted to a string in memory instead of writing to - % disk. + % persisted to a string in memory instead of writing to disk. % % See also: cv.TransientAreasSegmentationModule.setup % @@ -271,16 +271,16 @@ function setupParameters(this, varargin) function run(this, inputToSegment) %RUN Main processing method % - % obj.run(inputToSegment) - % obj.run(inputToSegment, 'OptionName',optionValue, ...) + % obj.run(inputToSegment) + % obj.run(inputToSegment, 'OptionName',optionValue, ...) % % ## Input % * __inputToSegment__ the image to process, it must match the - % instance buffer size. + % instance buffer size. % % ## Options % * __ChannelIndex__ the channel to process in case of - % multichannel images (0-based index). default 0 + % multichannel images (0-based index). default 0 % % Get result using % cv.TransientAreasSegmentationModule.getSegmentationPicture @@ -294,12 +294,12 @@ function run(this, inputToSegment) function transientAreas = getSegmentationPicture(this) %GETSEGMENTATIONPICTURE Access function % - % transientAreas = obj.getSegmentationPicture() + % transientAreas = obj.getSegmentationPicture() % % ## Output % * __transientAreas__ the last segmentation result: a boolean - % picture which is resampled between 0 and 255 for a display - % purpose. + % picture which is resampled between 0 and 255 for display + % purposes.
% % See also: cv.TransientAreasSegmentationModule.run % @@ -309,7 +309,7 @@ function run(this, inputToSegment) function clearAllBuffers(this) %CLEARALLBUFFERS Cleans all the buffers of the instance % - % obj.clearAllBuffers() + % obj.clearAllBuffers() % % See also: cv.TransientAreasSegmentationModule.run % diff --git a/opencv_contrib/+cv/VGG.m b/opencv_contrib/+cv/VGG.m index 75609bbf0..d8a20e6c7 100644 --- a/opencv_contrib/+cv/VGG.m +++ b/opencv_contrib/+cv/VGG.m @@ -14,41 +14,41 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end methods function this = VGG(varargin) %VGG Constructor % - % obj = cv.VGG() - % obj = cv.VGG('OptionName',optionValue, ...) + % obj = cv.VGG() + % obj = cv.VGG('OptionName',optionValue, ...) % % ## Options % * __Desc__ Type of descriptor to use, '120' is default (120 - % dimensions float). Available types are: - % * __120__ - % * __80__ - % * __64__ - % * __48__ - % * __Sigma__ Gaussian kernel value for image blur. - % default is 1.4 + % dimensions float). Available types are: + % * __120__ + % * __80__ + % * __64__ + % * __48__ + % * __Sigma__ Gaussian kernel value for image blur. default is 1.4 % * __ImgNormalize__ Use image sample intensity normalization. - % default true + % default true % * __UseScaleOrientation__ Sample patterns using keypoints - % orientation. default true + % orientation. default true % * __ScaleFactor__ Adjust the sampling window of detected - % keypoints to 64.0 (VGG sampling window). - % * 6.25 is default and fits for cv.KAZE, cv.SURF detected - % keypoints window ratio - % * 6.75 should be the scale for cv.SIFT detected keypoints - % window ratio - % * 5.00 should be the scale for cv.AKAZE, cv.MSDDetector, - % cv.AgastFeatureDetector, cv.FastFeatureDetector, - % cv.BRISK keypoints window ratio - % * 0.75 should be the scale for cv.ORB keypoints ratio + % keypoints to 64.0 (VGG sampling window). + % * 6.25 is default and fits for cv.KAZE, cv.SURF detected + % keypoints window ratio + % * 6.75 should be the scale for cv.SIFT detected keypoints + % window ratio + % * 5.00 should be the scale for cv.AKAZE, cv.MSDDetector, + % cv.AgastFeatureDetector, cv.FastFeatureDetector, cv.BRISK + % keypoints window ratio + % * 0.75 should be the scale for cv.ORB keypoints ratio % * __DescNormalize__ Clamp descriptors to 255 and convert to - % `uint8`. default false + % `uint8`. default false % % See also: cv.VGG.compute % @@ -58,7 +58,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.VGG % @@ -69,7 +69,7 @@ function delete(this) function typename = typeid(this) %TYPEID Name of the C++ type (RTTI) % - % typename = obj.typeid() + % typename = obj.typeid() % % ## Output % * __typename__ Name of C++ type @@ -83,7 +83,7 @@ function delete(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.VGG.empty, cv.VGG.load % @@ -93,11 +93,11 @@ function clear(this) function b = empty(this) %EMPTY Checks if detector object is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the detector object is empty (e.g in the - % very beginning or after unsuccessful read). + % very beginning or after unsuccessful read). % % See also: cv.VGG.clear % @@ -107,7 +107,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm parameters to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. 
@@ -123,21 +123,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from the specified XML or % YAML file (either from disk or serialized string). The previous @@ -151,11 +151,11 @@ function load(this, fname_or_str, varargin) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.VGG.save, cv.VGG.load % @@ -168,16 +168,16 @@ function load(this, fname_or_str, varargin) function ntype = defaultNorm(this) %DEFAULTNORM Returns the default norm type % - % ntype = obj.defaultNorm() + % ntype = obj.defaultNorm() % % ## Output % * __ntype__ Norm type. One of `cv::NormTypes`: - % * __Inf__ - % * __L1__ - % * __L2__ - % * __L2Sqr__ - % * __Hamming__ - % * __Hamming2__ + % * __Inf__ + % * __L1__ + % * __L2__ + % * __L2Sqr__ + % * __Hamming__ + % * __Hamming2__ % % Always `L2` for VGG. % @@ -189,7 +189,7 @@ function load(this, fname_or_str, varargin) function sz = descriptorSize(this) %DESCRIPTORSIZE Returns the descriptor size in bytes % - % sz = obj.descriptorSize() + % sz = obj.descriptorSize() % % ## Output % * __sz__ Descriptor size. @@ -202,7 +202,7 @@ function load(this, fname_or_str, varargin) function dtype = descriptorType(this) %DESCRIPTORTYPE Returns the descriptor type % - % dtype = obj.descriptorType() + % dtype = obj.descriptorType() % % ## Output % * __dtype__ Descriptor type, one of numeric MATLAB class names. @@ -217,20 +217,20 @@ function load(this, fname_or_str, varargin) function [descriptors, keypoints] = compute(this, img, keypoints) %COMPUTE Computes the descriptors for a set of keypoints detected in an image % - % [descriptors, keypoints] = obj.compute(img, keypoints) + % [descriptors, keypoints] = obj.compute(img, keypoints) % % ## Input % * __img__ Image to extract descriptors, 8-bit grayscale image. % * __keypoints__ Input collection of keypoints of interest within - % image. Keypoints for which a descriptor cannot be computed - % are removed. Sometimes new keypoints can be added, for - % example: cv.SIFT duplicates keypoint with several dominant - % orientations (for each orientation). This is a - % struct-array of detected keypoints. + % image. Keypoints for which a descriptor cannot be computed are + % removed. 
Sometimes new keypoints can be added, for example: + cv.SIFT duplicates keypoints with several dominant orientations + (for each orientation). This is a struct-array of detected + keypoints. % % ## Output % * __descriptors__ Computed descriptors. Row `j` in `descriptors` - is the descriptor for `j`-th keypoint. + is the descriptor for `j`-th keypoint. % * __keypoints__ Optional output with possibly updated keypoints. % % See also: cv.VGG.VGG diff --git a/opencv_contrib/+cv/VariationalRefinement.m b/opencv_contrib/+cv/VariationalRefinement.m index e017aed59..e77421414 100644 --- a/opencv_contrib/+cv/VariationalRefinement.m +++ b/opencv_contrib/+cv/VariationalRefinement.m @@ -23,7 +23,8 @@ % properties (SetAccess = private) - id % Object ID + % Object ID + id end properties (Dependent) @@ -49,7 +50,7 @@ function this = VariationalRefinement() %VARIATIONALREFINEMENT Creates an instance of VariationalRefinement % - % obj = cv.VariationalRefinement() + % obj = cv.VariationalRefinement() % % See also: cv.VariationalRefinement.calc % @@ -59,7 +60,7 @@ function delete(this) %DELETE Destructor % - % obj.delete() + % obj.delete() % % See also: cv.VariationalRefinement % @@ -73,17 +74,17 @@ function delete(this) function flow = calc(this, I0, I1, varargin) %CALC Calculates an optical flow % - % flow = obj.calc(I0, I1) - % flow = obj.calc(I0, I1, 'OptionName',optionValue, ...) + % flow = obj.calc(I0, I1) + % flow = obj.calc(I0, I1, 'OptionName',optionValue, ...) % % ## Input % * __I0__ first 8-bit single-channel input image. % * __I1__ second input image of the same size and the same type - % as `I0`. + % as `I0`. % % ## Output % * __flow__ computed flow image that has the same size as `I0` - % and type `single` (2-channels). + % and type `single` (2-channels). % % ## Options % * __InitialFlow__ specify the initial flow. Not set by default. @@ -96,7 +97,7 @@ function delete(this) function collectGarbage(this) %COLLECTGARBAGE Releases all inner buffers % - % obj.collectGarbage() + % obj.collectGarbage() % VariationalRefinement_(this.id, 'collectGarbage'); end @@ -107,19 +108,19 @@ function collectGarbage(this) function [flow_u, flow_v] = calcUV(this, I0, I1, varargin) %CALCUV calc function overload to handle separate horizontal (u) and vertical (v) flow components (to avoid extra splits/merges) % - % [flow_u, flow_v] = obj.calcUV(I0, I1) - % [flow_u, flow_v] = obj.calcUV(I0, I1, 'OptionName',optionValue, ...) + % [flow_u, flow_v] = obj.calcUV(I0, I1) + % [flow_u, flow_v] = obj.calcUV(I0, I1, 'OptionName',optionValue, ...) % % ## Input % * __I0__ first 8-bit single-channel input image. % * __I1__ second input image of the same size and the same type - % as `I0`. + % as `I0`. % % ## Output % * **flow_u** computed horizontal flow image that has the same - % size as `I0` and type `single` (1-channel1). - % * **flow_v** computed vertical flow image that has the same - % size as `I0` and type `single` (1-channel1). + % size as `I0` and type `single` (1-channel). + % * **flow_v** computed vertical flow image that has the same size + % as `I0` and type `single` (1-channel). % % ## Options % * __InitialFlowU__ specify initial U-flow. Not set by default.
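For orientation, a minimal usage sketch of this refinement class (the file names and the DeepFlow initializer are illustrative assumptions, not part of the class API; `InitialFlow` is the documented option above):

    % two consecutive grayscale frames (hypothetical file names)
    I0 = cv.imread('frame1.png', 'Grayscale',true);
    I1 = cv.imread('frame2.png', 'Grayscale',true);
    % coarse initial estimate from another method, e.g. DeepFlow
    flow0 = cv.calcOpticalFlowDF(I0, I1);
    obj = cv.VariationalRefinement();
    flow = obj.calc(I0, I1, 'InitialFlow',flow0);  % refined dense flow
    [u, v] = obj.calcUV(I0, I1);                   % split U/V variant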
@@ -138,7 +139,7 @@ function collectGarbage(this) function clear(this) %CLEAR Clears the algorithm state % - % obj.clear() + % obj.clear() % % See also: cv.VariationalRefinement.empty % @@ -148,11 +149,11 @@ function clear(this) function b = empty(this) %EMPTY Returns true if the algorithm is empty % - % b = obj.empty() + % b = obj.empty() % % ## Output % * __b__ Returns true if the algorithm is empty (e.g. in the very - % beginning or after unsuccessful read). + % beginning or after unsuccessful read). % % See also: cv.VariationalRefinement.clear % @@ -162,11 +163,11 @@ function clear(this) function name = getDefaultName(this) %GETDEFAULTNAME Returns the algorithm string identifier % - % name = obj.getDefaultName() + % name = obj.getDefaultName() % % ## Output % * __name__ This string is used as top level XML/YML node tag - % when the object is saved to a file or string. + % when the object is saved to a file or string. % % See also: cv.VariationalRefinement.save, cv.VariationalRefinement.load % @@ -176,7 +177,7 @@ function clear(this) function save(this, filename) %SAVE Saves the algorithm to a file % - % obj.save(filename) + % obj.save(filename) % % ## Input % * __filename__ Name of the file to save to. @@ -191,21 +192,21 @@ function save(this, filename) function load(this, fname_or_str, varargin) %LOAD Loads algorithm from a file or a string % - % obj.load(fname) - % obj.load(str, 'FromString',true) - % obj.load(..., 'OptionName',optionValue, ...) + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) % % ## Input % * __fname__ Name of the file to read. % * __str__ String containing the serialized model you want to - % load. + % load. % % ## Options % * __ObjName__ The optional name of the node to read (if empty, - % the first top-level node will be used). default empty - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized model. - % default false + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false % % This method reads algorithm parameters from a file storage. % The previous model state is discarded. diff --git a/opencv_contrib/+cv/WBDetector.m b/opencv_contrib/+cv/WBDetector.m index e0f2de5b2..28a16aaa2 100644 --- a/opencv_contrib/+cv/WBDetector.m +++ b/opencv_contrib/+cv/WBDetector.m @@ -1,5 +1,5 @@ classdef WBDetector < handle - %WBDETECTOR WaldBoost detector: Object Detection using Boosted Features + %WBDETECTOR WaldBoost detector - Object Detection using Boosted Features % % Class for object detection using WaldBoost from [Sochman05]. 
% Uses a Waldboost cascade and local binary patterns computed as integral @@ -8,20 +8,20 @@ % ## Example % The basic usage is the following: % - % % train - % detector = cv.WBDetector(); - % detector.train('/path/to/pos/', '/path/to/neg/'); - % detector.write('model.xml'); + % % train + % detector = cv.WBDetector(); + % detector.train('/path/to/pos/', '/path/to/neg/'); + % detector.write('model.xml'); % - % % detect - % detector = cv.WBDetector(); - % detector.read('model.xml'); - % img = cv.imread('image.png', 'Grayscale',true); - % [bboxes,conf] = detector.detect(img); - % for i=1:numel(bboxes) - % img = cv.rectangle(img, bboxes{i}, 'Color',[0 255 0]); - % end - % imshow(img) + % % detect + % detector = cv.WBDetector(); + % detector.read('model.xml'); + % img = cv.imread('image.png', 'Grayscale',true); + % [bboxes,conf] = detector.detect(img); + % for i=1:numel(bboxes) + % img = cv.rectangle(img, bboxes{i}, 'Color',[0 255 0]); + % end + % imshow(img) % % ## References % [Sochman05]: @@ -42,7 +42,7 @@ function this = WBDetector() %WBDETECTOR Create instance of WBDetector % - % detector = cv.WBDetector() + % detector = cv.WBDetector() % % See also: cv.WBDetector, cv.WBDetector.train, cv.WBDetector.read % @@ -52,7 +52,7 @@ function delete(this) %DELETE Destructor % - % detector.delete() + % detector.delete() % % See also: cv.WBDetector % @@ -63,18 +63,18 @@ function delete(this) function read(this, fname_or_str, varargin) %READ Read detector from file % - % detector.read(filename) - % detector.read(str, 'FromString',true) - % detector.read(..., 'OptionName', optionValue, ...) + % detector.read(filename) + % detector.read(str, 'FromString',true) + % detector.read(..., 'OptionName', optionValue, ...) % % ## Input % * __filename__ Name of the file to read from. % * __str__ String containing serialized detector you want to load. % % ## Options - % * __FromString__ Logical flag to indicate whether the input is - % a filename or a string containing the serialized detector. - % default false + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized detector. + % default false % % See also: cv.WBDetector.write % @@ -84,15 +84,15 @@ function read(this, fname_or_str, varargin) function varargout = write(this, filename) %WRITE Write detector to file % - % detector.write(filename) - % str = detector.write(filename) + % detector.write(filename) + % str = detector.write(filename) % % ## Input % * __filename__ Name of the file to save to. % % ## Output % * __str__ optional output. If requested, the object is persisted - % to a string in memory instead of writing to disk. + % to a string in memory instead of writing to disk. % % See also: cv.WBDetector.read % @@ -102,13 +102,13 @@ function read(this, fname_or_str, varargin) function train(this, pos_samples, neg_imgs) %TRAIN Train WaldBoost detector % - % detector.train(pos_samples, neg_imgs) + % detector.train(pos_samples, neg_imgs) % % ## Input % * **pos_samples** Path to directory with cropped positive - % samples. + % samples. % * **neg_imgs** Path to directory with negative (background) - % images. + % images. % % See also: cv.WBDetector.detect % @@ -118,7 +118,7 @@ function train(this, pos_samples, neg_imgs) function [bboxes,confidences] = detect(this, img) %DETECT Detect objects on image using WaldBoost detector % - % [bboxes,confidences] = detector.detect(img) + % [bboxes,confidences] = detector.detect(img) % % ## Input % * __img__ Input image for detection.
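As a hedged complement to the class example above, detections can be filtered by confidence before drawing (a minimal sketch: the 0.5 cutoff is an arbitrary assumption, and `conf` is assumed to be a numeric vector aligned with the `bboxes` cell array):

    [bboxes, conf] = detector.detect(img);
    keep = conf >= 0.5;                  % arbitrary confidence threshold
    bboxes = bboxes(keep);
    for i = 1:numel(bboxes)
        img = cv.rectangle(img, bboxes{i}, 'Color',[0 255 0]);
    end
    imshow(img)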
diff --git a/opencv_contrib/+cv/anisotropicDiffusion.m b/opencv_contrib/+cv/anisotropicDiffusion.m index b2142c6d4..70877c50a 100644 --- a/opencv_contrib/+cv/anisotropicDiffusion.m +++ b/opencv_contrib/+cv/anisotropicDiffusion.m @@ -1,25 +1,25 @@ %ANISOTROPICDIFFUSION Performs anisotropic diffusion on an image % -% dst = cv.anisotropicDiffusion(src) -% dst = cv.anisotropicDiffusion(src, 'OptionName',optionValue, ...) +% dst = cv.anisotropicDiffusion(src) +% dst = cv.anisotropicDiffusion(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ 8-bit 3-channel source image. % % ## Output % * __dst__ Destination image of the same size and the same number of channels -% as `src`. +% as `src`. % % ## Options % * __Alpha__ The amount of time to step forward by on each iteration -% (normally, it's between 0 and 1). default 1.0 +% (normally, it's between 0 and 1). default 1.0 % * __K__ sensitivity to the edges. default 0.02 % * __Iterations__ The number of iterations. default 10 % % The function applies Perona-Malik anisotropic diffusion to an image. This is % the solution to the partial differential equation: % -% dI/dt = div(c(x,y,t) * nabla_I) = nabla_c * nabla_I + c(x,y,t) * delta_I +% dI/dt = div(c(x,y,t) * nabla_I) = nabla_c * nabla_I + c(x,y,t) * delta_I % % where `delta` denotes the Laplacian, `nabla` denotes the gradient, `div` is % the divergence operator and `c(x,y,t)` is the diffusion coefficient (usually diff --git a/opencv_contrib/+cv/applyChannelGains.m b/opencv_contrib/+cv/applyChannelGains.m index b4a44cddd..462556c5e 100644 --- a/opencv_contrib/+cv/applyChannelGains.m +++ b/opencv_contrib/+cv/applyChannelGains.m @@ -1,10 +1,10 @@ %APPLYCHANNELGAINS Implements an efficient fixed-point approximation for applying channel gains, which is the last step of multiple white balance algorithms % -% dst = cv.applyChannelGains(src, gains) +% dst = cv.applyChannelGains(src, gains) % % ## Input % * __src__ Input 3-channel image in the BGR color space (either `uint8` or -% `uint16`). +% `uint16`). % * __gains__ gains for the [B,G,R] channels. % % ## Output diff --git a/opencv_contrib/+cv/bilateralTextureFilter.m b/opencv_contrib/+cv/bilateralTextureFilter.m index 0c6a92e2b..1ae02b0db 100644 --- a/opencv_contrib/+cv/bilateralTextureFilter.m +++ b/opencv_contrib/+cv/bilateralTextureFilter.m @@ -1,7 +1,7 @@ %BILATERALTEXTUREFILTER Applies the bilateral texture filter to an image % -% dst = cv.bilateralTextureFilter(src) -% dst = cv.bilateralTextureFilter(src, 'OptionName', optionValue, ...) +% dst = cv.bilateralTextureFilter(src) +% dst = cv.bilateralTextureFilter(src, 'OptionName', optionValue, ...) % % ## Input % * __src__ Source image whose depth is 8-bit `uint8` or 32-bit `single` @@ -11,16 +11,15 @@ % % ## Options % * __FR__ Radius of kernel to be used for filtering. It should be positive -% integer. default 3 +% integer. default 3 % * __NumIter__ Number of iterations of algorithm, It should be positive -% integer. default 1 +% integer. default 1 % * __SigmaAlpha__ Controls the sharpness of the weight transition from edges -% to smooth/texture regions, where a bigger value means sharper -% transition. When the value is negative, it is automatically -% calculated. default -1 +% to smooth/texture regions, where a bigger value means sharper transition. +% When the value is negative, it is automatically calculated. default -1 % * __SigmaAvg__ Range blur parameter for texture blurring. Larger value makes -% result to be more blurred. 
When the value is negative, it is -% automatically calculated as described in the paper. default -1 +% the result more blurred. When the value is negative, it is automatically +% calculated as described in the paper. default -1 % % It performs a structure-preserving texture filter. For more details about this % filter see [Cho2014]. diff --git a/opencv_contrib/+cv/bm3dDenoising.m b/opencv_contrib/+cv/bm3dDenoising.m index 046af7b1a..0771fec3a 100644 --- a/opencv_contrib/+cv/bm3dDenoising.m +++ b/opencv_contrib/+cv/bm3dDenoising.m @@ -1,61 +1,58 @@ %BM3DDENOISING Performs image denoising using the Block-Matching and 3D-filtering algorithm % -% dst = cv.bm3dDenoising(src) -% [dstStep1,dstStep2] = cv.bm3dDenoising(src) -% [...] = cv.bm3dDenoising(..., 'OptionName',optionValue, ...) +% dst = cv.bm3dDenoising(src) +% [dstStep1, dstStep2] = cv.bm3dDenoising(src) +% [...] = cv.bm3dDenoising(..., 'OptionName',optionValue, ...) % % ## Input % * __src__ Input 8-bit or 16-bit 1-channel image. % % ## Output % * __dst__ Output image with the same size and type as `src`. -% * __dstStep1__ Output image of the first step of BM3D with the same size -% and type as `src`. `Basic` image must be provided. +% * __dstStep1__ Output image of the first step of BM3D with the same size and +% type as `src`. `Basic` image must be provided. % * __dstStep2__ Output image of the second step of BM3D with the same size -% and type as `src`. +% and type as `src`. % % ## Options % * __Basic__ Basic image, initial estimate of the first step. Must be -% provided in the case of `Step=2`. +% provided in the case of `Step=2`. % * __H__ Parameter regulating filter strength. Big `H` value perfectly -% removes noise but also removes image details, smaller `H` value -% preserves details but also preserves some noise. default 1 +% removes noise but also removes image details, smaller `H` value preserves +% details but also preserves some noise. default 1 % * __TemplateWindowSize__ Size in pixels of the template patch that is used -% for block-matching. Should be power of 2. default 4 +% for block-matching. Should be a power of 2. default 4 % * __SearchWindowSize__ Size in pixels of the window that is used to perform -% block-matching. Affect performance linearly: greater -% `SearchWindowsSize` greater denoising time. Must be larger than -% `TemplateWindowSize`. default 16 +% block-matching. Affects performance linearly: the greater the +% `SearchWindowSize`, the greater the denoising time. Must be larger than +% `TemplateWindowSize`. default 16 -% * __BlockMatchingStep1__ Block matching threshold for the first step of -% BM3D (hard thresholding), i.e. maximum distance for which two blocks -% are considered similar. Value expressed in euclidean distance. -% default 2500 +% * __BlockMatchingStep1__ Block matching threshold for the first step of BM3D +% (hard thresholding), i.e. maximum distance for which two blocks are +% considered similar. Value expressed in Euclidean distance. default 2500 % * __BlockMatchingStep2__ Block matching threshold for the second step of -% BM3D (Wiener filtering), i.e. maximum distance for which two blocks -% are considered similar. Value expressed in euclidean distance. -% default 400 +% BM3D (Wiener filtering), i.e. maximum distance for which two blocks are +% considered similar. Value expressed in Euclidean distance. default 400 % * __GroupSize__ Maximum size of the 3D group for collaborative filtering. -% default 8 +% default 8 % * __SlidingStep__ Sliding step to process every next reference block.
-% default 1 +% default 1 % * __Beta__ Kaiser window parameter that affects the sidelobe attenuation of -% the transform of the window. Kaiser window is used in order to reduce -% border effects. To prevent usage of the window, set beta to zero. -% default 2.0 +% the transform of the window. Kaiser window is used in order to reduce +% border effects. To prevent usage of the window, set beta to zero. +% default 2.0 % * __NormType__ Norm used to calculate distance between blocks. L2 is slower -% than L1 but yields more accurate results. default 'L2'. One: -% * __L2__ -% * __L1__ +% than L1 but yields more accurate results. default 'L2'. One of: +% * __L2__ +% * __L1__ % * __Step__ Step of BM3D to be executed. Possible variants are: step 1, -% step 2, both steps. In the first variant, allowed are only -% '1' and 'All'. '2' is not allowed as it requires `Basic` estimate to -% be present. One of: -% * __All__ (default) Execute all steps of the algorithm. -% * __1__ Execute only first step of the algorithm. -% * __2__ Execute only second step of the algorithm. +% step 2, both steps. In the first variant, allowed are only '1' and 'All'. +% '2' is not allowed as it requires `Basic` estimate to be present. One of: +% * __All__ (default) Execute all steps of the algorithm. +% * __1__ Execute only the first step of the algorithm. +% * __2__ Execute only the second step of the algorithm. % * __TransformType__ Type of the orthogonal transform used in collaborative -% filtering step. Currently only Haar transform is supported. One of: -% * __Haar__ (default) Un-normalized Haar transform. +% filtering step. Currently only Haar transform is supported. One of: +% * __Haar__ (default) Un-normalized Haar transform. % % Performs image denoising using the Block-Matching and 3D-filtering algorithm % [PDF](http://www.cs.tut.fi/~foi/GCF-BM3D/BM3D_TIP_2007.pdf) with several diff --git a/opencv_contrib/+cv/boardDump.m b/opencv_contrib/+cv/boardDump.m index 08fcde927..592710448 100644 --- a/opencv_contrib/+cv/boardDump.m +++ b/opencv_contrib/+cv/boardDump.m @@ -1,16 +1,16 @@ %BOARDDUMP Dump board (aruco) % -% s = cv.boardDump(board) +% s = cv.boardDump(board) % % ## Input % * __board__ layout of markers in the board. % % ## Output % * __s__ Output struct with the following fields: -% * __objPoints__ array of object points of all the marker corners in -% the board. -% * __dictionary__ the dictionary of markers employed for this board. -% * __ids__ vector of the identifiers of the markers in the board. +% * __objPoints__ array of object points of all the marker corners in the +% board. +% * __dictionary__ the dictionary of markers employed for this board. +% * __ids__ vector of the identifiers of the markers in the board. % % See also: cv.estimatePoseBoard, cv.dictionaryDump % diff --git a/opencv_contrib/+cv/calcGlobalOrientation.m b/opencv_contrib/+cv/calcGlobalOrientation.m index 27b86625a..6ad59d3a1 100644 --- a/opencv_contrib/+cv/calcGlobalOrientation.m +++ b/opencv_contrib/+cv/calcGlobalOrientation.m @@ -1,17 +1,17 @@ %CALCGLOBALORIENTATION Calculates a global motion orientation in a selected region % -% orient = cv.calcGlobalOrientation(orientation, mask, mhi, timestamp, duration) +% orient = cv.calcGlobalOrientation(orientation, mask, mhi, timestamp, duration) % % ## Input % * __orientation__ Motion gradient orientation image calculated by the -% function cv.calcMotionGradient +% function cv.calcMotionGradient % * __mask__ Mask image.
It may be a conjunction of a valid gradient mask, -% also calculated by cv.calcMotionGradient, and the mask of a region -% whose direction needs to be calculated. +% also calculated by cv.calcMotionGradient, and the mask of a region whose +% direction needs to be calculated. % * __mhi__ Motion history image calculated by cv.updateMotionHistory. % * __timestamp__ Timestamp passed to cv.updateMotionHistory. % * __duration__ Maximum duration of a motion track in milliseconds, passed to -% cv.updateMotionHistory. +% cv.updateMotionHistory. % % ## Output % * __orient__ Output global orientation. diff --git a/opencv_contrib/+cv/calcMotionGradient.m b/opencv_contrib/+cv/calcMotionGradient.m index 7087f28f6..5ef6eb721 100644 --- a/opencv_contrib/+cv/calcMotionGradient.m +++ b/opencv_contrib/+cv/calcMotionGradient.m @@ -1,32 +1,32 @@ %CALCMOTIONGRADIENT Calculates a gradient orientation of a motion history image % -% [mask,orientation] = cv.calcMotionGradient(mhi, delta1, delta2) -% [...] = cv.calcMotionGradient(..., 'OptionName', optionValue, ...) +% [mask, orientation] = cv.calcMotionGradient(mhi, delta1, delta2) +% [...] = cv.calcMotionGradient(..., 'OptionName', optionValue, ...) % % ## Input % * __mhi__ Motion history single-channel floating-point image. % * __delta1__ Minimal (or maximal) allowed difference between `mhi` values -% within a pixel neighorhood. +% within a pixel neighborhood. % * __delta2__ Maximal (or minimal) allowed difference between `mhi` values -% within a pixel neighorhood. That is, the function finds the minimum -% (`m(x,y)`) and maximum (`M(x,y)`) `mhi` values over 3x3 neighborhood -% of each pixel and marks the motion orientation at `(x,y)` as valid -% only if: `min(delta1,delta2) <= M(x,y) - m(x,y) <= max(delta1,delta2)` +% within a pixel neighborhood. That is, the function finds the minimum +% (`m(x,y)`) and maximum (`M(x,y)`) `mhi` values over a 3x3 neighborhood of +% each pixel and marks the motion orientation at `(x,y)` as valid only if: +% `min(delta1,delta2) <= M(x,y) - m(x,y) <= max(delta1,delta2)` % % ## Output % * __mask__ Output mask image that has the type `uint8` and the same size as -% `mhi`. Its non-zero elements mark pixels where the motion gradient -% data is correct. +% `mhi`. Its non-zero elements mark pixels where the motion gradient data is +% correct. % * __orientation__ Output motion gradient orientation image that has the same -% type and the same size as `mhi`. Each pixel of the image is a motion -% orientation, from 0 to 360 degrees. +% type and the same size as `mhi`. Each pixel of the image is a motion +% orientation, from 0 to 360 degrees. % % ## Options % * __ApertureSize__ Aperture size of the cv.Sobel operator. % % The function calculates a gradient orientation at each pixel `(x,y)` as: % -% orientation(x,y) = arctan((dmhi / dy) / (dmhi / dx)) +% orientation(x,y) = arctan((dmhi / dy) / (dmhi / dx)) % % In fact, `fastAtan2` and `phase` are used so that the computed angle is % measured in degrees and covers the full range 0..360. Also, the mask is diff --git a/opencv_contrib/+cv/calcOpticalFlowDF.m b/opencv_contrib/+cv/calcOpticalFlowDF.m index b60676f80..19208d456 100644 --- a/opencv_contrib/+cv/calcOpticalFlowDF.m +++ b/opencv_contrib/+cv/calcOpticalFlowDF.m @@ -1,6 +1,6 @@ %CALCOPTICALFLOWDF DeepFlow optical flow algorithm implementation % -% flow = cv.calcOpticalFlowDF(I0, I1) +% flow = cv.calcOpticalFlowDF(I0, I1) % % ## Input % * __I0__ First 8-bit single-channel grayscale input image.
@@ -8,8 +8,7 @@ % % ## Output % * __flow__ computed flow image that has the same size as `I0` and type -% `single` (2-channels). Flow for `(x,y)` is stored in the third -% dimension. +% `single` (2-channels). Flow for `(x,y)` is stored in the third dimension. % % The class implements the DeepFlow optical flow algorithm described in % [Weinzaepfel2013]. See also @@ -21,14 +20,14 @@ % * __Gamma__ Gradient constancy weight. default 5.0 % * __Sigma__ Gaussian smoothing parameter. default 0.6 % * __MinSize__ Minimal dimension of an image in the pyramid (next, smaller -% images in the pyramid are generated until one of the dimensions -% reaches this size). default 25 +% images in the pyramid are generated until one of the dimensions reaches +% this size). default 25 % * __DownscaleFactor__ Scaling factor in the image pyramid (must be <1). -% default 0.95 +% default 0.95 % * __FixedPointIterations__ How many iterations on each level of the pyramid. -% default 5 +% default 5 % * __SorIterations__ Iterations of Successive Over-Relaxation (solver). -% default 25 +% default 25 % * __Omega__ Relaxation factor in SOR. default 1.6 % % ## References diff --git a/opencv_contrib/+cv/calcOpticalFlowSF.m b/opencv_contrib/+cv/calcOpticalFlowSF.m index 5fa0cdd67..cc8daa3a6 100644 --- a/opencv_contrib/+cv/calcOpticalFlowSF.m +++ b/opencv_contrib/+cv/calcOpticalFlowSF.m @@ -1,7 +1,7 @@ %CALCOPTICALFLOWSF Calculate an optical flow using "SimpleFlow" algorithm % -% flow = cv.calcOpticalFlowSF(from, to) -% flow = cv.calcOpticalFlowSF(from, to, 'OptionName',optionValue, ...) +% flow = cv.calcOpticalFlowSF(from, to) +% flow = cv.calcOpticalFlowSF(from, to, 'OptionName',optionValue, ...) % % ## Input % * __from__ First 8-bit 3-channel image. @@ -9,31 +9,30 @@ % % ## Output % * __flow__ computed flow image that has the same size as `from` and type -% `single` (2-channels). Flow for `(x,y)` is stored in the third -% dimension. +% `single` (2-channels). Flow for `(x,y)` is stored in the third dimension. % % ## Options % * __Layers__ Number of layers. default 3 % * __AveragingBlockSize__ Size of block through which we sum up when -% calculate cost function for pixel. default 2 +% calculating the cost function for a pixel. default 2 % * __MaxFlow__ maximal flow that we search at each level. default 4 % * __SigmaDist__ vector smooth spatial sigma parameter. default 4.1 % * __SigmaColor__ vector smooth color sigma parameter. default 25.5 % * __PostprocessWindow__ window size for postprocess cross bilateral filter. -% default 18 +% default 18 % * __SigmaDistFix__ spatial sigma for postprocess cross bilateral filter. -% default 55.0 +% default 55.0 % * __SigmaColorFix__ color sigma for postprocess cross bilateral filter. -% default 25.5 +% default 25.5 % * __OccThr__ threshold for detecting occlusions. default 0.35 % * __UpscaleAveragingRadius__ window size for bilateral upscale operation. -% default 18 +% default 18 % * __UpscaleSigmaDist__ spatial sigma for bilateral upscale operation. -% default 55.0 +% default 55.0 % * __UpscaleSigmaColor__ color sigma for bilateral upscale operation. -% default 25.5 +% default 25.5 -% * __SpeedUpThr__ threshold to detect point with irregular flow - where flow -% should be recalculated after upscale. default 10 +% * __SpeedUpThr__ threshold to detect points with irregular flow, where flow +% should be recalculated after upscale. default 10 % % See [Tao2012], and the site of the % [project](http://graphics.berkeley.edu/papers/Tao-SAN-2012-05/).
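A minimal SimpleFlow sketch under assumed inputs (two 8-bit 3-channel frames on disk; the file names are illustrative):

    from = imread('frame1.png');               % first 8-bit 3-channel frame
    to = imread('frame2.png');                 % second frame, same size
    flow = cv.calcOpticalFlowSF(from, to, 'Layers',3, 'MaxFlow',4);
    mag = hypot(flow(:,:,1), flow(:,:,2));     % per-pixel flow magnitude
    imshow(mag, [])                            % visualize as intensity image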
diff --git a/opencv_contrib/+cv/calcOpticalFlowSparseToDense.m b/opencv_contrib/+cv/calcOpticalFlowSparseToDense.m index 35adc440b..c81c938af 100644 --- a/opencv_contrib/+cv/calcOpticalFlowSparseToDense.m +++ b/opencv_contrib/+cv/calcOpticalFlowSparseToDense.m @@ -1,35 +1,34 @@ %CALCOPTICALFLOWSPARSETODENSE Fast dense optical flow based on PyrLK sparse matches interpolation % -% flow = cv.calcOpticalFlowSparseToDense(from, to) -% flow = cv.calcOpticalFlowSparseToDense(from, to, 'OptionName',optionValue, ...) +% flow = cv.calcOpticalFlowSparseToDense(from, to) +% flow = cv.calcOpticalFlowSparseToDense(from, to, 'OptionName',optionValue, ...) % % ## Input % * __from__ first 8-bit 3-channel or 1-channel image. % * __to__ second 8-bit 3-channel or 1-channel image of the same size as -% `from`. +% `from`. % % ## Output % * __flow__ computed flow image that has the same size as `from` and type -% `single` (2-channels). Flow for `(x,y)` is stored in the third -% dimension. +% `single` (2-channels). Flow for `(x,y)` is stored in the third dimension. % % ## Options % * __GridStep__ stride used in sparse match computation. Lower values usually -% result in higher quality but slow down the algorithm. default 8 +% result in higher quality but slow down the algorithm. default 8 % * __K__ number of nearest-neighbor matches considered, when fitting a -% locally affine model. Lower values can make the algorithm noticeably -% faster at the cost of some quality degradation. default 128 +% locally affine model. Lower values can make the algorithm noticeably +% faster at the cost of some quality degradation. default 128 % * __Sigma__ parameter defining how fast the weights decrease in the -% locally-weighted affine fitting. Higher values can help preserve fine -% details, lower values can help to get rid of the noise in the output -% flow. default 0.05 +% locally-weighted affine fitting. Higher values can help preserve fine +% details, lower values can help to get rid of the noise in the output flow. +% default 0.05 % * __UsePostProcessing__ defines whether the -% cv.FastGlobalSmootherFilter.fastGlobalSmootherFilter is used for -% post-processing after interpolation. default true +% cv.FastGlobalSmootherFilter.fastGlobalSmootherFilter is used for +% post-processing after interpolation. default true % * __FGSLambda__ see the respective parameter of -% cv.FastGlobalSmootherFilter.fastGlobalSmootherFilter, default 500.0 +% cv.FastGlobalSmootherFilter.fastGlobalSmootherFilter, default 500.0 % * __FGSSigma__ see the respective parameter of -% cv.FastGlobalSmootherFilter.fastGlobalSmootherFilter, default 1.5 +% cv.FastGlobalSmootherFilter.fastGlobalSmootherFilter, default 1.5 % % See also: cv.calcOpticalFlowPyrLK, cv.SparsePyrLKOpticalFlow % diff --git a/opencv_contrib/+cv/calibrateCameraAruco.m b/opencv_contrib/+cv/calibrateCameraAruco.m index 53bd71bd0..61b54a008 100644 --- a/opencv_contrib/+cv/calibrateCameraAruco.m +++ b/opencv_contrib/+cv/calibrateCameraAruco.m @@ -1,47 +1,46 @@ %CALIBRATECAMERAARUCO Calibrate a camera using aruco markers % -% [cameraMatrix, distCoeffs, reprojErr] = cv.calibrateCameraAruco(corners, ids, counter, board, imageSize) -% [cameraMatrix, distCoeffs, reprojErr, rvecs, tvecs, stdDevsIntrinsics, stdDevsExtrinsics, perViewErrors] = cv.calibrateCameraAruco(...) -% [...] = cv.calibrateCameraAruco(..., 'OptionName',optionValue, ...) 
+% [cameraMatrix, distCoeffs, reprojErr] = cv.calibrateCameraAruco(corners, ids, counter, board, imageSize) +% [cameraMatrix, distCoeffs, reprojErr, rvecs, tvecs, stdDevsIntrinsics, stdDevsExtrinsics, perViewErrors] = cv.calibrateCameraAruco(...) +% [...] = cv.calibrateCameraAruco(..., 'OptionName',optionValue, ...) % % ## Input % * __corners__ vector of detected marker corners in all frames. The corners -% should have the same format returned by cv.detectMarkers -% `{{[x,y],..}, ..}`. +% should have the same format returned by cv.detectMarkers +% `{{[x,y],..}, ..}`. % * __ids__ vector of identifiers for each marker in `corners` (0-based). % * __counter__ number of markers in each frame so that `corners` and `ids` -% can be split. +% can be split. % * __board__ Marker board layout. % * __imageSize__ Size of the image `[w,h]` used only to initialize the -% intrinsic camera matrix. +% intrinsic camera matrix. % % ## Output % * __cameraMatrix__ Output 3x3 floating-point camera matrix -% `A = [fx 0 cx; 0 fy cy; 0 0 1]`. If any of `UseIntrinsicGuess`, -% `FixAspectRatio`, or `FixFocalLength` are specified, some or all of -% `fx`, `fy`, `cx`, `cy` must be initialized before calling the function. +% `A = [fx 0 cx; 0 fy cy; 0 0 1]`. If any of `UseIntrinsicGuess`, +% `FixAspectRatio`, or `FixFocalLength` are specified, some or all of `fx`, +% `fy`, `cx`, `cy` must be initialized before calling the function. % * __distCoeffs__ Output vector of distortion coefficients -% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4]` of 4, 5, 8 or 12 elements. +% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4]` of 4, 5, 8 or 12 elements. % * __reprojErr__ the overall RMS re-projection error. % * __rvecs__ Output cell-array of rotation vectors estimated for each board -% view. That is, each k-th rotation vector together with the -% corresponding k-th translation vector (see the next output parameter -% description) brings the board pattern from the model coordinate space -% (in which object points are specified) to the world coordinate space, -% that is, a real position of the board pattern in the k-th pattern view -% (`k=0..M-1`). +% view. That is, each k-th rotation vector together with the corresponding +% k-th translation vector (see the next output parameter description) brings +% the board pattern from the model coordinate space (in which object points +% are specified) to the world coordinate space, that is, a real position of +% the board pattern in the k-th pattern view (`k=0..M-1`). % * __tvecs__ Output cell-array of translation vectors estimated for each -% pattern view. +% pattern view. % * __stdDevsIntrinsics__ Output vector of standard deviations estimated for -% intrinsic parameters. Order of deviations values: -% `(fx,fy,cx,cy,k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy)`. If one -% of parameters is not estimated, its deviation is equals to zero. +% intrinsic parameters. Order of deviations values: +% `(fx,fy,cx,cy,k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy)`. If one of +% the parameters is not estimated, its deviation equals zero. % * __stdDevsExtrinsics__ Output vector of standard deviations estimated for -% extrinsic parameters. Order of deviations values: -% `(R1, T1, ..., RM, TM)` where `M` is number of pattern views, `Ri, Ti` -% are concatenated 1x3 vectors. +% extrinsic parameters. Order of deviations values: `(R1, T1, ..., RM, TM)` +% where `M` is the number of pattern views, `Ri, Ti` are concatenated 1x3 +% vectors.
% * __perViewErrors__ Output vector of average re-projection errors estimated -% for each pattern view. +% for each pattern view. % % ## Options % * __CameraMatrix__, __DistCoeffs__, __UseIntrinsicGuess__, @@ -49,10 +48,10 @@ % __ZeroTangentDist__, __FixTangentDist__, __FixK1__, ..., __FixK6__, % __RationalModel__, __ThinPrismModel__, __FixS1S2S3S4__, __TiltedModel__, % __FixTauXTauY__, __UseLU__, __UseQR__ -% Different flags for the calibration process. See cv.calibrateCamera -% for details. +% Different flags for the calibration process. See cv.calibrateCamera for +% details. % * __Criteria__ Termination criteria for the iterative optimization algorithm. -% default `struct('type','Count+EPS', 'maxCount',30, 'epsilon',eps)` +% default `struct('type','Count+EPS', 'maxCount',30, 'epsilon',eps)` % % This function calibrates a camera using an Aruco Board. The function % receives a list of detected markers from several views of the board. The diff --git a/opencv_contrib/+cv/calibrateCameraCharuco.m b/opencv_contrib/+cv/calibrateCameraCharuco.m index cba82edcc..fc589fbce 100644 --- a/opencv_contrib/+cv/calibrateCameraCharuco.m +++ b/opencv_contrib/+cv/calibrateCameraCharuco.m @@ -1,44 +1,43 @@ %CALIBRATECAMERACHARUCO Calibrate a camera using Charuco corners % -% [cameraMatrix, distCoeffs, reprojErr] = cv.calibrateCameraCharuco(charucoCorners, charucoIds, board, imageSize) -% [cameraMatrix, distCoeffs, reprojErr, rvecs, tvecs, stdDevsIntrinsics, stdDevsExtrinsics, perViewErrors] = cv.calibrateCameraCharuco(...) -% [...] = cv.calibrateCameraCharuco(..., 'OptionName',optionValue, ...) +% [cameraMatrix, distCoeffs, reprojErr] = cv.calibrateCameraCharuco(charucoCorners, charucoIds, board, imageSize) +% [cameraMatrix, distCoeffs, reprojErr, rvecs, tvecs, stdDevsIntrinsics, stdDevsExtrinsics, perViewErrors] = cv.calibrateCameraCharuco(...) +% [...] = cv.calibrateCameraCharuco(..., 'OptionName',optionValue, ...) % % ## Input % * __charucoCorners__ cell-array of detected charuco corners per frame -% `{{[x,y],..}, ..}`. +% `{{[x,y],..}, ..}`. % * __charucoIds__ cell-array of identifiers for each corner in -% `charucoCorners` per frame (0-based) `{[id,..], ..}` +% `charucoCorners` per frame (0-based) `{[id,..], ..}` % * __board__ Marker board layout. % * __imageSize__ input image size `[w,h]`. % % ## Output % * __cameraMatrix__ Output 3x3 floating-point camera matrix -% `A = [fx 0 cx; 0 fy cy; 0 0 1]`. If any of `UseIntrinsicGuess`, -% `FixAspectRatio`, or `FixFocalLength` are specified, some or all of -% `fx`, `fy`, `cx`, `cy` must be initialized before calling the function. +% `A = [fx 0 cx; 0 fy cy; 0 0 1]`. If any of `UseIntrinsicGuess`, +% `FixAspectRatio`, or `FixFocalLength` are specified, some or all of `fx`, +% `fy`, `cx`, `cy` must be initialized before calling the function. % * __distCoeffs__ Output vector of distortion coefficients -% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4]` of 4, 5, 8 or 12 elements. +% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4]` of 4, 5, 8 or 12 elements. % * __reprojErr__ the overall RMS re-projection error. % * __rvecs__ Output cell-array of rotation vectors estimated for each board -% view. That is, each k-th rotation vector together with the -% corresponding k-th translation vector (see the next output parameter -% description) brings the board pattern from the model coordinate space -% (in which object points are specified) to the world coordinate space, -% that is, a real position of the board pattern in the k-th pattern view -% (`k=0..M-1`). +% view. 
That is, each k-th rotation vector together with the corresponding +% k-th translation vector (see the next output parameter description) brings +% the board pattern from the model coordinate space (in which object points +% are specified) to the world coordinate space, that is, a real position of +% the board pattern in the k-th pattern view (`k=0..M-1`). % * __tvecs__ Output cell-array of translation vectors estimated for each -% pattern view. +% pattern view. % * __stdDevsIntrinsics__ Output vector of standard deviations estimated for -% intrinsic parameters. Order of deviations values: -% `(fx,fy,cx,cy,k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy)`. If one -% of parameters is not estimated, its deviation is equals to zero. +% intrinsic parameters. Order of deviation values: +% `(fx,fy,cx,cy,k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,taux,tauy)`. If one of +% the parameters is not estimated, its deviation equals zero. % * __stdDevsExtrinsics__ Output vector of standard deviations estimated for -% extrinsic parameters. Order of deviations values: -% `(R1, T1, ..., RM, TM)` where `M` is number of pattern views, `Ri, Ti` -% are concatenated 1x3 vectors. +% extrinsic parameters. Order of deviation values: `(R1, T1, ..., RM, TM)` +% where `M` is the number of pattern views, `Ri, Ti` are concatenated 1x3 +% vectors. % * __perViewErrors__ Output vector of average re-projection errors estimated -% for each pattern view. +% for each pattern view. % % ## Options % * __CameraMatrix__, __DistCoeffs__, __UseIntrinsicGuess__, @@ -46,10 +45,10 @@ % __ZeroTangentDist__, __FixTangentDist__, __FixK1__, ..., __FixK6__, % __RationalModel__, __ThinPrismModel__, __FixS1S2S3S4__, __TiltedModel__, % __FixTauXTauY__, __UseLU__, __UseQR__ -% Different flags for the calibration process. See cv.calibrateCamera -% for details. +% Different flags for the calibration process. See cv.calibrateCamera for +% details. % * __Criteria__ Termination criteria for the iterative optimization algorithm. -% default `struct('type','Count+EPS', 'maxCount',30, 'epsilon',eps)` +% default `struct('type','Count+EPS', 'maxCount',30, 'epsilon',eps)` % % This function calibrates a camera using a set of corners of a Charuco Board. % The function receives a list of detected corners and their identifiers from
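To make the calling convention above concrete, a minimal ChArUco calibration sketch in MATLAB follows. The board geometry, dictionary name, frame count, and image file names are illustrative assumptions, not part of this patch:

    % hypothetical 5x7 ChArUco board: 4 cm squares, 2 cm markers, 4x4_50 dictionary
    board = {'CharucoBoard', 5, 7, 0.04, 0.02, '4x4_50'};
    allCorners = {}; allIds = {};
    for i = 1:10
        img = imread(sprintf('view%02d.png', i));  % assumed calibration views
        [mCorners, mIds] = cv.detectMarkers(img, '4x4_50');
        [cCorners, cIds] = cv.interpolateCornersCharuco(mCorners, mIds, img, board);
        allCorners{end+1} = cCorners;  %#ok<AGROW>
        allIds{end+1} = cIds;          %#ok<AGROW>
    end
    imageSize = [size(img,2) size(img,1)];  % [w,h]
    [camMatrix, distCoeffs, reprojErr] = cv.calibrateCameraCharuco(...
        allCorners, allIds, board, imageSize);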
diff --git a/opencv_contrib/+cv/covarianceEstimation.m b/opencv_contrib/+cv/covarianceEstimation.m index e13f2a8b8..b375a9a81 100644 --- a/opencv_contrib/+cv/covarianceEstimation.m +++ b/opencv_contrib/+cv/covarianceEstimation.m @@ -1,18 +1,18 @@ %COVARIANCEESTIMATION Computes the estimated covariance matrix of an image using the sliding window formulation % -% dst = cv.covarianceEstimation(src, windowSize) +% dst = cv.covarianceEstimation(src, windowSize) % % ## Input -% * __src__ The source image. Input image must be of a complex type -% and floating-point type. Input should be arranged as a 2-channels -% matrix `size(src,3)==2` (corresponding to real and imaginary parts). +% * __src__ The source image. Input image must be of a complex +% floating-point type. Input should be arranged as a 2-channels matrix +% `size(src,3)==2` (corresponding to real and imaginary parts). % * __windowSize__ The number of rows and cols in the window -% `[windowRows,windowCols]`. +% `[windowRows,windowCols]`. % % ## Output % * __dst__ The destination estimated covariance matrix. Output matrix will be -% of size `[windowRows*windowCols, windowRows*windowCols, 2]` (channels -% correspond to real and imaginary parts). +% of size `[windowRows*windowCols, windowRows*windowCols, 2]` (channels +% correspond to real and imaginary parts). % % The window size parameters control the accuracy of the estimation. The % sliding window moves over the entire image from the top-left corner to the diff --git a/opencv_contrib/+cv/dctDenoising.m b/opencv_contrib/+cv/dctDenoising.m index a601a639f..87af36732 100644 --- a/opencv_contrib/+cv/dctDenoising.m +++ b/opencv_contrib/+cv/dctDenoising.m @@ -1,11 +1,11 @@ %DCTDENOISING The function implements simple dct-based denoising % -% dst = cv.dctDenoising(src) -% dst = cv.dctDenoising(src, 'OptionName',optionValue, ...) +% dst = cv.dctDenoising(src) +% dst = cv.dctDenoising(src, 'OptionName',optionValue, ...) % % ## Input -% * __src__ source image (gray or RGB). -% Internally the function operates on `single` data type. +% * __src__ source image (gray or RGB). Internally the function operates on +% `single` data type. % % ## Output % * __dst__ destination image, same size and type as input `src`. diff --git a/opencv_contrib/+cv/detectCharucoDiamond.m b/opencv_contrib/+cv/detectCharucoDiamond.m index 696e93cdf..a4148b065 100644 --- a/opencv_contrib/+cv/detectCharucoDiamond.m +++ b/opencv_contrib/+cv/detectCharucoDiamond.m @@ -1,33 +1,32 @@ %DETECTCHARUCODIAMOND Detect ChArUco Diamond markers % -% [diamondCorners, diamondIds] = cv.detectCharucoDiamond(img, markerCorners, markerIds, squareMarkerLengthRate) -% [...] = cv.detectCharucoDiamond(..., 'OptionName',optionValue, ...) +% [diamondCorners, diamondIds] = cv.detectCharucoDiamond(img, markerCorners, markerIds, squareMarkerLengthRate) +% [...] = cv.detectCharucoDiamond(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ input image necessary for corner subpixel (8-bit grayscale or -% color). +% color). % * __markerCorners__ cell array of detected marker corners from -% cv.detectMarkers function `{{[x,y],..}, ..}`. +% cv.detectMarkers function `{{[x,y],..}, ..}`. % * __markerIds__ vector of marker ids in `markerCorners` (0-based). % * __squareMarkerLengthRate__ rate between square and marker length: -% `squareMarkerLengthRate = squareLength/markerLength`. -% The real units are not necessary. +% `squareMarkerLengthRate = squareLength/markerLength`. The real units are +% not necessary. % % ## Output % * __diamondCorners__ output list of detected diamond corners (4 corners per -% diamond). The order is the same than in marker corners: top left, -% top right, bottom right and bottom left. Similar format than the -% corners returned by cv.detectMarkers (e.g `{{[x y],..}, ..}`). +% diamond). The order is the same as in marker corners: top left, top +% right, bottom right and bottom left. Similar format to the corners +% returned by cv.detectMarkers (e.g `{{[x y],..}, ..}`). % * __diamondIds__ ids of the diamonds in `diamondCorners` (0-based). The id -% of each diamond is in fact a vector of four integers, so each diamond -% has 4 ids, which are the ids of the aruco markers composing the -% diamond. +% of each diamond is in fact a vector of four integers, so each diamond has +% 4 ids, which are the ids of the aruco markers composing the diamond. % % ## Options % * __CameraMatrix__ Optional 3x3 camera calibration matrix -% `A = [fx 0 cx; 0 fy cy; 0 0 1]`. +% `A = [fx 0 cx; 0 fy cy; 0 0 1]`. % * __DistCoeffs__ Optional vector of camera distortion coefficients -% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4]` of 4, 5, 8 or 12 elements. +% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4]` of 4, 5, 8 or 12 elements. % % This function detects Diamond markers from the previously detected ArUco % markers. The diamonds are returned in the `diamondCorners` and `diamondIds`
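A short detection sketch under assumed inputs (the image file, dictionary, and square/marker lengths are hypothetical):

    img = imread('scene.png');            % assumed input image
    [mCorners, mIds] = cv.detectMarkers(img, '4x4_50');
    rate = 0.04 / 0.025;                  % assumed squareLength/markerLength
    [dCorners, dIds] = cv.detectCharucoDiamond(img, mCorners, mIds, rate);
    out = cv.drawDetectedDiamonds(img, dCorners, 'IDs',dIds);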
diff --git a/opencv_contrib/+cv/detectMarkers.m b/opencv_contrib/+cv/detectMarkers.m index f1dc1954b..4c456fd67 100644 --- a/opencv_contrib/+cv/detectMarkers.m +++ b/opencv_contrib/+cv/detectMarkers.m @@ -1,136 +1,131 @@ %DETECTMARKERS Basic ArUco marker detection % -% [corners, ids] = cv.detectMarkers(img, dictionary) -% [corners, ids, rejectedImgPoints] = cv.detectMarkers(img, dictionary) -% [...] = cv.detectMarkers(..., 'OptionName',optionValue, ...) +% [corners, ids] = cv.detectMarkers(img, dictionary) +% [corners, ids, rejectedImgPoints] = cv.detectMarkers(img, dictionary) +% [...] = cv.detectMarkers(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ input image (8-bit grayscale or color). -% * __dictionary__ indicates the type of markers that will be searched. -% You can specify the dictionary as a cell-array that starts with the -% type name followed by option arguments `{Type, ...}`. -% There are three types of dictionaries available: -% * __Predefined__ `{'Predefined', name}` or simply as a string `name`. -% Returns one of the predefined dictionaries. -% * __Custom__ `{'Custom', nMarkers, markerSize, 'BaseDictionary',baseDict}`. -% Generates a new customizable marker dictionary. This creates a -% new dictionary composed by `nMarkers` markers and each markers -% composed by `markerSize*markerSize` bits. If `BaseDictionary` is -% provided, its markers are directly included and the rest are -% generated based on them. If the size of `baseDict` is higher -% than `nMarkers`, only the first `nMarkers` in `baseDict` are -% taken and no new marker is added. -% * __Manual__ `{'Manual', bytesList, markerSize, maxCorrectionBits}`. -% Creates a dictionary/set of markers manually. It contains the -% inner codification. +% * __dictionary__ indicates the type of markers that will be searched. You +% can specify the dictionary as a cell-array that starts with the type name +% followed by option arguments `{Type, ...}`. There are three types of +% dictionaries available: +% * __Predefined__ `{'Predefined', name}` or simply as a string `name`. +% Returns one of the predefined dictionaries. +% * __Custom__ `{'Custom', nMarkers, markerSize, 'BaseDictionary',baseDict}`. +% Generates a new customizable marker dictionary. This creates a new +% dictionary composed of `nMarkers` markers, each composed of +% `markerSize*markerSize` bits. If `BaseDictionary` is provided, its +% markers are directly included and the rest are generated based on them. +% If the size of `baseDict` is larger than `nMarkers`, only the first +% `nMarkers` in `baseDict` are taken and no new marker is added. +% * __Manual__ `{'Manual', bytesList, markerSize, maxCorrectionBits}`. +% Creates a dictionary/set of markers manually. It contains the inner +% codification. % % ## Output % * __corners__ cell array of detected marker corners. For each marker, its -% four corners are provided `{{[x1,y1],[x2,y2],[x3,y3],[x4,y4]}, ..}`. -% The order of the corners is clockwise. +% four corners are provided `{{[x1,y1],[x2,y2],[x3,y3],[x4,y4]}, ..}`. The +% order of the corners is clockwise. % * __ids__ vector of identifiers of the detected markers. The identifier is -% of integer type (0-based). For N detected markers, the size of ids is -% also N. The identifiers have the same order than the markers in the -% `corners` array.
-% * __rejectedImgPoints__ contains the `corners` of those squares whose -% inner code has not a correct codification. Useful for debugging -% purposes. +% of integer type (0-based). For N detected markers, the size of ids is also +% N. The identifiers have the same order as the markers in the `corners` +% array. +% * __rejectedImgPoints__ contains the `corners` of those squares whose inner +% code does not have a correct codification. Useful for debugging purposes. % % ## Options -% * __DetectorParameters__ marker detection parameters. A struct of -% parameters for the detection process, with the following fields: -% * __adaptiveThreshWinSizeMin__ minimum window size for adaptive -% thresholding before finding contours (default 3). -% * __adaptiveThreshWinSizeMax__ maximum window size for adaptive -% thresholding before finding contours (default 23). -% * __adaptiveThreshWinSizeStep__ increments from -% `AdaptiveThreshWinSizeMin` to `AdaptiveThreshWinSizeMax` during -% the thresholding (default 10). -% * __adaptiveThreshConstant__ constant for adaptive thresholding before -% finding contours (default 7) -% * __minMarkerPerimeterRate__ determine minimum perimeter for marker -% contour to be detected. This is defined as a rate respect to the -% maximum dimension of the input image (default 0.03). -% * __maxMarkerPerimeterRate__ determine maximum perimeter for marker -% contour to be detected. This is defined as a rate respect to the -% maximum dimension of the input image (default 4.0). -% * __polygonalApproxAccuracyRate__ minimum accuracy during the -% polygonal approximation process to determine which contours are -% squares (default 0.03). -% * __minCornerDistanceRate__ minimum distance between corners for -% detected markers relative to its perimeter (default 0.05) -% * __minDistanceToBorder__ minimum distance of any corner to the image -% border for detected markers (in pixels) (default 3) -% * __minMarkerDistanceRate__ minimum mean distance beetween two marker -% corners to be considered similar, so that the smaller one is -% removed. The rate is relative to the smaller perimeter of the -% two markers (default 0.05). -% * __cornerRefinementMethod__ corner refinement method, one of: -% * __None__ (default) no refinement. -% * __Subpix__ do subpixel refinement (cv.cornerSubPix). -% * __Contour__ refine the corners using the contour-points. -% * __cornerRefinementWinSize__ window size for the corner refinement -% process (in pixels) (default 5). -% * __cornerRefinementMaxIterations__ maximum number of iterations for -% stop criteria of the corner refinement process (default 30). -% * __cornerRefinementMinAccuracy__ minimum error for the stop cristeria -% of the corner refinement process (default 0.1) -% * __markerBorderBits__ number of bits of the marker border, i.e. -% marker border width (default 1). -% * __perpectiveRemovePixelPerCell__ number of bits (per dimension) for -% each cell of the marker when removing the perspective -% (default 4). -% * __perspectiveRemoveIgnoredMarginPerCell__ width of the margin of -% pixels on each cell not considered for the determination of the -% cell bit. Represents the rate respect to the total size of the -% cell, i.e. `PerpectiveRemovePixelPerCell` (default 0.13) -% * __maxErroneousBitsInBorderRate__ maximum number of accepted -% erroneous bits in the border (i.e. number of allowed white bits -% in the border). Represented as a rate respect to the total -% number of bits per marker (default 0.35).
-% * __minOtsuStdDev__ minimun standard deviation in pixels values during -% the decodification step to apply Otsu thresholding (otherwise, -% all the bits are set to 0 or 1 depending on mean higher than 128 -% or not) (default 5.0) -% * __errorCorrectionRate__error correction rate respect to the maximum -% error correction capability for each dictionary. (default 0.6). +% * __DetectorParameters__ marker detection parameters. A struct of parameters +% for the detection process, with the following fields: +% * __adaptiveThreshWinSizeMin__ minimum window size for adaptive +% thresholding before finding contours (default 3). +% * __adaptiveThreshWinSizeMax__ maximum window size for adaptive +% thresholding before finding contours (default 23). +% * __adaptiveThreshWinSizeStep__ increments from `AdaptiveThreshWinSizeMin` +% to `AdaptiveThreshWinSizeMax` during the thresholding (default 10). +% * __adaptiveThreshConstant__ constant for adaptive thresholding before +% finding contours (default 7) +% * __minMarkerPerimeterRate__ determines the minimum perimeter for a marker +% contour to be detected. This is defined as a rate with respect to the +% maximum dimension of the input image (default 0.03). +% * __maxMarkerPerimeterRate__ determines the maximum perimeter for a marker +% contour to be detected. This is defined as a rate with respect to the +% maximum dimension of the input image (default 4.0). +% * __polygonalApproxAccuracyRate__ minimum accuracy during the polygonal +% approximation process to determine which contours are squares +% (default 0.03). +% * __minCornerDistanceRate__ minimum distance between corners for detected +% markers relative to their perimeter (default 0.05) +% * __minDistanceToBorder__ minimum distance of any corner to the image +% border for detected markers (in pixels) (default 3) +% * __minMarkerDistanceRate__ minimum mean distance between two marker +% corners to be considered similar, so that the smaller one is removed. +% The rate is relative to the smaller perimeter of the two markers +% (default 0.05). +% * __cornerRefinementMethod__ corner refinement method, one of: +% * __None__ (default) no refinement. +% * __Subpix__ do subpixel refinement (cv.cornerSubPix). +% * __Contour__ refine the corners using the contour-points. +% * __cornerRefinementWinSize__ window size for the corner refinement +% process (in pixels) (default 5). +% * __cornerRefinementMaxIterations__ maximum number of iterations for the +% stop criteria of the corner refinement process (default 30). +% * __cornerRefinementMinAccuracy__ minimum error for the stop criteria of +% the corner refinement process (default 0.1) +% * __markerBorderBits__ number of bits of the marker border, i.e. marker +% border width (default 1). +% * __perspectiveRemovePixelPerCell__ number of bits (per dimension) for each +% cell of the marker when removing the perspective (default 4). +% * __perspectiveRemoveIgnoredMarginPerCell__ width of the margin of pixels +% on each cell not considered for the determination of the cell bit. +% Represents the rate with respect to the total size of the cell, i.e. +% `PerspectiveRemovePixelPerCell` (default 0.13) +% * __maxErroneousBitsInBorderRate__ maximum number of accepted erroneous +% bits in the border (i.e. number of allowed white bits in the border). +% Represented as a rate with respect to the total number of bits per marker +% (default 0.35).
+% * __minOtsuStdDev__ minimum standard deviation in pixel values during the +% decodification step to apply Otsu thresholding (otherwise, all the bits +% are set to 0 or 1 depending on whether the mean is higher than 128 or +% not) (default 5.0) +% * __errorCorrectionRate__ error correction rate with respect to the maximum +% error correction capability for each dictionary. (default 0.6). % * __CameraMatrix__ Optional 3x3 camera calibration matrix -% `A = [fx 0 cx; 0 fy cy; 0 0 1]`. +% `A = [fx 0 cx; 0 fy cy; 0 0 1]`. % * __DistCoeffs__ Optional vector of camera distortion coefficients -% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4]` of 4, 5, 8 or 12 elements. +% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4]` of 4, 5, 8 or 12 elements. % % ## Inputs for Predefined Dictionary % * __name__ name of predefined markers dictionaries/sets. Each dictionary -% indicates the number of bits and the number of markers contained: -% * `4x4_50`, `4x4_100`, `4x4_250`, `4x4_1000`: 4x4 bits with -% (50|100|250|1000) markers. -% * `5x5_50`, `5x5_100`, `5x5_250`, `5x5_1000`: 5x5 bits with -% (50|100|250|1000) markers. -% * `6x6_50`, `6x6_100`, `6x6_250`, `6x6_1000`: 6x6 bits with -% (50|100|250|1000) markers. -% * `7x7_50`, `7x7_100`, `7x7_250`, `7x7_1000`: 7x7 bits with -% (50|100|250|1000) markers. -% * `ArucoOriginal`: standard ArUco Library Markers. -% 1024 markers, 5x5 bits, 0 minimum distance. +% indicates the number of bits and the number of markers contained: +% * `4x4_50`, `4x4_100`, `4x4_250`, `4x4_1000`: 4x4 bits with +% (50|100|250|1000) markers. +% * `5x5_50`, `5x5_100`, `5x5_250`, `5x5_1000`: 5x5 bits with +% (50|100|250|1000) markers. +% * `6x6_50`, `6x6_100`, `6x6_250`, `6x6_1000`: 6x6 bits with +% (50|100|250|1000) markers. +% * `7x7_50`, `7x7_100`, `7x7_250`, `7x7_1000`: 7x7 bits with +% (50|100|250|1000) markers. +% * `ArucoOriginal`: standard ArUco Library Markers. 1024 markers, 5x5 bits, +% 0 minimum distance. % % ## Inputs for Custom Dictionary % * __nMarkers__ number of markers in the dictionary. % * __markerSize__ number of bits per dimension of each marker. % * __baseDict__ (Optional) include the markers in this dictionary at the -% beginning. It is specified in the same format as the parent dictionary -% (i.e. a cell-array `{Type, ...}`). +% beginning. It is specified in the same format as the parent dictionary +% (i.e. a cell-array `{Type, ...}`). % % ## Inputs for Manual Dictionary % * __bytesList__ marker code information. Note that the `bytesList` is given -% in the form of a matrix of bits which is then converted to list of -% bytes in the 4 rotations using `Dictionary::getByteListFromBits`. -% It contains the marker codewords where: -% * the number of rows is the dictionary size -% * each marker is encoded using `nbytes = ceil(markerSize*markerSize/8)` -% * each row contains all 4 rotations of the marker, so its length is -% `4*nbytes` -% * `bytesList(i,k*nbytes+j)` is then the j-th byte of i-th marker, in -% its k-th rotation. +% in the form of a matrix of bits which is then converted to a list of bytes +% in the 4 rotations using `Dictionary::getByteListFromBits`. It contains +% the marker codewords where: +% * the number of rows is the dictionary size +% * each marker is encoded using `nbytes = ceil(markerSize*markerSize/8)` +% * each row contains all 4 rotations of the marker, so its length is +% `4*nbytes` +% * `bytesList(i,k*nbytes+j)` is then the j-th byte of the i-th marker, in +% its k-th rotation. % * __markerSize__ number of bits per dimension. % * __maxCorrectionBits__ maximum number of bits that can be corrected. %
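A minimal detection sketch; the image file is an assumption and the dictionary is one of the predefined sets listed above:

    img = imread('markers.png');          % assumed input image
    [corners, ids, rejected] = cv.detectMarkers(img, '6x6_250');
    out = cv.drawDetectedMarkers(img, corners, 'IDs',ids);
    imshow(out), title(sprintf('%d markers detected', numel(ids)))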
diff --git a/opencv_contrib/+cv/dictionaryDump.m b/opencv_contrib/+cv/dictionaryDump.m index 356c8c934..b459dfb31 100644 --- a/opencv_contrib/+cv/dictionaryDump.m +++ b/opencv_contrib/+cv/dictionaryDump.m @@ -1,16 +1,16 @@ %DICTIONARYDUMP Dump dictionary (aruco) % -% s = cv.dictionaryDump(dictionary) +% s = cv.dictionaryDump(dictionary) % % ## Input % * __dictionary__ dictionary of markers. % % ## Output % * __s__ Output struct with the following fields: -% * __bytesList__ -% * __markerSize__ -% * __maxCorrectionBits__ -% * __bits__ +% * __bytesList__ +% * __markerSize__ +% * __maxCorrectionBits__ +% * __bits__ % % See also: cv.detectMarkers, cv.boardDump % diff --git a/opencv_contrib/+cv/drawAxis.m b/opencv_contrib/+cv/drawAxis.m index b143f4a82..fef5d118f 100644 --- a/opencv_contrib/+cv/drawAxis.m +++ b/opencv_contrib/+cv/drawAxis.m @@ -1,18 +1,18 @@ %DRAWAXIS Draw coordinate system axis from pose estimation % -% img = cv.drawAxis(img, cameraMatrix, distCoeffs, rvec, tvec, length) +% img = cv.drawAxis(img, cameraMatrix, distCoeffs, rvec, tvec, length) % % ## Input % * __img__ input image. It must have 1 or 3 channels. In the output, the -% number of channels is not altered. +% number of channels is not altered. % * __cameraMatrix__ input 3x3 floating-point camera matrix -% `A = [fx 0 cx; 0 fy cy; 0 0 1]`. +% `A = [fx 0 cx; 0 fy cy; 0 0 1]`. % * __distCoeffs__ vector of distortion coefficients -% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4]` of 4, 5, 8 or 12 elements. +% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4]` of 4, 5, 8 or 12 elements. % * __rvec__ rotation vector of the coordinate system that will be drawn. % * __tvec__ translation vector of the coordinate system that will be drawn. % * __length__ length of the painted axis in the same unit as `tvec` -% (usually in meters). +% (usually in meters). % % ## Output % * __img__ output image. diff --git a/opencv_contrib/+cv/drawCharucoBoard.m b/opencv_contrib/+cv/drawCharucoBoard.m index c7dd20f9a..66d2a7eae 100644 --- a/opencv_contrib/+cv/drawCharucoBoard.m +++ b/opencv_contrib/+cv/drawCharucoBoard.m @@ -1,7 +1,7 @@ %DRAWCHARUCOBOARD Draw a ChArUco board % -% img = cv.drawCharucoBoard(board, outSize) -% img = cv.drawCharucoBoard(..., 'OptionName',optionValue, ...) +% img = cv.drawCharucoBoard(board, outSize) +% img = cv.drawCharucoBoard(..., 'OptionName',optionValue, ...) % % ## Input % * __board__ ChArUco board that will be drawn. @@ -9,12 +9,12 @@ % % ## Output % * __img__ output image with the board. The size of this image will be -% `outSize` and the board will be on the center, keeping the board -% proportions. +% `outSize` and the board will be in the center, keeping the board +% proportions. % % ## Options % * __MarginSize__ minimum margins (in pixels) of the board in the output -% image. default 0 +% image. default 0 % * __BorderBits__ width of the marker borders. default 1 % % This function returns the image of the ChArUco board, ready to be printed.
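For instance, a printable board image could be generated as below (board geometry, output size, and file name are assumptions):

    board = {'CharucoBoard', 5, 7, 0.04, 0.02, '4x4_50'};
    img = cv.drawCharucoBoard(board, [600 840], 'MarginSize',10);
    imwrite(img, 'charuco_board.png')     % assumed output file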
diff --git a/opencv_contrib/+cv/drawCharucoDiamond.m b/opencv_contrib/+cv/drawCharucoDiamond.m index 4befd4c1e..09614bb1b 100644 --- a/opencv_contrib/+cv/drawCharucoDiamond.m +++ b/opencv_contrib/+cv/drawCharucoDiamond.m @@ -1,7 +1,7 @@ %DRAWCHARUCODIAMOND Draw a ChArUco Diamond marker % -% img = cv.drawCharucoDiamond(dictionary, ids, squareLength, markerLength) -% img = cv.drawCharucoDiamond(..., 'OptionName',optionValue, ...) +% img = cv.drawCharucoDiamond(dictionary, ids, squareLength, markerLength) +% img = cv.drawCharucoDiamond(..., 'OptionName',optionValue, ...) % % ## Input % * __dictionary__ dictionary of markers indicating the type of markers. @@ -11,11 +11,11 @@ % % ## Output % * __img__ output image with the marker. The size of this image will be -% `3*squareLength + 2*marginSize`. +% `3*squareLength + 2*marginSize`. % % ## Options % * __MarginSize__ minimum margins (in pixels) of the marker in the output -% image. default 0 +% image. default 0 % * __BorderBits__ width of the marker borders. default 1 % % This function returns the image of a ChArUco marker, ready to be printed. diff --git a/opencv_contrib/+cv/drawDetectedCornersCharuco.m b/opencv_contrib/+cv/drawDetectedCornersCharuco.m index a1d9b020d..fd557546f 100644 --- a/opencv_contrib/+cv/drawDetectedCornersCharuco.m +++ b/opencv_contrib/+cv/drawDetectedCornersCharuco.m @@ -1,11 +1,11 @@ %DRAWDETECTEDCORNERSCHARUCO Draws a set of Charuco corners % -% img = cv.drawDetectedCornersCharuco(img, charucoCorners) -% img = cv.drawDetectedCornersCharuco(..., 'OptionName',optionValue, ...) +% img = cv.drawDetectedCornersCharuco(img, charucoCorners) +% img = cv.drawDetectedCornersCharuco(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ input image. It must have 1 or 3 channels. In the output, the -% number of channels is not altered. +% number of channels is not altered. % * __charucoCorners__ cell array of detected charuco corners `{[x,y], ..}`. % % ## Output @@ -14,7 +14,7 @@ % ## Options % * __IDs__ Optional vector of identifiers for each corner in `charucoCorners`. % * __CornerColor__ color of the square surrounding each corner. -% default [255,0,0]. +% default [255,0,0]. % % This function draws a set of detected Charuco corners. If an identifiers % vector is provided, it also draws the id of each corner. diff --git a/opencv_contrib/+cv/drawDetectedDiamonds.m b/opencv_contrib/+cv/drawDetectedDiamonds.m index d156289f7..e26305525 100644 --- a/opencv_contrib/+cv/drawDetectedDiamonds.m +++ b/opencv_contrib/+cv/drawDetectedDiamonds.m @@ -1,26 +1,24 @@ %DRAWDETECTEDDIAMONDS Draw a set of detected ChArUco Diamond markers % -% img = cv.drawDetectedDiamonds(img, diamondCorners) -% img = cv.drawDetectedDiamonds(..., 'OptionName',optionValue, ...) +% img = cv.drawDetectedDiamonds(img, diamondCorners) +% img = cv.drawDetectedDiamonds(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ input image. It must have 1 or 3 channels. In the output, the -% number of channels is not altered. +% number of channels is not altered. % * __diamondCorners__ positions of diamond corners in the same format -% returned by cv.detectCharucoDiamond. (e.g `{{[x y],..}, ..}`). The -% order of the corners should be clockwise. +% returned by cv.detectCharucoDiamond. (e.g `{{[x y],..}, ..}`). The order +% of the corners should be clockwise. % % ## Output % * __img__ output image. % % ## Options % * __IDs__ vector of identifiers for diamonds in `diamondCorners` (0-based), -% in the same format returned by cv.detectCharucoDiamond (e.g. cell -% array of 4-integer vectors). Optional, if not provided, ids are -% not painted. +% in the same format returned by cv.detectCharucoDiamond (e.g. cell array of +% 4-integer vectors). Optional, if not provided, ids are not painted. % * __BorderColor__ color of marker borders. Rest of colors (text color and -% first corner color) are calculated based on this one. -% Default [0,0,255].
+% first corner color) are calculated based on this one. Default [0,0,255]. % % Given an array of detected diamonds, this function draws them in the image. % The marker borders are painted and the marker identifiers if provided. diff --git a/opencv_contrib/+cv/drawDetectedMarkers.m b/opencv_contrib/+cv/drawDetectedMarkers.m index 0e8bf312c..3eba7de2f 100644 --- a/opencv_contrib/+cv/drawDetectedMarkers.m +++ b/opencv_contrib/+cv/drawDetectedMarkers.m @@ -1,23 +1,23 @@ %DRAWDETECTEDMARKERS Draw detected markers in image % -% img = cv.drawDetectedMarkers(img, corners) -% img = cv.drawDetectedMarkers(..., 'OptionName',optionValue, ...) +% img = cv.drawDetectedMarkers(img, corners) +% img = cv.drawDetectedMarkers(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ input image. It must have 1 or 3 channels. In the output, the -% number of channels is not altered. +% number of channels is not altered. % * __corners__ positions of marker corners on input image. (e.g -% `{{[x,y],..}, ..}`). The order of the corners should be clockwise. +% `{{[x,y],..}, ..}`). The order of the corners should be clockwise. % % ## Output % * __img__ output image. % % ## Options % * __IDs__ vector of identifiers for markers in `corners` (0-based). -% Optional, if not provided, ids are not painted. +% Optional, if not provided, ids are not painted. % * __BorderColor__ color of marker borders. Rest of colors (text color and -% first corner color) are calculated based on this one to improve -% visualization. default [0,255,0]. +% first corner color) are calculated based on this one to improve +% visualization. default [0,255,0]. % % Given an array of detected marker corners and their corresponding ids, this % function draws the markers in the image. The marker borders are painted diff --git a/opencv_contrib/+cv/drawKeylines.m b/opencv_contrib/+cv/drawKeylines.m index 248cdb8f4..1f72d9f75 100644 --- a/opencv_contrib/+cv/drawKeylines.m +++ b/opencv_contrib/+cv/drawKeylines.m @@ -1,7 +1,7 @@ %DRAWKEYLINES Draws keylines % -% outImg = cv.drawKeylines(im, keylines) -% outImg = cv.drawKeylines(im, keylines, 'OptionName', optionValue, ...) +% outImg = cv.drawKeylines(im, keylines) +% outImg = cv.drawKeylines(im, keylines, 'OptionName', optionValue, ...) % % ## Input % * __im__ input image. @@ -12,10 +12,10 @@ % % ## Options % * __Color__ color of lines to be drawn (if set to the default value, color is -% chosen randomly). default [-1,-1,-1,-1]. +% chosen randomly). default [-1,-1,-1,-1]. % * __OutImage__ If set, keylines will be drawn on existing content of output -% image, otherwise source image is used instead. Not set by default. -% (i.e keylines are drawn on top of `im`). +% image, otherwise source image is used instead. Not set by default. +% (i.e keylines are drawn on top of `im`). % % See also: cv.drawKeypoints, cv.drawLineMatches, cv.LSDDetector, % cv.BinaryDescriptor diff --git a/opencv_contrib/+cv/drawLineMatches.m b/opencv_contrib/+cv/drawLineMatches.m index f5092cf87..2b8de0ce4 100644 --- a/opencv_contrib/+cv/drawLineMatches.m +++ b/opencv_contrib/+cv/drawLineMatches.m @@ -1,14 +1,13 @@ %DRAWLINEMATCHES Draws the found matches of keylines from two images % -% outImg = cv.drawLineMatches(img1, keypoints1, img2, keypoints2, matches1to2) -% outImg = cv.drawLineMatches(..., 'OptionName', optionValue, ...) +% outImg = cv.drawLineMatches(img1, keypoints1, img2, keypoints2, matches1to2) +% outImg = cv.drawLineMatches(..., 'OptionName', optionValue, ...) % % ## Input % * __img1__ First image.
% * __keypoints1__ keylines extracted from first image. % * __img2__ Second image. % * __keypoints2__ keylines extracted from second image. -% `keypoints1`. % * __matches1to2__ vector of matches. % % ## Output @@ -16,14 +15,14 @@ % % ## Options % * __MatchColor__ drawing color for matches (chosen randomly in case of -% default value). default [-1,-1,-1,-1]. +% default value). default [-1,-1,-1,-1]. % * __SingleLineColor__ drawing color for keylines (chosen randomly in case of -% default value). default [-1,-1,-1,-1]. +% default value). default [-1,-1,-1,-1]. % * __MatchesMask__ mask to indicate which matches must be drawn. -% default empty. +% default empty. % * __NotDrawSingleLines__ Single keylines will not be drawn. default false % * __OutImage__ If set, matches will be drawn on existing content of output -% image, otherwise source images are used instead. Not set by default. +% image, otherwise source images are used instead. Not set by default. % % If both `MatchColor` and `SingleLineColor` are set to their default values, % the function draws matched lines and the lines connecting them with the same color. diff --git a/opencv_contrib/+cv/drawMarkerAruco.m b/opencv_contrib/+cv/drawMarkerAruco.m index d220a4d0b..9903ce88d 100644 --- a/opencv_contrib/+cv/drawMarkerAruco.m +++ b/opencv_contrib/+cv/drawMarkerAruco.m @@ -1,12 +1,12 @@ %DRAWMARKERARUCO Draw a canonical marker image % -% img = cv.drawMarkerAruco(dictionary, id, sidePixels) -% img = cv.drawMarkerAruco(..., 'OptionName',optionValue, ...) +% img = cv.drawMarkerAruco(dictionary, id, sidePixels) +% img = cv.drawMarkerAruco(..., 'OptionName',optionValue, ...) % % ## Input % * __dictionary__ dictionary of markers indicating the type of markers. % * __id__ identifier of the marker that will be returned. It has to be a -% valid id in the specified dictionary (0-based). +% valid id in the specified dictionary (0-based). % * __sidePixels__ size of the image in pixels. % % ## Output @@ -15,8 +15,8 @@ % ## Options % * __BorderBits__ width of the marker border. default 1 % -% This function returns a marker image in its canonical form (i.e. ready to -% be printed). +% This function returns a marker image in its canonical form (i.e. ready to be +% printed). % % See also: cv.estimatePoseBoard, cv.drawPlanarBoard, cv.drawCharucoBoard % diff --git a/opencv_contrib/+cv/drawPlanarBoard.m b/opencv_contrib/+cv/drawPlanarBoard.m index 766d9473e..073e5097a 100644 --- a/opencv_contrib/+cv/drawPlanarBoard.m +++ b/opencv_contrib/+cv/drawPlanarBoard.m @@ -1,21 +1,21 @@ %DRAWPLANARBOARD Draw a planar board % -% img = cv.drawPlanarBoard(board, outSize) -% img = cv.drawPlanarBoard(..., 'OptionName',optionValue, ...) +% img = cv.drawPlanarBoard(board, outSize) +% img = cv.drawPlanarBoard(..., 'OptionName',optionValue, ...) % % ## Input % * __board__ layout of the board that will be drawn. The board should be -% planar, z coordinate is ignored. +% planar, z coordinate is ignored. % * __outSize__ size of the output image in pixels `[w,h]`. % % ## Output % * __img__ output image with the board. The size of this image will be -% `outSize` and the board will be on the center, keeping the board -% proportions. +% `outSize` and the board will be in the center, keeping the board +% proportions. % % ## Options % * __MarginSize__ minimum margins (in pixels) of the board in the output -% image. default 0 +% image. default 0 % * __BorderBits__ width of the marker borders. default 1 % % This function returns the image of a planar board, ready to be printed. It
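As a sketch of generating printable marker and board images (marker id, sizes, and file names are assumptions):

    marker = cv.drawMarkerAruco('6x6_250', 23, 200);  % id 23, 200x200 pixels
    imwrite(marker, 'marker23.png')                   % assumed output file
    board = {'GridBoard', 5, 7, 0.04, 0.01, '6x6_250'};
    img = cv.drawPlanarBoard(board, [600 840], 'MarginSize',10);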
diff --git a/opencv_contrib/+cv/estimatePoseBoard.m b/opencv_contrib/+cv/estimatePoseBoard.m index 5b042d0d7..ea76f58ed 100644 --- a/opencv_contrib/+cv/estimatePoseBoard.m +++ b/opencv_contrib/+cv/estimatePoseBoard.m @@ -1,82 +1,78 @@ %ESTIMATEPOSEBOARD Pose estimation for a board of markers % -% [rvec, tvec, num] = cv.estimatePoseBoard(corners, ids, board, cameraMatrix, distCoeffs) -% [rvec, tvec, num] = cv.estimatePoseBoard(..., 'OptionName',optionValue, ...) +% [rvec, tvec, num] = cv.estimatePoseBoard(corners, ids, board, cameraMatrix, distCoeffs) +% [rvec, tvec, num] = cv.estimatePoseBoard(..., 'OptionName',optionValue, ...) % % ## Input % * __corners__ cell array of already detected marker corners. For each -% marker, its four corners are provided, (e.g `{{[x,y],..}, ..}`). The -% order of the corners should be clockwise. +% marker, its four corners are provided, (e.g `{{[x,y],..}, ..}`). The order +% of the corners should be clockwise. % * __ids__ list of identifiers for each marker in `corners` (0-based). % * __board__ layout of markers in the board. The layout is composed of the -% marker identifiers and the positions of each marker corner in the -% board reference system. -% You can specify the board as a cell-array that starts with the -% type name followed by option arguments `{Type, ...}`. -% There are three types of boards available: -% * __Board__ `{'Board', objPoints, dictionary, ids}`. -% Creates a board of markers. -% * __GridBoard__ `{'GridBoard', markersX, markersY, markerLength, markerSeparation, dictionary, 'FirstMarker',firstMarker}`. -% Creates a a GridBoard object given the number of markers in each -% direction and the marker size and marker separation. -% * __CharucoBoard__ `{'GridBoard', squaresX, squaresY, squareLength, markerLength, dictionary}`. -% Creates a CharucoBoard object given the number of squares in -% each direction and the size of the markers and chessboard -% squares. +% marker identifiers and the positions of each marker corner in the board +% reference system. You can specify the board as a cell-array that starts +% with the type name followed by option arguments `{Type, ...}`. There are +% three types of boards available: +% * __Board__ `{'Board', objPoints, dictionary, ids}`. +% Creates a board of markers. +% * __GridBoard__ `{'GridBoard', markersX, markersY, markerLength, markerSeparation, dictionary, 'FirstMarker',firstMarker}`. +% Creates a GridBoard object given the number of markers in each +% direction and the marker size and marker separation. +% * __CharucoBoard__ `{'CharucoBoard', squaresX, squaresY, squareLength, markerLength, dictionary}`. +% Creates a CharucoBoard object given the number of squares in each +% direction and the size of the markers and chessboard squares. % * __cameraMatrix__ input 3x3 floating-point camera matrix -% `A = [fx 0 cx; 0 fy cy; 0 0 1]`. +% `A = [fx 0 cx; 0 fy cy; 0 0 1]`. % * __distCoeffs__ vector of distortion coefficients -% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4]` of 4, 5, 8 or 12 elements. +% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4]` of 4, 5, 8 or 12 elements. % % ## Output % * __rvec__ Output vector `[x,y,z]` corresponding to the rotation vector of -% the board. +% the board. % * __tvec__ Output vector `[x,y,z]` corresponding to the translation vector -% of the board. +% of the board. % * __num__ The number of markers from the input employed for the board pose -% estimation. Note that returning a 0 means the pose has not been -% estimated. +% estimation.
Note that returning a 0 means the pose has not been estimated. % % ## Options % * __Rvec__, __Tvec__ Initial `rvec` and `tvec`. Used as initial guess if not -% empty. The function uses the provided values as initial approximations -% of the rotation and translation vectors, respectively, and further -% optimizes them. Not set by default. +% empty. The function uses the provided values as initial approximations of +% the rotation and translation vectors, respectively, and further optimizes +% them. Not set by default. % * __UseExtrinsicGuess__ defines whether initial guess for `rvec` and `tvec` -% will be used or not. default false. +% will be used or not. default false. % % ## Inputs for Board % * __objPoints__ array of object points of all the marker corners in the -% board, i.e. their coordinates with respect to the board system. Each -% marker include its 4 corners in CCW order -% `{{[x1,y1,z1],[x2,y2,z2],[x3,y3,z3],[x4,y4,z4]}, ..}`. Usually object -% points are planar with Z=0. +% board, i.e. their coordinates with respect to the board system. Each +% marker includes its 4 corners in CCW order +% `{{[x1,y1,z1],[x2,y2,z2],[x3,y3,z3],[x4,y4,z4]}, ..}`. Usually object +% points are planar with Z=0. % * __dictionary__ the dictionary of markers employed for this board. This is -% specified in the same format described in cv.detectMarkers. +% specified in the same format described in cv.detectMarkers. % * __ids__ vector of the identifiers of the markers in the board (same size -% as `objPoints`). The identifiers refers to the board dictionary -% (0-based). +% as `objPoints`). The identifiers refer to the board dictionary (0-based). % % ## Inputs for GridBoard % * __markersX__ number of markers in X direction. % * __markersY__ number of markers in Y direction. % * __markerLength__ marker side length (normally in meters). % * __markerSeparation__ separation between two markers in the grid (same unit -% as `markerLength`). +% as `markerLength`). % * __dictionary__ dictionary of markers indicating the type of markers. This -% is specified in the same format described in cv.detectMarkers. +% is specified in the same format described in cv.detectMarkers. % * __FirstMarker__ optional 0-based id of first marker in dictionary to use -% on board. default 0 +% on board. default 0 % % ## Inputs for CharucoBoard % * __squaresX__ number of chessboard squares in X direction. % * __squaresY__ number of chessboard squares in Y direction. % * __squareLength__ chessboard square side length (normally in meters). % * __markerLength__ marker side length (same unit as `squareLength`) % * __dictionary__ dictionary of markers indicating the type of markers. -% The first markers in the dictionary are used to fill the white -% chessboard squares. This is specified in the same format described in -% cv.detectMarkers. +% * __dictionary__ dictionary of markers indicating the type of markers. The +% first markers in the dictionary are used to fill the white chessboard +% squares. This is specified in the same format described in +% cv.detectMarkers. % % The cv.estimatePoseBoard function receives the detected markers and returns % the pose of a marker board composed of those markers. A board of markers has
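A board-pose sketch; the grid geometry is assumed, and `camMatrix`/`distCoeffs` are assumed to come from a prior calibration:

    img = imread('board_view.png');       % assumed input image
    [corners, ids] = cv.detectMarkers(img, '6x6_250');
    board = {'GridBoard', 5, 7, 0.04, 0.01, '6x6_250'};
    [rvec, tvec, num] = cv.estimatePoseBoard(corners, ids, board, camMatrix, distCoeffs);
    if num > 0  % pose was estimated from at least one marker
        img = cv.drawAxis(img, camMatrix, distCoeffs, rvec, tvec, 0.1);
    end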
diff --git a/opencv_contrib/+cv/estimatePoseCharucoBoard.m b/opencv_contrib/+cv/estimatePoseCharucoBoard.m index bf8dc304f..24d197dbc 100644 --- a/opencv_contrib/+cv/estimatePoseCharucoBoard.m +++ b/opencv_contrib/+cv/estimatePoseCharucoBoard.m @@ -1,33 +1,33 @@ %ESTIMATEPOSECHARUCOBOARD Pose estimation for a ChArUco board given some of its corners % -% [rvec, tvec, valid] = cv.estimatePoseCharucoBoard(charucoCorners, charucoIds, board, cameraMatrix, distCoeffs) -% [...] = cv.estimatePoseCharucoBoard(..., 'OptionName',optionValue, ...) +% [rvec, tvec, valid] = cv.estimatePoseCharucoBoard(charucoCorners, charucoIds, board, cameraMatrix, distCoeffs) +% [...] = cv.estimatePoseCharucoBoard(..., 'OptionName',optionValue, ...) % % ## Input % * __charucoCorners__ cell array of detected charuco corners `{[x,y], ..}`. % * __charucoIds__ vector of identifiers for each corner in `charucoCorners` -% (0-based). +% (0-based). % * __board__ layout of ChArUco board. % * __cameraMatrix__ input 3x3 floating-point camera matrix -% `A = [fx 0 cx; 0 fy cy; 0 0 1]`. +% `A = [fx 0 cx; 0 fy cy; 0 0 1]`. % * __distCoeffs__ vector of distortion coefficients -% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4]` of 4, 5, 8 or 12 elements. +% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4]` of 4, 5, 8 or 12 elements. % % ## Output % * __rvec__ Output vector corresponding to the rotation vector of the board. % * __tvec__ Output vector corresponding to the translation vector of the -% board. +% board. % * __valid__ The function checks if the input corners are enough and valid -% to perform pose estimation. If pose estimation is valid, returns true, -% else returns false. +% to perform pose estimation. If pose estimation is valid, returns true, +% else returns false. % % ## Options % * __Rvec__, __Tvec__ Initial `rvec` and `tvec`. Used as initial guess if not -% empty. The function uses the provided values as initial approximations -% of the rotation and translation vectors, respectively, and further -% optimizes them. Not set by default. +% empty. The function uses the provided values as initial approximations of +% the rotation and translation vectors, respectively, and further optimizes +% them. Not set by default. % * __UseExtrinsicGuess__ defines whether initial guess for `rvec` and `tvec` -% will be used or not. default false. +% will be used or not. default false. % % This function estimates a Charuco board pose from some detected corners. % diff --git a/opencv_contrib/+cv/estimatePoseSingleMarkers.m b/opencv_contrib/+cv/estimatePoseSingleMarkers.m index 83ad20745..fbf48aa3a 100644 --- a/opencv_contrib/+cv/estimatePoseSingleMarkers.m +++ b/opencv_contrib/+cv/estimatePoseSingleMarkers.m @@ -1,29 +1,27 @@ %ESTIMATEPOSESINGLEMARKERS Pose estimation for single markers % -% [rvecs, tvecs] = cv.estimatePoseSingleMarkers(corners, markerLength, cameraMatrix, distCoeffs) -% [rvecs, tvecs, objPoints] = cv.estimatePoseSingleMarkers(...) +% [rvecs, tvecs] = cv.estimatePoseSingleMarkers(corners, markerLength, cameraMatrix, distCoeffs) +% [rvecs, tvecs, objPoints] = cv.estimatePoseSingleMarkers(...) % % ## Input % * __corners__ cell array of already detected marker corners. For each -% marker, its four corners are provided, (e.g `{{[x,y],..}, ..}`). The -% order of the corners should be clockwise. +% marker, its four corners are provided, (e.g `{{[x,y],..}, ..}`). The order +% of the corners should be clockwise. % * __markerLength__ the length of the markers' side.
The returned -% translation vectors will be in the same unit. Normally, unit is meters. +% translation vectors will be in the same unit. Normally, the unit is meters. % * __cameraMatrix__ input 3x3 floating-point camera matrix -% `A = [fx 0 cx; 0 fy cy; 0 0 1]`. +% `A = [fx 0 cx; 0 fy cy; 0 0 1]`. % * __distCoeffs__ vector of distortion coefficients -% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4]` of 4, 5, 8 or 12 elements. +% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4]` of 4, 5, 8 or 12 elements. % % ## Output % * __rvecs__ cell array of output rotation vectors (e.g. `{[x,y,z], ...}`). -% Each element in `rvecs` corresponds to the specific marker in -% `corners`. +% Each element in `rvecs` corresponds to the specific marker in `corners`. % * __tvecs__ cell array of output translation vectors (e.g. `{[x,y,z], ...}`). -% Each element in `tvecs` corresponds to the specific marker in -% `corners`. +% Each element in `tvecs` corresponds to the specific marker in `corners`. % * __objPoints__ optional output, array of object points of the marker four -% corners (object points for the system centered in a single marker -% given the marker length, see explanation below). +% corners (object points for the system centered in a single marker given +% the marker length, see explanation below). % % This function receives the detected markers and returns their pose % estimation with respect to the camera individually. So for each marker, one @@ -34,10 +32,10 @@ % The coordinates of the four corners of the marker in its own coordinate % system are: % -% (-markerLength/2, markerLength/2, 0), -% ( markerLength/2, markerLength/2, 0), -% ( markerLength/2, -markerLength/2, 0), -% (-markerLength/2, -markerLength/2, 0) +% (-markerLength/2, markerLength/2, 0), +% ( markerLength/2, markerLength/2, 0), +% ( markerLength/2, -markerLength/2, 0), +% (-markerLength/2, -markerLength/2, 0) % % See also: cv.detectMarkers, cv.estimatePoseBoard, cv.Rodrigues, cv.solvePnP %
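A per-marker pose sketch; `camMatrix`/`distCoeffs` are assumed known, and the 5 cm marker side length is an assumption:

    img = imread('markers.png');          % assumed input image
    [corners, ids] = cv.detectMarkers(img, '6x6_250');
    [rvecs, tvecs] = cv.estimatePoseSingleMarkers(corners, 0.05, camMatrix, distCoeffs);
    for i = 1:numel(rvecs)                % draw one axis set per marker
        img = cv.drawAxis(img, camMatrix, distCoeffs, rvecs{i}, tvecs{i}, 0.025);
    end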
diff --git a/opencv_contrib/+cv/getBoardObjectAndImagePoints.m b/opencv_contrib/+cv/getBoardObjectAndImagePoints.m index 2267707b1..6799b2c44 100644 --- a/opencv_contrib/+cv/getBoardObjectAndImagePoints.m +++ b/opencv_contrib/+cv/getBoardObjectAndImagePoints.m @@ -1,6 +1,6 @@ %GETBOARDOBJECTANDIMAGEPOINTS Given a board configuration and a set of detected markers, returns the corresponding image points and object points to call solvePnP % -% [objPoints, imgPoints] = cv.getBoardObjectAndImagePoints(board, corners, ids) +% [objPoints, imgPoints] = cv.getBoardObjectAndImagePoints(board, corners, ids) % % ## Input % * __board__ Marker board layout. @@ -9,9 +9,9 @@ % % ## Output % * __objPoints__ Cell array of board marker points in the board coordinate -% space `{[x y z], ...}` +% space `{[x y z], ...}` % * __imgPoints__ Cell array of the projections of board marker corner points -% `{[x y], ...}` +% `{[x y], ...}` % % See also: cv.estimatePoseBoard, cv.solvePnP % diff --git a/opencv_contrib/+cv/inpaint2.m b/opencv_contrib/+cv/inpaint2.m index 387c60344..61e4adafa 100644 --- a/opencv_contrib/+cv/inpaint2.m +++ b/opencv_contrib/+cv/inpaint2.m @@ -1,28 +1,26 @@ %INPAINT2 The function implements different single-image inpainting algorithms % -% dst = cv.inpaint2(src, mask) -% dst = cv.inpaint2(src, mask, 'OptionName', optionValue, ...) +% dst = cv.inpaint2(src, mask) +% dst = cv.inpaint2(src, mask, 'OptionName', optionValue, ...) % % ## Input % * __src__ source image, it could be of any type (8/16/32-bit integers or -% 32/64-bit floating-points) and any number of channels from 1 to 4. In -% case of 3- and 4-channels images the function expect them in CIELab -% colorspace or similar one, where first color component shows -% intensity, while second and third shows colors. Nonetheless you can -% try any colorspaces. +% 32/64-bit floating-points) and any number of channels from 1 to 4. In case +% of 3- and 4-channel images the function expects them in CIELab or a +% similar colorspace, where the first color component shows intensity, while +% the second and third show colors. Nonetheless, you can try any colorspace. % * __mask__ mask (8-bit 1-channel of same size as `src`), where non-zero -% pixels indicate valid image area, while zero pixels indicate area to -% be inpainted. +% pixels indicate valid image area, while zero pixels indicate area to be +% inpainted. % % ## Output % * __dst__ Output image with the same size and type as `src`. % % ## Options % * __Method__ Inpainting algorithms, one of: -% * __ShiftMap__ (default) This algorithm searches for dominant -% correspondences (transformations) of image patches and tries to -% seamlessly fill-in the area to be inpainted using this -% transformations. +% * __ShiftMap__ (default) This algorithm searches for dominant +% correspondences (transformations) of image patches and tries to +% seamlessly fill in the area to be inpainted using these transformations. % % The function reconstructs the selected image area from the known area. % See the original paper [He2012] for details. diff --git a/opencv_contrib/+cv/interpolateCornersCharuco.m b/opencv_contrib/+cv/interpolateCornersCharuco.m index 2ab9d2024..84839cb8e 100644 --- a/opencv_contrib/+cv/interpolateCornersCharuco.m +++ b/opencv_contrib/+cv/interpolateCornersCharuco.m @@ -1,17 +1,17 @@ %INTERPOLATECORNERSCHARUCO Interpolate position of ChArUco board corners % -% [charucoCorners, charucoIds, num] = cv.interpolateCornersCharuco(markerCorners, markerIds, img, board) -% [...] = cv.interpolateCornersCharuco(..., 'OptionName',optionValue, ...) +% [charucoCorners, charucoIds, num] = cv.interpolateCornersCharuco(markerCorners, markerIds, img, board) +% [...] = cv.interpolateCornersCharuco(..., 'OptionName',optionValue, ...) % % ## Input % * __markerCorners__ cell array of already detected marker corners. For each -% marker, its four corners are provided, (e.g `{{[x,y],..}, ..}`). -% The order of the corners should be clockwise. +% marker, its four corners are provided, (e.g `{{[x,y],..}, ..}`). The order +% of the corners should be clockwise. % * __markerIds__ vector of identifiers for each marker in `markerCorners` -% (0-based). +% (0-based). % * __img__ input image necessary for corner refinement (8-bit grayscale or -% color). Note that markers are not detected and should be sent in -% `markerCorners` and `markerIds` parameters. +% color). Note that markers are not detected and should be sent in +% `markerCorners` and `markerIds` parameters. % * __board__ layout of ChArUco board. % % ## Output @@ -21,11 +21,11 @@ % % ## Options % * __CameraMatrix__ optional 3x3 floating-point camera matrix -% `A = [fx 0 cx; 0 fy cy; 0 0 1]`. +% `A = [fx 0 cx; 0 fy cy; 0 0 1]`. % * __DistCoeffs__ optional vector of distortion coefficients -% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4]` of 4, 5, 8 or 12 elements. +% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4]` of 4, 5, 8 or 12 elements.
% * __MinMarkers__ number of adjacent markers that must be detected to return -% a charuco corner. default 2 +% a charuco corner. default 2 % % This function receives the detected markers and returns the 2D position of % the chessboard corners from a ChArUco board using the detected Aruco diff --git a/opencv_contrib/+cv/jointBilateralFilter.m b/opencv_contrib/+cv/jointBilateralFilter.m index f9f414442..800cefdbc 100644 --- a/opencv_contrib/+cv/jointBilateralFilter.m +++ b/opencv_contrib/+cv/jointBilateralFilter.m @@ -1,11 +1,11 @@ %JOINTBILATERALFILTER Applies the joint bilateral filter to an image % -% dst = cv.jointBilateralFilter(src, joint) -% dst = cv.jointBilateralFilter(src, joint, 'OptionName',optionValue, ...) +% dst = cv.jointBilateralFilter(src, joint) +% dst = cv.jointBilateralFilter(src, joint, 'OptionName',optionValue, ...) % % ## Input % * __src__ Source 8-bit or floating-point, 1-channel or 3-channel image with -% the same depth as `joint` image. +% the same depth as `joint` image. % * __joint__ Joint 8-bit or floating-point, 1-channel or 3-channel image. % % ## Output @@ -13,19 +13,19 @@ % % ## Options % * __Diameter__ Diameter of each pixel neighborhood that is used during -% filtering. If it is non-positive, it is computed from `SigmaSpace`. -% default -1 +% filtering. If it is non-positive, it is computed from `SigmaSpace`. +% default -1 % * __SigmaColor__ Filter sigma in the color space. A larger value of the -% parameter means that farther colors within the pixel neighborhood (see -% `SigmaSpace`) will be mixed together, resulting in larger areas of -% semi-equal color. default 25.0 +% parameter means that farther colors within the pixel neighborhood (see +% `SigmaSpace`) will be mixed together, resulting in larger areas of +% semi-equal color. default 25.0 % * __SigmaSpace__ Filter sigma in the coordinate space. A larger value of the -% parameter means that farther pixels will influence each other as long -% as their colors are close enough (see `SigmaColor`). When `Diameter>0`, -% it specifies the neighborhood size regardless of `SigmaSpace`. -% Otherwise, `Diameter` is proportional to `SigmaSpace`. default 10.0 +% parameter means that farther pixels will influence each other as long as +% their colors are close enough (see `SigmaColor`). When `Diameter>0`, it +% specifies the neighborhood size regardless of `SigmaSpace`. Otherwise, +% `Diameter` is proportional to `SigmaSpace`. default 10.0 % * __BorderType__ Border mode used to extrapolate pixels outside of the -% image. See cv.copyMakeBorder. default 'Default' +% image. See cv.copyMakeBorder. default 'Default' % % Note that cv.bilateralFilter and cv.jointBilateralFilter use L1 norm to % compute difference between colors. diff --git a/opencv_contrib/+cv/l0Smooth.m b/opencv_contrib/+cv/l0Smooth.m index 8b650dbc2..39c25d215 100644 --- a/opencv_contrib/+cv/l0Smooth.m +++ b/opencv_contrib/+cv/l0Smooth.m @@ -1,11 +1,11 @@ %L0SMOOTH Global image smoothing via L0 gradient minimization % -% dst = cv.l0Smooth(src) -% dst = cv.l0Smooth(src, 'OptionName',optionValue, ...) +% dst = cv.l0Smooth(src) +% dst = cv.l0Smooth(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ source image for filtering with unsigned 8-bit or signed 16-bit or -% floating-point depth. +% floating-point depth. % % ## Output % * __dst__ destination image. @@ -13,7 +13,7 @@ % ## Options % * __Lambda__ parameter defining the smooth term weight. 
default 0.02 % * __Kappa__ parameter defining the increasing factor of the weight of the -% gradient data term. default 2.0 +% gradient data term. default 2.0 % % For more details about L0 Smoother, see the original paper [xu2011image]. % diff --git a/opencv_contrib/+cv/niBlackThreshold.m b/opencv_contrib/+cv/niBlackThreshold.m index bbf1aa258..aaba8c5ae 100644 --- a/opencv_contrib/+cv/niBlackThreshold.m +++ b/opencv_contrib/+cv/niBlackThreshold.m @@ -1,69 +1,69 @@ %NIBLACKTHRESHOLD Performs thresholding on input images using Niblack's technique or some of the popular variations it inspired % -% dst = cv.niBlackThreshold(src, k) -% dst = cv.niBlackThreshold(src, k, 'OptionName',optionValue, ...) +% dst = cv.niBlackThreshold(src, k) +% dst = cv.niBlackThreshold(src, k, 'OptionName',optionValue, ...) % % ## Input % * __src__ Source 8-bit single-channel image. % * __k__ The user-adjustable parameter used by Niblack and inspired -% techniques. For Niblack, this is normally a value between 0 and 1 -% (its absolute value) that is multiplied with the standard deviation -% and subtracted from the mean (`mean + k * std`). +% techniques. For Niblack, this is normally a value between 0 and 1 +% (its absolute value) that is multiplied with the standard deviation and +% subtracted from the mean (`mean + k * std`). % % ## Output % * __dst__ Destination image of the same size and the same type as `src`. % % ## Options % * __MaxValue__ Non-zero value assigned to the pixels for which the condition -% is satisfied, used with the `Binary` and `BinaryInv` thresholding -% types. default 255 +% is satisfied, used with the `Binary` and `BinaryInv` thresholding types. +% default 255 % * __Type__ Thresholding type, default 'Binary'. One of: -% * __Binary__ `dst(x,y) = (src(x,y) > thresh) ? maxValue : 0` -% * __BinaryInv__ `dst(x,y) = (src(x,y) > thresh) ? 0 : maxValue` -% * __Trunc__ `dst(x,y) = (src(x,y) > thresh) ? thresh : src(x,y)` -% * __ToZero__ `dst(x,y) = (src(x,y) > thresh) ? src(x,y) : 0` -% * __ToZeroInv__ `dst(x,y) = (src(x,y) > thresh) ? 0 : src(x,y)` +% * __Binary__ `dst(x,y) = (src(x,y) > thresh) ? maxValue : 0` +% * __BinaryInv__ `dst(x,y) = (src(x,y) > thresh) ? 0 : maxValue` +% * __Trunc__ `dst(x,y) = (src(x,y) > thresh) ? thresh : src(x,y)` +% * __ToZero__ `dst(x,y) = (src(x,y) > thresh) ? src(x,y) : 0` +% * __ToZeroInv__ `dst(x,y) = (src(x,y) > thresh) ? 0 : src(x,y)` % * __BlockSize__ Size of a pixel neighborhood that is used to calculate a -% threshold value for the pixel: 3, 5, 7, and so on. default 5 +% threshold value for the pixel: 3, 5, 7, and so on. default 5 % * __Method__ Binarization method to use. By default, Niblack's technique is -% used. Other techniques can be specified: -% * __Niblack__ Classic Niblack binarization. See [Niblack1985]. -% * __Sauvola__ Sauvola's technique. See [Sauvola1997]. -% * __Wolf__ Wolf's technique. See [Wolf2004]. -% * __Nick__ NICK technique. See [Khurshid2009]. +% used. Other techniques can be specified: +% * __Niblack__ Classic Niblack binarization. See [Niblack1985]. +% * __Sauvola__ Sauvola's technique. See [Sauvola1997]. +% * __Wolf__ Wolf's technique. See [Wolf2004]. +% * __Nick__ NICK technique. See [Khurshid2009]. 
% % The function transforms a grayscale image to a binary image according to the % formulae: % % * __Binary__ % -% | maxValue, if src(x,y) > T(x,y) -% dst(x,y) = | -% | 0, otherwise +% | maxValue, if src(x,y) > T(x,y) +% dst(x,y) = | +% | 0, otherwise % % * __BinaryInv__ % -% | 0, if src(x,y) > T(x,y) -% dst(x,y) = | -% | maxValue, otherwise +% | 0, if src(x,y) > T(x,y) +% dst(x,y) = | +% | maxValue, otherwise % % * __Trunc__ % -% | T(x,y), if src(x,y) > T(x,y) -% dst(x,y) = | -% | src(x,y), otherwise +% | T(x,y), if src(x,y) > T(x,y) +% dst(x,y) = | +% | src(x,y), otherwise % % * __ToZero__ % -% | src(x,y), if src(x,y) > T(x,y) -% dst(x,y) = | -% | 0, otherwise +% | src(x,y), if src(x,y) > T(x,y) +% dst(x,y) = | +% | 0, otherwise % % * __ToZeroInv__ % -% | 0, if src(x,y) > T(x,y) -% dst(x,y) = | -% | src(x,y), otherwise +% | 0, if src(x,y) > T(x,y) +% dst(x,y) = | +% | src(x,y), otherwise % % where `T(x,y)` is a local threshold calculated individually for each pixel. % diff --git a/opencv_contrib/+cv/private/.gitignore b/opencv_contrib/+cv/private/.gitkeep similarity index 100% rename from opencv_contrib/+cv/private/.gitignore rename to opencv_contrib/+cv/private/.gitkeep diff --git a/opencv_contrib/+cv/readOpticalFlow.m b/opencv_contrib/+cv/readOpticalFlow.m index 6264ff575..4ee51af99 100644 --- a/opencv_contrib/+cv/readOpticalFlow.m +++ b/opencv_contrib/+cv/readOpticalFlow.m @@ -1,14 +1,14 @@ %READOPTICALFLOW Read a .flo file % -% flow = cv.readOpticalFlow(path) +% flow = cv.readOpticalFlow(path) % % ## Input % * __path__ Path to the file to be loaded. % % ## Output % * __flow__ Flow field of `single` floating-point type, 2-channel. First -% channel corresponds to the flow in the horizontal direction (`u`), -% second - vertical (`v`). +% channel corresponds to the flow in the horizontal direction (`u`), second +% the vertical (`v`). % % Function for reading .flo files in "Middlebury" format, see: % [Middlebury](http://vision.middlebury.edu/flow/code/flow-code/README.txt) diff --git a/opencv_contrib/+cv/refineDetectedMarkers.m b/opencv_contrib/+cv/refineDetectedMarkers.m index 35e80938b..1745c1681 100644 --- a/opencv_contrib/+cv/refineDetectedMarkers.m +++ b/opencv_contrib/+cv/refineDetectedMarkers.m @@ -1,39 +1,39 @@ %REFINEDETECTEDMARKERS Refind not detected markers based on the already detected and the board layout % -% [detectedCorners, detectedIds, rejectedCorners] = cv.refineDetectedMarkers(img, board, detectedCorners, detectedIds, rejectedCorners) -% [detectedCorners, detectedIds, rejectedCorners, recoveredIdxs] = cv.refineDetectedMarkers(...) -% [...] = cv.refineDetectedMarkers(..., 'OptionName',optionValue, ...) +% [detectedCorners, detectedIds, rejectedCorners] = cv.refineDetectedMarkers(img, board, detectedCorners, detectedIds, rejectedCorners) +% [detectedCorners, detectedIds, rejectedCorners, recoveredIdxs] = cv.refineDetectedMarkers(...) +% [...] = cv.refineDetectedMarkers(..., 'OptionName',optionValue, ...) % % ## Input % * __img__ input image (8-bit grayscale or color). % * __board__ layout of markers in the board. % * __detectedCorners__ cell array of already detected marker corners -% `{{[x,y],..}, ..}`. +% `{{[x,y],..}, ..}`. % * __detectedIds__ vector of already detected marker identifiers (0-based). % * __rejectedCorners__ cell array of rejected candidates during the marker -% detection process `{{[x,y],..}, ..}`. +% detection process `{{[x,y],..}, ..}`. % % ## Output % * __detectedCorners__ output refined marker corners. 
% * __detectedIds__ output refined marker identifiers. % * __rejectedCorners__ output refined rejected corners. % * __recoveredIdxs__ Optional array that returns the indexes of the recovered -% candidates in the original `rejectedCorners` array. +% candidates in the original `rejectedCorners` array. % % ## Options % * __CameraMatrix__ optional input 3x3 floating-point camera matrix -% `A = [fx 0 cx; 0 fy cy; 0 0 1]`. +% `A = [fx 0 cx; 0 fy cy; 0 0 1]`. % * __DistCoeffs__ optional vector of distortion coefficients -% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4]` of 4, 5, 8 or 12 elements. +% `[k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4]` of 4, 5, 8 or 12 elements. % * __MinRepDistance__ minimum distance between the corners of the rejected -% candidate and the reprojected marker in order to consider it as a -% correspondence. default 10.0 +% candidate and the reprojected marker in order to consider it as a +% correspondence. default 10.0 % * __ErrorCorrectionRate__ rate of allowed erroneous bits with respect to the -% error correction capability of the used dictionary. -1 ignores the -% error correction step. default 3.0 +% error correction capability of the used dictionary. -1 ignores the error +% correction step. default 3.0 % * __CheckAllOrders__ Consider the four possible corner orders in the -% `rejectedCorners` array. If it set to false, only the provided corner -% order is considered (default true). +% `rejectedCorners` array. If it is set to false, only the provided corner +% order is considered (default true). % * __DetectorParameters__ marker detection parameters. % % This function tries to find markers that were not detected in the basic diff --git a/opencv_contrib/+cv/rollingGuidanceFilter.m b/opencv_contrib/+cv/rollingGuidanceFilter.m index 6be40b96c..826616d25 100644 --- a/opencv_contrib/+cv/rollingGuidanceFilter.m +++ b/opencv_contrib/+cv/rollingGuidanceFilter.m @@ -1,7 +1,7 @@ %ROLLINGGUIDANCEFILTER Applies the rolling guidance filter to an image % -% dst = cv.rollingGuidanceFilter(src) -% dst = cv.rollingGuidanceFilter(src, 'OptionName',optionValue, ...) +% dst = cv.rollingGuidanceFilter(src) +% dst = cv.rollingGuidanceFilter(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ Source 8-bit or floating-point, 1-channel or 3-channel image. @@ -11,21 +11,21 @@ % % ## Options % * __Diameter__ Diameter of each pixel neighborhood that is used during -% filtering. If it is non-positive, it is computed from `SigmaSpace`. -% default -1 +% filtering. If it is non-positive, it is computed from `SigmaSpace`. +% default -1 % * __SigmaColor__ Filter sigma in the color space. A larger value of the -% parameter means that farther colors within the pixel neighborhood (see -% `SigmaSpace`) will be mixed together, resulting in larger areas of -% semi-equal color. default 25.0 +% parameter means that farther colors within the pixel neighborhood (see +% `SigmaSpace`) will be mixed together, resulting in larger areas of +% semi-equal color. default 25.0 % * __SigmaSpace__ Filter sigma in the coordinate space. A larger value of the -% parameter means that farther pixels will influence each other as long -% as their colors are close enough (see `SigmaColor`). When `Diameter>0`, -% it specifies the neighborhood size regardless of `SigmaSpace`. -% Otherwise, `Diameter` is proportional to `SigmaSpace`. default 3.0 +% parameter means that farther pixels will influence each other as long as +% their colors are close enough (see `SigmaColor`).
When `Diameter>0`, it +% specifies the neighborhood size regardless of `SigmaSpace`. Otherwise, +% `Diameter` is proportional to `SigmaSpace`. default 3.0 % * __NumIter__ Number of iterations of joint edge-preserving filtering -% applied on the source image. default 4 +% applied on the source image. default 4 % * __BorderType__ Border mode used to extrapolate pixels outside of the -% image. See cv.copyMakeBorder. default 'Default' +% image. See cv.copyMakeBorder. default 'Default' % % For more details, please see [zhang2014rolling]. % diff --git a/opencv_contrib/+cv/segmentMotion.m b/opencv_contrib/+cv/segmentMotion.m index 803af8874..da68bf80d 100644 --- a/opencv_contrib/+cv/segmentMotion.m +++ b/opencv_contrib/+cv/segmentMotion.m @@ -1,16 +1,16 @@ %SEGMENTMOTION Splits a motion history image into a few parts corresponding to separate independent motions (for example, left hand, right hand) % -% [segmask, boundingRects] = cv.segmentMotion(mhi, timestamp, segThresh) +% [segmask, boundingRects] = cv.segmentMotion(mhi, timestamp, segThresh) % % ## Input % * __mhi__ Motion history image. % * __timestamp__ Current time in milliseconds or other units. % * __segThresh__ Segmentation threshold that is recommended to be equal to -% the interval between motion history "steps" or greater. +% the interval between motion history "steps" or greater. % % ## Output % * __segmask__ Image where the found mask should be stored, single-channel, -% 32-bit floating-point. +% 32-bit floating-point. % * __boundingRects__ Vector containing ROIs of motion connected components. % % The function finds all of the motion segments and marks them in `segmask` diff --git a/opencv_contrib/+cv/thinning.m b/opencv_contrib/+cv/thinning.m index 0cb2d5477..4774dc2fa 100644 --- a/opencv_contrib/+cv/thinning.m +++ b/opencv_contrib/+cv/thinning.m @@ -1,19 +1,19 @@ %THINNING Applies a binary blob thinning operation, to achieve a skeletization of the input image % -% dst = cv.thinning(src) -% dst = cv.thinning(src, 'OptionName',optionValue, ...) +% dst = cv.thinning(src) +% dst = cv.thinning(src, 'OptionName',optionValue, ...) % % ## Input % * __src__ Source 8-bit single-channel image, containing binary blobs, with -% blobs having 255 pixel values. +% blobs having 255 pixel values. % % ## Output % * __dst__ Destination image of the same size and the same type as `src`. % % ## Options % * __ThinningType__ Which thinning algorithm should be used. One of: -% * __ZhangSuen__ Thinning technique of Zhang-Suen (default). -% * __GuoHall__ Thinning technique of Guo-Hall. +% * __ZhangSuen__ Thinning technique of Zhang-Suen (default). +% * __GuoHall__ Thinning technique of Guo-Hall. % % The function transforms a binary blob image into a skeletized form using the % technique of Zhang-Suen. diff --git a/opencv_contrib/+cv/updateMotionHistory.m b/opencv_contrib/+cv/updateMotionHistory.m index 28ab39bd0..822023a25 100644 --- a/opencv_contrib/+cv/updateMotionHistory.m +++ b/opencv_contrib/+cv/updateMotionHistory.m @@ -1,25 +1,25 @@ %UPDATEMOTIONHISTORY Updates the motion history image by a moving silhouette % -% mhi = cv.updateMotionHistory(silhouette, mhi, timestamp, duration) +% mhi = cv.updateMotionHistory(silhouette, mhi, timestamp, duration) % % ## Input % * __silhouette__ Silhouette mask that has non-zero pixels where the motion -% occurs. +% occurs. % * __mhi__ Input motion history image to be updated by the function -% (single-channel, 32-bit floating-point). +% (single-channel, 32-bit floating-point). 
% * __timestamp__ Current time in milliseconds or other units. % * __duration__ Maximal duration of the motion track in the same units as -% timestamp. +% timestamp. % % ## Output % * __mhi__ Updated motion history image (single-channel, 32-bit -% floating-point). +% floating-point). % % The function updates the motion history image as follows: % -% | timestamp if silhouette(x,y) != 0 -% mhi(x,y) = | 0 if silhouette(x,y) == 0 and mhi < (timestamp < duration) -% | mhi(x,y) otherwise +% | timestamp if silhouette(x,y) != 0 +% mhi(x,y) = | 0 if silhouette(x,y) == 0 and mhi(x,y) < (timestamp - duration) +% | mhi(x,y) otherwise % % That is, `mhi` pixels where the motion occurs are set to the current % timestamp, while the pixels where the motion happened last time a long time diff --git a/opencv_contrib/+cv/weightedMedianFilter.m b/opencv_contrib/+cv/weightedMedianFilter.m index 8ae9e633a..b346ebd7d 100644 --- a/opencv_contrib/+cv/weightedMedianFilter.m +++ b/opencv_contrib/+cv/weightedMedianFilter.m @@ -1,7 +1,7 @@ %WEIGHTEDMEDIANFILTER Applies weighted median filter to an image % -% dst = cv.weightedMedianFilter(src, joint) -% dst = cv.weightedMedianFilter(src, joint, 'OptionName',optionValue, ...) +% dst = cv.weightedMedianFilter(src, joint) +% dst = cv.weightedMedianFilter(src, joint, 'OptionName',optionValue, ...) % % ## Input % * __src__ Source 8-bit or floating-point, 1-channel or 3-channel image. @@ -12,22 +12,21 @@ % % ## Options % * __Radius__ Radius of filtering kernel, should be a positive integer. -% Default 7 +% Default 7 % * __Sigma__ Filter range standard deviation for the joint image. -% Default 25.5 +% Default 25.5 % * __WeightType__ The type of weight definition. Specifies weight types of -% weighted median filter, default 'EXP'. One of: -% * __EXP__ `exp(-|I1-I2|^2 / (2*sigma^2))` -% * __IV1__ `(|I1-I2| + sigma)^-1` -% * __IV2__ `(|I1-I2|^2 + sigma^2)^-1` -% * __COS__ `dot(I1,I2) / (|I1|*|I2|)` -% * __JAC__ `(min(r1,r2) + min(g1,g2) + min(b1,b2)) / (max(r1,r2) + max(g1,g2) + max(b1,b2))` -% * __OFF__ unweighted +% weighted median filter, default 'EXP'. One of: +% * __EXP__ `exp(-|I1-I2|^2 / (2*sigma^2))` +% * __IV1__ `(|I1-I2| + sigma)^-1` +% * __IV2__ `(|I1-I2|^2 + sigma^2)^-1` +% * __COS__ `dot(I1,I2) / (|I1|*|I2|)` +% * __JAC__ `(min(r1,r2) + min(g1,g2) + min(b1,b2)) / (max(r1,r2) + max(g1,g2) + max(b1,b2))` +% * __OFF__ unweighted % * __Mask__ A 0-1 mask that has the same size as `I`. This mask is used to -% ignore the effect of some pixels. If the pixel value on mask is 0, the -% pixel will be ignored when maintaining the joint-histogram. This is -% useful for applications like optical flow occlusion handling. -% Not set by default. +% ignore the effect of some pixels. If the pixel value on mask is 0, the +% pixel will be ignored when maintaining the joint-histogram. This is useful +% for applications like optical flow occlusion handling. Not set by default. % % For more details about this implementation, please see [zhang2014100+]. % diff --git a/opencv_contrib/+cv/writeOpticalFlow.m b/opencv_contrib/+cv/writeOpticalFlow.m index 70788a94d..4a82931f4 100644 --- a/opencv_contrib/+cv/writeOpticalFlow.m +++ b/opencv_contrib/+cv/writeOpticalFlow.m @@ -1,7 +1,7 @@ %WRITEOPTICALFLOW Write a .flo file to disk % -% cv.writeOpticalFlow(path, flow) -% success = cv.writeOpticalFlow(path, flow) +% cv.writeOpticalFlow(path, flow) +% success = cv.writeOpticalFlow(path, flow) % % ## Input % * __path__ Path to the file to be written.
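For reference, the two motion-template functions documented above chain together as frame differencing, then cv.updateMotionHistory, then cv.segmentMotion. A minimal sketch, assuming mexopencv is on the path, a camera at index 0 running at roughly 30 fps, and illustrative values for the difference threshold and MHI duration (none of these constants come from the patch itself):

    MHI_DURATION = 1.0;                    % max motion-track duration, in seconds (assumed)
    cap = cv.VideoCapture(0);
    prev = cv.cvtColor(cap.read(), 'RGB2GRAY');
    mhi = zeros(size(prev), 'single');     % single-channel 32-bit float MHI
    for i = 1:100
        frame = cv.cvtColor(cap.read(), 'RGB2GRAY');
        % moving-silhouette mask: non-zero where consecutive frames differ noticeably
        silh = uint8(abs(single(frame) - single(prev)) > 30);
        ts = i / 30;                       % current timestamp, in seconds
        mhi = cv.updateMotionHistory(silh, mhi, ts, MHI_DURATION);
        [segmask, rects] = cv.segmentMotion(mhi, ts, MHI_DURATION/2);
        prev = frame;
    end
    cap.release();

Here `rects` holds one bounding box per independent motion component, which is typically what gets visualized.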
diff --git a/opencv_contrib/lib/.gitignore b/opencv_contrib/lib/.gitkeep similarity index 100% rename from opencv_contrib/lib/.gitignore rename to opencv_contrib/lib/.gitkeep diff --git a/opencv_contrib/samples/BackgroundSubtractorDemo.m b/opencv_contrib/samples/BackgroundSubtractorDemo.m index e82870d22..adc0e40d8 100644 --- a/opencv_contrib/samples/BackgroundSubtractorDemo.m +++ b/opencv_contrib/samples/BackgroundSubtractorDemo.m @@ -17,12 +17,14 @@ % "A benchmark dataset for outdoor foreground/background extraction". In % Computer Vision-ACCV 2012 Workshops, pages 291-300. Springer, 2013. % -% , -% , -% , -% , -% , -% +% Sources: +% +% * +% * +% * +% * +% * +% * % %% Theory @@ -36,7 +38,7 @@ % static part of the scene or, more in general, everything that can be % considered as background given the characteristics of the observed scene. % -% <> +% <> % % Background modeling consists of two main steps: % diff --git a/opencv_contrib/samples/SIFT_detector.m b/opencv_contrib/samples/SIFT_detector.m index dc60fecbe..9d6c0dc2f 100644 --- a/opencv_contrib/samples/SIFT_detector.m +++ b/opencv_contrib/samples/SIFT_detector.m @@ -5,7 +5,9 @@ % * about the concepts of SIFT algorithm % * how to find SIFT Keypoints and Descriptors. % -% +% Sources: +% +% * % %% Theory @@ -18,7 +20,7 @@ % small image within a small window is flat when it is zoomed in the same % window. So Harris corner is not scale invariant. % -% <> +% <> % % So, in 2004, *D.Lowe*, University of British Columbia, came up with a new % algorithm, Scale Invariant Feature Transform (SIFT) in his paper, @@ -51,7 +53,7 @@ % for different octaves of the image in Gaussian Pyramid. It is represented in % below image: % -% <> +% <> % % Once these DoG are found, images are searched for local extrema over scale % and space. For example, one pixel in an image is compared with its 8 @@ -60,7 +62,7 @@ % means that keypoint is best represented in that scale. It is shown in below % image: % -% <> +% <> % % Regarding different parameters, the paper gives some empirical data which % can be summarized as, number of octaves = 4, number of scale levels = 5, diff --git a/opencv_contrib/samples/SURF_descriptor.m b/opencv_contrib/samples/SURF_descriptor.m index 01a6ef79d..494185d1a 100644 --- a/opencv_contrib/samples/SURF_descriptor.m +++ b/opencv_contrib/samples/SURF_descriptor.m @@ -14,9 +14,11 @@ % * Use the function to draw the % detected matches. % -% , -% , -% +% Sources: +% +% * +% * +% * % %% Brute-Force Matcher @@ -54,7 +56,7 @@ % % FLANN stands for Fast Library for Approximate Nearest Neighbors. It contains % a collection of algorithms optimized for fast nearest neighbor search in -% large datasets and for high dimensional features. It works more faster than +% large datasets and for high dimensional features. It works faster than % |BFMatcher| for large datasets. We will see the second example with FLANN % based matcher. % diff --git a/opencv_contrib/samples/SURF_detector.m b/opencv_contrib/samples/SURF_detector.m index ab5c14880..b967853d6 100644 --- a/opencv_contrib/samples/SURF_detector.m +++ b/opencv_contrib/samples/SURF_detector.m @@ -10,9 +10,10 @@ % * How to use the function % to draw the detected keypoints. % -% See also: -% , -% +% Sources: +% +% * +% * % %% Theory @@ -31,7 +32,7 @@ % done in parallel for different scales. Also SURF relies on the determinant of % the Hessian matrix for both scale and location.
% -% <> +% <> % % For orientation assignment, SURF uses wavelet responses in horizontal and % vertical direction for a neighbourhood of size 6s. Adequate Gaussian weights @@ -44,9 +45,9 @@ % process. SURF provides such a functionality called Upright-SURF or U-SURF. % It improves speed and is robust upto $\pm 15^{\circ}$. OpenCV supports both, % depending upon the flag, |Upright|. If it is 0, orientation is calculated. -% If it is 1, orientation is not calculated and it is more faster. +% If it is 1, orientation is not calculated and it is faster. % -% <> +% <> % % For feature description, SURF uses Wavelet responses in horizontal and % vertical direction (again, use of integral images makes things easier). A @@ -74,7 +75,7 @@ % of contrast (as shown in image below). This minimal information allows for % faster matching, without reducing the descriptor's performance. % -% <> +% <> % % In short, SURF adds a lot of features to improve the speed in every step. % Analysis shows it is 3 times faster than SIFT while performance is diff --git a/opencv_contrib/samples/anisodiff_demo_gui.m b/opencv_contrib/samples/anisodiff_demo_gui.m index 8aaf75ade..6f3edb3b2 100644 --- a/opencv_contrib/samples/anisodiff_demo_gui.m +++ b/opencv_contrib/samples/anisodiff_demo_gui.m @@ -2,7 +2,9 @@ % % This sample demonstrates Perona-Malik anisotropic diffusion. % -% +% Sources: +% +% * % function varargout = anisodiff_demo_gui(im) diff --git a/opencv_contrib/samples/aruco_calibrate_camera_charuco_demo.m b/opencv_contrib/samples/aruco_calibrate_camera_charuco_demo.m index 1d01c6792..206b4ee10 100644 --- a/opencv_contrib/samples/aruco_calibrate_camera_charuco_demo.m +++ b/opencv_contrib/samples/aruco_calibrate_camera_charuco_demo.m @@ -5,8 +5,10 @@ % If input comes from video, press any key for next frame % To finish capturing, press 'ESC' key and calibration starts. % -% -% +% Sources: +% +% * +% * % %% Parameters diff --git a/opencv_contrib/samples/aruco_calibrate_camera_demo.m b/opencv_contrib/samples/aruco_calibrate_camera_demo.m index af5c88719..b5a9e59b1 100644 --- a/opencv_contrib/samples/aruco_calibrate_camera_demo.m +++ b/opencv_contrib/samples/aruco_calibrate_camera_demo.m @@ -5,8 +5,10 @@ % If input comes from video, press any key for next frame % To finish capturing, press 'ESC' key and calibration starts. % -% -% +% Sources: +% +% * +% * % %% Parameters diff --git a/opencv_contrib/samples/aruco_create_board_charuco_demo.m b/opencv_contrib/samples/aruco_create_board_charuco_demo.m index 6a891d2f5..eb9efa9e6 100644 --- a/opencv_contrib/samples/aruco_create_board_charuco_demo.m +++ b/opencv_contrib/samples/aruco_create_board_charuco_demo.m @@ -1,7 +1,9 @@ %% ChArUco Board Image Demo % This example shows how to create a ChArUco board image. % -% +% Sources: +% +% * % %% Board diff --git a/opencv_contrib/samples/aruco_create_board_demo.m b/opencv_contrib/samples/aruco_create_board_demo.m index e595b2efc..3fd10f0a9 100644 --- a/opencv_contrib/samples/aruco_create_board_demo.m +++ b/opencv_contrib/samples/aruco_create_board_demo.m @@ -1,7 +1,9 @@ %% ArUco Grid Board Image Demo % This example shows how to create an ArUco grid board image. 
% -% +% Sources: +% +% * % %% Board diff --git a/opencv_contrib/samples/aruco_create_diamond_demo.m b/opencv_contrib/samples/aruco_create_diamond_demo.m index ca92b8caa..d15ec2a39 100644 --- a/opencv_contrib/samples/aruco_create_diamond_demo.m +++ b/opencv_contrib/samples/aruco_create_diamond_demo.m @@ -1,7 +1,9 @@ %% ChArUco Marker Image Demo % This example shows how to create a ChArUco marker image. % -% +% Sources: +% +% * % %% Diamond diff --git a/opencv_contrib/samples/aruco_create_marker_demo.m b/opencv_contrib/samples/aruco_create_marker_demo.m index 5f07a24d9..e1af879b5 100644 --- a/opencv_contrib/samples/aruco_create_marker_demo.m +++ b/opencv_contrib/samples/aruco_create_marker_demo.m @@ -1,7 +1,9 @@ %% ArUco Marker Image Demo % This example shows how to create an ArUco marker image. % -% +% Sources: +% +% * % %% Marker diff --git a/opencv_contrib/samples/aruco_detect_board_charuco_demo.m b/opencv_contrib/samples/aruco_detect_board_charuco_demo.m index 744a49f50..5bfee5e9e 100644 --- a/opencv_contrib/samples/aruco_detect_board_charuco_demo.m +++ b/opencv_contrib/samples/aruco_detect_board_charuco_demo.m @@ -1,8 +1,10 @@ %% Detection of ChArUco Corners Demo % The example shows how to do pose estimation using a ChArUco board. % -% -% +% Sources: +% +% * +% * % %% Parameters diff --git a/opencv_contrib/samples/aruco_detect_board_demo.m b/opencv_contrib/samples/aruco_detect_board_demo.m index 802b04b40..76cc6584c 100644 --- a/opencv_contrib/samples/aruco_detect_board_demo.m +++ b/opencv_contrib/samples/aruco_detect_board_demo.m @@ -2,8 +2,10 @@ % Detection and pose estimation using a Board of markers % (an ArUco Planar Grid board). % -% -% +% Sources: +% +% * +% * % %% Parameters diff --git a/opencv_contrib/samples/aruco_detect_diamonds_demo.m b/opencv_contrib/samples/aruco_detect_diamonds_demo.m index 11d200ad6..658f57128 100644 --- a/opencv_contrib/samples/aruco_detect_diamonds_demo.m +++ b/opencv_contrib/samples/aruco_detect_diamonds_demo.m @@ -1,8 +1,10 @@ %% Detection of Diamond Markers Demo % Detection and pose estimation using ChArUco markers. % -% -% +% Sources: +% +% * +% * % %% Parameters diff --git a/opencv_contrib/samples/aruco_detect_markers_demo.m b/opencv_contrib/samples/aruco_detect_markers_demo.m index 80a460245..acd0297a5 100644 --- a/opencv_contrib/samples/aruco_detect_markers_demo.m +++ b/opencv_contrib/samples/aruco_detect_markers_demo.m @@ -1,8 +1,10 @@ %% Detection of ArUco Markers Demo % Basic marker detection and pose estimation from single ArUco markers. % -% -% +% Sources: +% +% * +% * % %% Parameters diff --git a/opencv_contrib/samples/color_balance_demo.m b/opencv_contrib/samples/color_balance_demo.m index 70c4b5065..33418ac63 100644 --- a/opencv_contrib/samples/color_balance_demo.m +++ b/opencv_contrib/samples/color_balance_demo.m @@ -1,7 +1,9 @@ %% White Balance Demo % OpenCV color balance demonstration sample. 
% -% +% Sources: +% +% * % %% Load input image diff --git a/opencv_contrib/samples/computeSaliency_demo.m b/opencv_contrib/samples/computeSaliency_demo.m index 8b01e61f6..e93990edf 100644 --- a/opencv_contrib/samples/computeSaliency_demo.m +++ b/opencv_contrib/samples/computeSaliency_demo.m @@ -1,7 +1,9 @@ %% Saliency algorithms demo % This example shows the functionality of "Saliency" % -% +% Sources: +% +% * % %% Options @@ -65,7 +67,6 @@ error('Unrecognized saliency algorithm'); end disp(saliency) -fprintf('className = %s\n', saliency.getClassName()); %% Compute saliency switch alg @@ -90,8 +91,8 @@ saliency.init(); % prepare plots - subplot(121); hImg(1) = imshow(zeros(sz(1:2),'uint8')); title('img') - subplot(122); hImg(2) = imshow(zeros(sz(1:2),'single')); title('saliencyMap') + subplot(121); hImg(1) = imshow(frame); title('img') + subplot(122); hImg(2) = imshow(false(sz(1:2))); title('saliencyMap') % loop over frames while all(ishghandle(hImg)) @@ -104,9 +105,10 @@ fprintf('frame #%3d: ', cap.PosFrames); frame = cv.cvtColor(frame, 'RGB2GRAY'); tic, saliencyMap = saliency.computeSaliency(frame); toc + saliencyMap = logical(saliencyMap); % show - %NOTE: for the first dozen frames, saliency is all 1.0 + %NOTE: for the first dozen frames, saliency is all 1 set(hImg(2), 'CData',saliencyMap) drawnow end diff --git a/opencv_contrib/samples/dataset_mnist_demo.m b/opencv_contrib/samples/dataset_mnist_demo.m index 0b3e22291..6c5f78c6a 100644 --- a/opencv_contrib/samples/dataset_mnist_demo.m +++ b/opencv_contrib/samples/dataset_mnist_demo.m @@ -1,7 +1,9 @@ %% The MNIST dataset of handwritten digits % Demonstrates loading the . % -% +% Sources: +% +% * % %% MNIST dataset diff --git a/opencv_contrib/samples/demos.xml b/opencv_contrib/samples/demos.xml new file mode 100644 index 000000000..0430b4f63 --- /dev/null +++ b/opencv_contrib/samples/demos.xml @@ -0,0 +1,289 @@ + + + mexopencv (contrib) + toolbox + HelpIcon.DEMOS + Examples to demonstrate how to use OpenCV in MATLAB. 
+ Homepage + + + + + + + M-file + aruco_create_marker_demo + + + + M-file + aruco_detect_markers_demo + + + + M-file + aruco_create_board_demo + + + + M-file + aruco_detect_board_demo + + + + M-file + aruco_create_board_charuco_demo + + + + M-file + aruco_detect_board_charuco_demo + + + + M-file + aruco_create_diamond_demo + + + + M-file + aruco_detect_diamonds_demo + + + + M-file + aruco_calibrate_camera_demo + + + + M-file + aruco_calibrate_camera_charuco_demo + + Image Processing Toolbox + + + + + + + + + M-file + BackgroundSubtractorDemo + + + + + + + + + M-file + retina_demo + + + + M-GUI + retina_hdr_tonemapping_demo_gui + + + + + + + + + M-file + dataset_mnist_demo + + Image Processing Toolbox + + + + + + + + + + + + + + M-file + facerec_demo + + Image Processing Toolbox + + Statistics and Machine Learning Toolbox + + + + + + + + + + + + + + M-file + line_extraction_demo + + + + M-file + line_matching_demo + + Image Processing Toolbox + + + + M-file + line_radius_matching_demo + + + + + + + + + M-file + optical_flow_evaluation_demo + + Image Processing Toolbox + + + + M-file + gpc_demo + + + + + + + + + M-file + plotting_demo + + + + + + + + + M-file + computeSaliency_demo + + + + + + + + + M-file + SIFT_detector + + + + M-file + SURF_detector + + + + M-file + descriptor_extractor_brief_demo + + + + M-file + SURF_descriptor + + + + + + + + + M-file + superpixels_demo + + Image Processing Toolbox + + + + M-file + fast_hough_transform_demo + + Image Processing Toolbox + + + + M-file + structured_edge_detection_demo + + + + M-file + graphsegmentation_demo + + Image Processing Toolbox + + + + M-file + niblack_thresholding_demo + + + + + + M-GUI + dtFilter_demo_gui + + + + M-file + disparity_filtering_demo + + + + M-file + fld_lines_demo + + + + M-GUI + anisodiff_demo_gui + + + + M-file + peilin_demo + + + + + + + + + + + + + + M-file + color_balance_demo + + + + diff --git a/opencv_contrib/samples/descriptor_extractor_brief_demo.m b/opencv_contrib/samples/descriptor_extractor_brief_demo.m index 0e1ad839e..67c1621f2 100644 --- a/opencv_contrib/samples/descriptor_extractor_brief_demo.m +++ b/opencv_contrib/samples/descriptor_extractor_brief_demo.m @@ -2,7 +2,9 @@ % % In this demo, we will see the basics of BRIEF algorithm. % -% +% Sources: +% +% * % %% Theory @@ -84,5 +86,5 @@ % "BRIEF: Binary Robust Independent Elementary Features", 11th European % Conference on Computer Vision (ECCV), Heraklion, Crete. LNCS Springer, % September 2010. -% * LSH (Locality Sensitive Hasing) at wikipedia. +% * . % diff --git a/opencv_contrib/samples/disparity_filtering_demo.m b/opencv_contrib/samples/disparity_filtering_demo.m index 5079ea87a..b8b99bc1f 100644 --- a/opencv_contrib/samples/disparity_filtering_demo.m +++ b/opencv_contrib/samples/disparity_filtering_demo.m @@ -1,9 +1,11 @@ %% Disparity Filtering Demo % In this tutorial you will learn how to use the disparity map post-filtering -% to improve the results of StereoBM and StereoSGBM algorithms. +% to improve the results of |cv.StereoBM| and |cv.StereoSGBM| algorithms. % -% -% +% Sources: +% +% * +% * % %% Introduction diff --git a/opencv_contrib/samples/dtFilter_demo_gui.m b/opencv_contrib/samples/dtFilter_demo_gui.m index dfe778c3d..583878e48 100644 --- a/opencv_contrib/samples/dtFilter_demo_gui.m +++ b/opencv_contrib/samples/dtFilter_demo_gui.m @@ -1,7 +1,9 @@ %% Filtering Demo -% This program demonstrates Domain Transform filtering using cv.DTFilter +% This program demonstrates Domain Transform filtering using |cv.DTFilter|. 
% -% +% Sources: +% +% * % function varargout = dtFilter_demo_gui(im) diff --git a/opencv_contrib/samples/facerec_demo.m b/opencv_contrib/samples/facerec_demo.m index 2f9ab12ff..dff442fe9 100644 --- a/opencv_contrib/samples/facerec_demo.m +++ b/opencv_contrib/samples/facerec_demo.m @@ -8,12 +8,14 @@ % * Local Binary Patterns Histograms % % See this page for a complete tutorial: -% +% % -% -% -% -% +% Sources: +% +% * +% * +% * +% * % %% Options diff --git a/opencv_contrib/samples/fast_hough_transform_demo.m b/opencv_contrib/samples/fast_hough_transform_demo.m index 97058b83c..23de9217e 100644 --- a/opencv_contrib/samples/fast_hough_transform_demo.m +++ b/opencv_contrib/samples/fast_hough_transform_demo.m @@ -1,7 +1,9 @@ %% Fast Hough transform demo % This program demonstrates line finding with the Fast Hough transform. % -% +% Sources: +% +% * % %% Image diff --git a/opencv_contrib/samples/fld_lines_demo.m b/opencv_contrib/samples/fld_lines_demo.m index ded0df7ce..694c895e7 100644 --- a/opencv_contrib/samples/fld_lines_demo.m +++ b/opencv_contrib/samples/fld_lines_demo.m @@ -1,7 +1,9 @@ %% Fast Line Detector demo -% Compares FastLineDetector against LineSegmentDetector. +% Compares |cv.FastLineDetector| against |cv.LineSegmentDetector|. % -% +% Sources: +% +% * % %% diff --git a/opencv_contrib/samples/gpc_demo.m b/opencv_contrib/samples/gpc_demo.m index c5b37eb00..736d9f4e0 100644 --- a/opencv_contrib/samples/gpc_demo.m +++ b/opencv_contrib/samples/gpc_demo.m @@ -11,8 +11,10 @@ % downloading one of the files trained on some publicly available dataset from % here: % -% -% +% Sources: +% +% * +% * % %% 1) Train diff --git a/opencv_contrib/samples/graphsegmentation_demo.m b/opencv_contrib/samples/graphsegmentation_demo.m index aacc1c1cd..0736bcb79 100644 --- a/opencv_contrib/samples/graphsegmentation_demo.m +++ b/opencv_contrib/samples/graphsegmentation_demo.m @@ -5,7 +5,9 @@ % P. Felzenszwalb, D. Huttenlocher, "Efficient Graph-Based Image Segmentation" % International Journal of Computer Vision, Vol. 59, No. 2, September 2004 % -% +% Sources: +% +% * % %% Load image diff --git a/opencv_contrib/samples/line_extraction_demo.m b/opencv_contrib/samples/line_extraction_demo.m index c557b129b..598a76bd9 100644 --- a/opencv_contrib/samples/line_extraction_demo.m +++ b/opencv_contrib/samples/line_extraction_demo.m @@ -1,6 +1,6 @@ %% Binary Descriptors for Line Segments % This example shows the functionalities of lines extraction and descriptors -% computation furnished by cv.LSDDetector and cv.BinaryDescriptor classes. +% computation furnished by |cv.LSDDetector| and |cv.BinaryDescriptor| classes. % % In this tutorial it will be shown how to: % @@ -8,9 +8,11 @@ % in |KeyLine| objects % * use the same interface to compute descriptors for every extracted line % -% -% -% +% Sources: +% +% * +% * +% * % %% Lines extraction and descriptors computation diff --git a/opencv_contrib/samples/line_matching_demo.m b/opencv_contrib/samples/line_matching_demo.m index 86d0fd327..a73bd147a 100644 --- a/opencv_contrib/samples/line_matching_demo.m +++ b/opencv_contrib/samples/line_matching_demo.m @@ -1,6 +1,6 @@ %% Line descriptors matching demo % This example shows the functionalities of line descriptors matching -% furnished by cv.BinaryDescriptorMatcher class. +% furnished by |cv.BinaryDescriptorMatcher| class. 
% % This module shows how to extract line segments from an image by 2 different % methods: First segmenting lines with Line Segment Detector |cv.LSDDetector|, @@ -8,7 +8,9 @@ % them a descriptor |cv.BinaryDescriptor|. Finally, we can then match line % segments using the |cv.BinaryDescriptorMatcher| class. % -% +% Sources: +% +% * % %% Matching among descriptors diff --git a/opencv_contrib/samples/line_radius_matching_demo.m b/opencv_contrib/samples/line_radius_matching_demo.m index 60d2d5412..b094d8f7a 100644 --- a/opencv_contrib/samples/line_radius_matching_demo.m +++ b/opencv_contrib/samples/line_radius_matching_demo.m @@ -1,7 +1,9 @@ %% Line descriptors radius matching demo % This example shows the functionalities of radius matching. % -% +% Sources: +% +% * % %% Querying internal database diff --git a/opencv_contrib/samples/niblack_thresholding_demo.m b/opencv_contrib/samples/niblack_thresholding_demo.m index 898364be1..73bcb0e7e 100644 --- a/opencv_contrib/samples/niblack_thresholding_demo.m +++ b/opencv_contrib/samples/niblack_thresholding_demo.m @@ -3,17 +3,17 @@ % (global thresholding and adaptive thresholding) for an image with varying % illumination. % -% -% +% Sources: +% +% * +% * % function niblack_thresholding_demo() - % Parameters + % Input 8-bit grayscale image + Parameters % - BS: block size (local neighborhood) [niblack, adaptive] % - K : constant multiplied by std dev next subtracted from mean [niblack] % - C : constant subtracted from mean [adaptive] - - % Input 8-bit grayscale image if ~mexopencv.isOctave() && mexopencv.require('images') % image with dark pixels being foreground im = which('printedtext.png'); @@ -92,6 +92,9 @@ function niblack_thresholding_demo() function out = localNormalization(img, s1, s2) %LOCALNORMALIZATION local normalization to get uniform local mean and variance % + % out = localNormalization(img) + % out = localNormalization(img, s1, s2) + % % The local normalization tends to uniformize the mean and variance of an % image around a local neighborhood. This is especially useful for correct % non-uniform illumination or shading artifacts. @@ -105,7 +108,7 @@ function niblack_thresholding_demo() % ## Options % * __s1__ sigma to estimate the local mean. default 5 % * __s2__ sigma to estimate the local variance. Often `s2` should be - % larger than `s1`. default 15 + % larger than `s1`. default 15 % % ## References % > http://bigwww.epfl.ch/sage/soft/localnormalization/ diff --git a/opencv_contrib/samples/optical_flow_evaluation_demo.m b/opencv_contrib/samples/optical_flow_evaluation_demo.m index f97b6860f..0e0cfd06c 100644 --- a/opencv_contrib/samples/optical_flow_evaluation_demo.m +++ b/opencv_contrib/samples/optical_flow_evaluation_demo.m @@ -3,7 +3,9 @@ % Computes flow field between two images using various methods and display it % (deepflow, simpleflow, sparsetodenseflow, Farneback, TV-L1). 
% -% +% Sources: +% +% * % %% Input images @@ -78,12 +80,16 @@ toc % display the flow - [ang, mag] = cart2pol(flow(:,:,1), flow(:,:,2)); - if mexopencv.isOctave() - %HACK: RAD2DEG not implemented in Octave - ang = (ang + pi) * (180 / pi); + if true + [mag, ang] = cv.cartToPolar(flow(:,:,1), flow(:,:,2), 'Degrees',true); else - ang = rad2deg(ang + pi); + [ang, mag] = cart2pol(flow(:,:,1), flow(:,:,2)); + if mexopencv.isOctave() + %HACK: RAD2DEG not implemented in Octave + ang = (ang + pi) * (180 / pi); + else + ang = rad2deg(ang + pi); + end end mag = cv.normalize(mag, 'Alpha',0, 'Beta',1, 'NormType','MinMax'); hsv = cat(3, ang, ones(size(ang),class(ang)), mag); % H=[0,360], S,V=[0,1] diff --git a/opencv_contrib/samples/peilin_demo.m b/opencv_contrib/samples/peilin_demo.m new file mode 100644 index 000000000..373a3997c --- /dev/null +++ b/opencv_contrib/samples/peilin_demo.m @@ -0,0 +1,39 @@ +%% Pei&Lin Normalization +% This program demonstrates Pei-Lin Normalization. +% +% Sources: +% +% * +% + +%% +% Source images +fname1 = fullfile(mexopencv.root(), 'test', 'peilin_plane.png'); +fname2 = fullfile(mexopencv.root(), 'test', 'peilin_shape.png'); +if exist(fname1, 'file') ~= 2 + disp('Downloading Image...') + url = 'https://cdn.rawgit.com/opencv/opencv_contrib/3.3.1/modules/ximgproc/samples/peilin_plane.png'; + urlwrite(url, fname1); +end +if exist(fname2, 'file') ~= 2 + disp('Downloading Image...') + url = 'https://cdn.rawgit.com/opencv/opencv_contrib/3.3.1/modules/ximgproc/samples/peilin_shape.png'; + urlwrite(url, fname2); +end + +%% +% Load images +I = cv.imread(fname1, 'Grayscale',true); +J = cv.imread(fname2, 'Grayscale',true); + +%% +% Apply normalization +N = cv.warpAffine(I, cv.PeiLinNormalization(I)); +D = cv.warpAffine(I, cv.PeiLinNormalization(J), 'WarpInverse',true); + +%% +% Show results +subplot(221), imshow(I), title('I') +subplot(222), imshow(N), title('N') +subplot(223), imshow(J), title('J') +subplot(224), imshow(D), title('D') diff --git a/opencv_contrib/samples/retina_demo.m b/opencv_contrib/samples/retina_demo.m index f8e345d9c..3742c7126 100644 --- a/opencv_contrib/samples/retina_demo.m +++ b/opencv_contrib/samples/retina_demo.m @@ -1,6 +1,5 @@ %% Retina demonstration -% Demonstrates the use of is a wrapper class of the Gipsa/Listic Labs retina -% model. +% Demonstrates the use of wrapper class of the Gipsa/Listic Labs retina model. % % This retina model allows spatio-temporal image processing (applied on a % webcam sequences). @@ -17,8 +16,10 @@ % |RetinaDefaultParameters.xml|. 
You can use this to fine tune parameters and % load them if you save to file |RetinaSpecificParameters.xml| % -% -% +% Sources: +% +% * +% * % %% Webcam diff --git a/opencv_contrib/samples/retina_hdr_tonemapping_demo_gui.m b/opencv_contrib/samples/retina_hdr_tonemapping_demo_gui.m index ae70732e1..4410ba951 100644 --- a/opencv_contrib/samples/retina_hdr_tonemapping_demo_gui.m +++ b/opencv_contrib/samples/retina_hdr_tonemapping_demo_gui.m @@ -21,8 +21,10 @@ % * local logarithmic luminance compression allows details to be enhanced in % low light conditions % -% -% +% Sources: +% +% * +% * % function varargout = retina_hdr_tonemapping_demo_gui(fname) diff --git a/opencv_contrib/samples/structured_edge_detection_demo.m b/opencv_contrib/samples/structured_edge_detection_demo.m index 26dd4b841..b3eec8a92 100644 --- a/opencv_contrib/samples/structured_edge_detection_demo.m +++ b/opencv_contrib/samples/structured_edge_detection_demo.m @@ -4,7 +4,9 @@ % The structered edge demo requires you to provide a model. % This demo downloads a model from the opencv_extra repository on Github. % -% +% Sources: +% +% * % %% Load image diff --git a/opencv_contrib/samples/superpixels_demo.m b/opencv_contrib/samples/superpixels_demo.m index a65dc0013..70d273171 100644 --- a/opencv_contrib/samples/superpixels_demo.m +++ b/opencv_contrib/samples/superpixels_demo.m @@ -1,9 +1,11 @@ %% Superpixels demo % This program demonstrates superpixels segmentation using OpenCV classes -% cv.SuperpixelSEEDS, cv.SuperpixelSLIC, and cv.SuperpixelLSC +% |cv.SuperpixelSEEDS|, |cv.SuperpixelSLIC|, and |cv.SuperpixelLSC|. % -% -% +% Sources: +% +% * +% * % %% Algorithm diff --git a/opencv_contrib/src/+cv/FASTForPointSet.cpp b/opencv_contrib/src/+cv/FASTForPointSet.cpp new file mode 100644 index 000000000..4b33aef96 --- /dev/null +++ b/opencv_contrib/src/+cv/FASTForPointSet.cpp @@ -0,0 +1,62 @@ +/** + * @file FASTForPointSet.cpp + * @brief mex interface for cv::xfeatures2d::FASTForPointSet + * @ingroup xfeatures2d + * @author Amro + * @date 2017 + */ +#include "mexopencv.hpp" +#include "opencv2/xfeatures2d.hpp" +using namespace std; +using namespace cv; +using namespace cv::xfeatures2d; + +//TODO: due to a bug in opencv, function always returns empty keypoints +// https://github.com/opencv/opencv_contrib/pull/1435 + +namespace { +/// FAST neighborhood types +const ConstMap FASTTypeMap = ConstMap + ("TYPE_5_8", cv::FastFeatureDetector::TYPE_5_8) + ("TYPE_7_12", cv::FastFeatureDetector::TYPE_7_12) + ("TYPE_9_16", cv::FastFeatureDetector::TYPE_9_16); +} + +/** + * Main entry called from Matlab + * @param nlhs number of left-hand-side arguments + * @param plhs pointers to mxArrays in the left-hand-side + * @param nrhs number of right-hand-side arguments + * @param prhs pointers to mxArrays in the right-hand-side + */ +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) +{ + // Check the number of arguments + nargchk(nrhs>=2 && (nrhs%2)==0 && nlhs<=1); + + // Argument vector + vector rhs(prhs, prhs+nrhs); + + // Option processing + int threshold = 10; + bool nonmaxSupression = true; + int type = cv::FastFeatureDetector::TYPE_9_16; + for (int i=2; i keypoints(rhs[1].toVector()); + FASTForPointSet(image, keypoints, threshold, nonmaxSupression, type); + plhs[0] = MxArray(keypoints); +} diff --git a/opencv_contrib/src/+cv/GradientDeriche.cpp b/opencv_contrib/src/+cv/GradientDeriche.cpp index 06746d0ab..ab9bc5b95 100644 --- a/opencv_contrib/src/+cv/GradientDeriche.cpp +++ b/opencv_contrib/src/+cv/GradientDeriche.cpp @@ 
-1,6 +1,6 @@ /** * @file GradientDeriche.cpp - * @brief mex interface for cv::ximgproc::GradientDeriche(X|Y) + * @brief mex interface for cv::ximgproc::GradientDericheX, cv::ximgproc::GradientDericheY * @ingroup ximgproc * @author Amro * @date 2017 diff --git a/opencv_contrib/src/+cv/GradientPaillou.cpp b/opencv_contrib/src/+cv/GradientPaillou.cpp index 8ea1d65d3..13d27a971 100644 --- a/opencv_contrib/src/+cv/GradientPaillou.cpp +++ b/opencv_contrib/src/+cv/GradientPaillou.cpp @@ -1,6 +1,6 @@ /** * @file GradientPaillou.cpp - * @brief mex interface for cv::ximgproc::GradientPaillou(X|Y) + * @brief mex interface for cv::ximgproc::GradientPaillouX, cv::ximgproc::GradientPaillouY * @ingroup ximgproc * @author Amro * @date 2017 diff --git a/opencv_contrib/src/+cv/PeiLinNormalization.cpp b/opencv_contrib/src/+cv/PeiLinNormalization.cpp new file mode 100644 index 000000000..d1a9b0725 --- /dev/null +++ b/opencv_contrib/src/+cv/PeiLinNormalization.cpp @@ -0,0 +1,30 @@ +/** + * @file PeiLinNormalization.cpp + * @brief mex interface for cv::ximgproc::PeiLinNormalization + * @ingroup ximgproc + * @author Amro + * @date 2017 + */ +#include "mexopencv.hpp" +#include "opencv2/ximgproc.hpp" +using namespace std; +using namespace cv; +using namespace cv::ximgproc; + +/** + * Main entry called from Matlab + * @param nlhs number of left-hand-side arguments + * @param plhs pointers to mxArrays in the left-hand-side + * @param nrhs number of right-hand-side arguments + * @param prhs pointers to mxArrays in the right-hand-side + */ +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) +{ + // Check the number of arguments + nargchk(nrhs==1 && nlhs<=1); + + // Process + Mat I(MxArray(prhs[0]).toMat()); + Matx23d T = PeiLinNormalization(I); + plhs[0] = MxArray(T); +} diff --git a/opencv_contrib/src/+cv/anisotropicDiffusion.cpp b/opencv_contrib/src/+cv/anisotropicDiffusion.cpp index eead9a066..8e18a37ca 100644 --- a/opencv_contrib/src/+cv/anisotropicDiffusion.cpp +++ b/opencv_contrib/src/+cv/anisotropicDiffusion.cpp @@ -27,8 +27,8 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) vector<MxArray> rhs(prhs, prhs+nrhs); // Option processing - float alpha = 1.0; // 1.0/7 - float K = 0.02; + float alpha = 1.0f; // 1.0/7 + float K = 0.02f; int niters = 10; // 1 for (int i=1; i<nrhs; i+=2) { diff --git a/opencv_contrib/src/+cv/private/BasicFaceRecognizer_.cpp b/opencv_contrib/src/+cv/private/BasicFaceRecognizer_.cpp @@ ... @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) nargchk(nrhs==3 && nlhs<=2); Mat src(rhs[2].toMat()); int label = -1; - double confidence = 0; if (nlhs > 1) { + double confidence = 0; obj->predict(src, label, confidence); plhs[1] = MxArray(confidence); } diff --git a/opencv_contrib/src/+cv/private/LBPHFaceRecognizer_.cpp b/opencv_contrib/src/+cv/private/LBPHFaceRecognizer_.cpp index 2147d0fc6..0d018cd29 100644 --- a/opencv_contrib/src/+cv/private/LBPHFaceRecognizer_.cpp +++ b/opencv_contrib/src/+cv/private/LBPHFaceRecognizer_.cpp @@ -176,8 +176,8 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) nargchk(nrhs==3 && nlhs<=2); Mat src(rhs[2].toMat()); int label = -1; - double confidence = 0; if (nlhs > 1) { + double confidence = 0; obj->predict(src, label, confidence); plhs[1] = MxArray(confidence); } diff --git a/opencv_contrib/src/+cv/private/MotionSaliencyBinWangApr2014_.cpp b/opencv_contrib/src/+cv/private/MotionSaliencyBinWangApr2014_.cpp index 56fef6661..779d3bddf 100644 --- a/opencv_contrib/src/+cv/private/MotionSaliencyBinWangApr2014_.cpp +++ b/opencv_contrib/src/+cv/private/MotionSaliencyBinWangApr2014_.cpp @@ -88,10 +88,6 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) nargchk(nrhs==2 && nlhs<=1); plhs[0] = MxArray(obj->getDefaultName()); } - else if (method == "getClassName") { -
nargchk(nrhs==2 && nlhs<=1); - plhs[0] = MxArray(obj->getClassName()); - } else if (method == "computeSaliency") { nargchk(nrhs==3 && nlhs<=1); Mat image(rhs[2].toMat(CV_8U)), diff --git a/opencv_contrib/src/+cv/private/ObjectnessBING_.cpp b/opencv_contrib/src/+cv/private/ObjectnessBING_.cpp index d41bff9f3..3a90ef147 100644 --- a/opencv_contrib/src/+cv/private/ObjectnessBING_.cpp +++ b/opencv_contrib/src/+cv/private/ObjectnessBING_.cpp @@ -102,10 +102,6 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) nargchk(nrhs==2 && nlhs<=1); plhs[0] = MxArray(obj->getDefaultName()); } - else if (method == "getClassName") { - nargchk(nrhs==2 && nlhs<=1); - plhs[0] = MxArray(obj->getClassName()); - } else if (method == "computeSaliency") { nargchk(nrhs==3 && nlhs<=1); Mat image(rhs[2].toMat(CV_8U)); diff --git a/opencv_contrib/src/+cv/private/PCTSignatures_.cpp b/opencv_contrib/src/+cv/private/PCTSignatures_.cpp index 3cc70cc4d..43c6306fc 100644 --- a/opencv_contrib/src/+cv/private/PCTSignatures_.cpp +++ b/opencv_contrib/src/+cv/private/PCTSignatures_.cpp @@ -252,7 +252,7 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) else if (prop == "WeightB") plhs[0] = MxArray(obj->getWeightB()); else if (prop == "WeightContrast") - plhs[0] = MxArray(obj->getWeightConstrast()); //NOTE: mispelled + plhs[0] = MxArray(obj->getWeightContrast()); else if (prop == "WeightEntropy") plhs[0] = MxArray(obj->getWeightEntropy()); else if (prop == "IterationCount") diff --git a/opencv_contrib/src/+cv/private/Plot2d_.cpp b/opencv_contrib/src/+cv/private/Plot2d_.cpp index c3666a85c..b66955777 100644 --- a/opencv_contrib/src/+cv/private/Plot2d_.cpp +++ b/opencv_contrib/src/+cv/private/Plot2d_.cpp @@ -9,7 +9,7 @@ #include "opencv2/plot.hpp" using namespace std; using namespace cv; -using namespace plot; +using namespace cv::plot; // Persistent objects namespace { @@ -42,12 +42,12 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) Ptr<Plot2d> p; if (nrhs == 3) { Mat data(rhs[2].toMat(CV_64F)); - p = createPlot2d(data); + p = Plot2d::create(data); } else { Mat dataX(rhs[2].toMat(CV_64F)), dataY(rhs[3].toMat(CV_64F)); - p = createPlot2d(dataX, dataY); + p = Plot2d::create(dataX, dataY); } obj_[++last_id] = p; plhs[0] = MxArray(last_id); @@ -165,6 +165,16 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) Size sz(rhs[3].toSize()); obj->setPlotSize(sz.width, sz.height); } + else if (prop == "ShowGrid") + obj->setShowGrid(rhs[3].toBool()); + else if (prop == "ShowText") + obj->setShowText(rhs[3].toBool()); + else if (prop == "GridLinesNumber") + obj->setGridLinesNumber(rhs[3].toInt()); + else if (prop == "InvertOrientation") + obj->setInvertOrientation(rhs[3].toBool()); + else if (prop == "PointIdxToPrint") + obj->setPointIdxToPrint(rhs[3].toInt()); else mexErrMsgIdAndTxt("mexopencv:error", "Unrecognized property %s", prop.c_str()); diff --git a/opencv_contrib/src/+cv/private/RetinaFastToneMapping_.cpp b/opencv_contrib/src/+cv/private/RetinaFastToneMapping_.cpp index 81632de5a..a75af30f9 100644 --- a/opencv_contrib/src/+cv/private/RetinaFastToneMapping_.cpp +++ b/opencv_contrib/src/+cv/private/RetinaFastToneMapping_.cpp @@ -39,7 +39,7 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) // Constructor is called.
Create a new object from argument if (method == "new") { nargchk(nrhs==3 && nlhs<=1); - obj_[++last_id] = createRetinaFastToneMapping(rhs[2].toSize()); + obj_[++last_id] = RetinaFastToneMapping::create(rhs[2].toSize()); plhs[0] = MxArray(last_id); mexLock(); return; diff --git a/opencv_contrib/src/+cv/private/Retina_.cpp b/opencv_contrib/src/+cv/private/Retina_.cpp index 0c72d4bd1..a17eb86cb 100644 --- a/opencv_contrib/src/+cv/private/Retina_.cpp +++ b/opencv_contrib/src/+cv/private/Retina_.cpp @@ -58,7 +58,7 @@ Ptr<Retina> create_Retina( mexErrMsgIdAndTxt("mexopencv:error", "Unrecognized option %s", key.c_str()); } - return createRetina(inputSize, colorMode, colorSamplingMethod, + return Retina::create(inputSize, colorMode, colorSamplingMethod, useRetinaLogSampling, reductionFactor, samplingStrength); } diff --git a/opencv_contrib/src/+cv/private/StaticSaliencyFineGrained_.cpp b/opencv_contrib/src/+cv/private/StaticSaliencyFineGrained_.cpp index becb9f2e0..626460a4b 100644 --- a/opencv_contrib/src/+cv/private/StaticSaliencyFineGrained_.cpp +++ b/opencv_contrib/src/+cv/private/StaticSaliencyFineGrained_.cpp @@ -88,10 +88,6 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) nargchk(nrhs==2 && nlhs<=1); plhs[0] = MxArray(obj->getDefaultName()); } - else if (method == "getClassName") { - nargchk(nrhs==2 && nlhs<=1); - plhs[0] = MxArray(obj->getClassName()); - } else if (method == "computeSaliency") { nargchk(nrhs==3 && nlhs<=1); Mat image(rhs[2].toMat(CV_8U)), diff --git a/opencv_contrib/src/+cv/private/StaticSaliencySpectralResidual_.cpp b/opencv_contrib/src/+cv/private/StaticSaliencySpectralResidual_.cpp index f5609f7c7..0e245ef25 100644 --- a/opencv_contrib/src/+cv/private/StaticSaliencySpectralResidual_.cpp +++ b/opencv_contrib/src/+cv/private/StaticSaliencySpectralResidual_.cpp @@ -88,10 +88,6 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) nargchk(nrhs==2 && nlhs<=1); plhs[0] = MxArray(obj->getDefaultName()); } - else if (method == "getClassName") { - nargchk(nrhs==2 && nlhs<=1); - plhs[0] = MxArray(obj->getClassName()); - } else if (method == "computeSaliency") { nargchk(nrhs==3 && nlhs<=1); Mat image(rhs[2].toMat()), diff --git a/opencv_contrib/src/+cv/private/TransientAreasSegmentationModule_.cpp b/opencv_contrib/src/+cv/private/TransientAreasSegmentationModule_.cpp index b56e6ef4b..536313e00 100644 --- a/opencv_contrib/src/+cv/private/TransientAreasSegmentationModule_.cpp +++ b/opencv_contrib/src/+cv/private/TransientAreasSegmentationModule_.cpp @@ -99,7 +99,7 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) // Constructor is called. Create a new object from argument if (method == "new") { nargchk(nrhs==3 && nlhs<=1); - obj_[++last_id] = createTransientAreasSegmentationModule( + obj_[++last_id] = TransientAreasSegmentationModule::create( rhs[2].toSize()); plhs[0] = MxArray(last_id); mexLock(); diff --git a/opencv_contrib/test/unit_tests/TestCalcOpticalFlowSparseToDense.m b/opencv_contrib/test/unit_tests/TestCalcOpticalFlowSparseToDense.m index 49f75714d..463f19757 100644 --- a/opencv_contrib/test/unit_tests/TestCalcOpticalFlowSparseToDense.m +++ b/opencv_contrib/test/unit_tests/TestCalcOpticalFlowSparseToDense.m @@ -3,6 +3,11 @@ methods (Static) function test_1 + %TODO: sometimes segfaults, most likely an opencv bug + if true + error('mexopencv:testskip', 'todo'); + end + im1 = 255*uint8([... 0 0 0 0 0 0 0 0 0 0;... 0 0 0 0 0 0 0 0 0 0;...
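The Plot2d_ hunk above wires five additional cv::plot setters into the mex gateway. On the MATLAB side that should surface as ordinary properties of cv.Plot2d, in the same way PlotSize already does; a hedged sketch (the property names are inferred from the setter names, not taken from the patch):

    x = linspace(-2*pi, 2*pi, 100);
    p = cv.Plot2d(x, sin(x));       % dataX, dataY form of the constructor
    p.ShowGrid = true;              % -> setShowGrid
    p.GridLinesNumber = 10;         % -> setGridLinesNumber
    p.ShowText = true;              % -> setShowText
    p.InvertOrientation = false;    % -> setInvertOrientation
    p.PointIdxToPrint = 50;         % -> setPointIdxToPrint
    img = p.render();               % rasterize the plot to an image
    imshow(img)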
diff --git a/opencv_contrib/test/unit_tests/TestDataset.m b/opencv_contrib/test/unit_tests/TestDataset.m index 3edddb0c5..fd77082fa 100644 --- a/opencv_contrib/test/unit_tests/TestDataset.m +++ b/opencv_contrib/test/unit_tests/TestDataset.m @@ -18,6 +18,11 @@ end function test_mnist + %TODO: sometimes segfaults, most likely an opencv bug + if true + error('mexopencv:testskip', 'todo'); + end + % see: dataset_mnist_demo.m dirMNIST = fullfile(mexopencv.root(), 'test', 'mnist', filesep()); if ~isdir(dirMNIST) diff --git a/opencv_contrib/test/unit_tests/TestFASTForPointSet.m b/opencv_contrib/test/unit_tests/TestFASTForPointSet.m new file mode 100644 index 000000000..91b71f5eb --- /dev/null +++ b/opencv_contrib/test/unit_tests/TestFASTForPointSet.m @@ -0,0 +1,26 @@ +classdef TestFASTForPointSet + %TestFASTForPointSet + + methods (Static) + function test_1 + img = imread(fullfile(mexopencv.root(),'test','tsukuba_l.png')); + kpts = [ + randi([0 size(img,2)-1], [100 1]), ... + randi([0 size(img,1)-1], [100 1]) + ]; + kpts = cv.KeyPointsFilter.convertFromPoints(kpts); + kpts = cv.FASTForPointSet(img, kpts, 'Threshold',10); + validateattributes(kpts, {'struct'}, {'vector'}); + end + + function test_error_argnum + try + cv.FASTForPointSet(); + throw('UnitTest:Fail'); + catch e + assert(strcmp(e.identifier,'mexopencv:error')); + end + end + end + +end diff --git a/opencv_contrib/test/unit_tests/TestMotionSaliencyBinWangApr2014.m b/opencv_contrib/test/unit_tests/TestMotionSaliencyBinWangApr2014.m index e24f68fe1..825c155f8 100644 --- a/opencv_contrib/test/unit_tests/TestMotionSaliencyBinWangApr2014.m +++ b/opencv_contrib/test/unit_tests/TestMotionSaliencyBinWangApr2014.m @@ -14,15 +14,12 @@ assert(isequal(saliency.ImageWidth, size(img,2))); assert(isequal(saliency.ImageHeight, size(img,1))); - cname = saliency.getClassName(); - validateattributes(cname, {'char'}, {'vector', 'nonempty'}); - for i=1:min(5,cap.FrameCount) img = cap.read(); img = cv.cvtColor(img, 'RGB2GRAY'); saliencyMap = saliency.computeSaliency(img); - validateattributes(saliencyMap, {'single'}, ... + validateattributes(saliencyMap, {'uint8'}, ... 
{'size',[size(img,1) size(img,2)], 'binary'}); end end diff --git a/opencv_contrib/test/unit_tests/TestObjectnessBING.m b/opencv_contrib/test/unit_tests/TestObjectnessBING.m index 5cf0f723f..1ccb838eb 100644 --- a/opencv_contrib/test/unit_tests/TestObjectnessBING.m +++ b/opencv_contrib/test/unit_tests/TestObjectnessBING.m @@ -4,8 +4,6 @@ methods (Static) function test_1 saliency = cv.ObjectnessBING(); - cname = saliency.getClassName(); - validateattributes(cname, {'char'}, {'vector', 'nonempty'}); resdir = tempname(); cObj = onCleanup(@() deleteDir(resdir)); diff --git a/opencv_contrib/test/unit_tests/TestPeiLinNormalization.m b/opencv_contrib/test/unit_tests/TestPeiLinNormalization.m new file mode 100644 index 000000000..3b76a7170 --- /dev/null +++ b/opencv_contrib/test/unit_tests/TestPeiLinNormalization.m @@ -0,0 +1,21 @@ +classdef TestPeiLinNormalization + %TestPeiLinNormalization + + methods (Static) + function test_1 + img = cv.imread(fullfile(mexopencv.root(),'test','shape03.png'), 'Grayscale',true); + T = cv.PeiLinNormalization(img); + validateattributes(T, {'double'}, {'size',[2 3]}); + end + + function test_error_argnum + try + cv.PeiLinNormalization(); + throw('UnitTest:Fail'); + catch e + assert(strcmp(e.identifier,'mexopencv:error')); + end + end + end + +end diff --git a/opencv_contrib/test/unit_tests/TestStaticSaliencyFineGrained.m b/opencv_contrib/test/unit_tests/TestStaticSaliencyFineGrained.m index cfd45c23f..fe52334b0 100644 --- a/opencv_contrib/test/unit_tests/TestStaticSaliencyFineGrained.m +++ b/opencv_contrib/test/unit_tests/TestStaticSaliencyFineGrained.m @@ -6,8 +6,6 @@ img = imread(fullfile(mexopencv.root(),'test','balloon.jpg')); saliency = cv.StaticSaliencyFineGrained(); - cname = saliency.getClassName(); - validateattributes(cname, {'char'}, {'vector', 'nonempty'}); saliencyMap = saliency.computeSaliency(img); validateattributes(saliencyMap, {'uint8'}, ... diff --git a/opencv_contrib/test/unit_tests/TestStaticSaliencySpectralResidual.m b/opencv_contrib/test/unit_tests/TestStaticSaliencySpectralResidual.m index a1103c6af..fc4b2001b 100644 --- a/opencv_contrib/test/unit_tests/TestStaticSaliencySpectralResidual.m +++ b/opencv_contrib/test/unit_tests/TestStaticSaliencySpectralResidual.m @@ -7,9 +7,6 @@ saliency.ImageWidth = 64; saliency.ImageHeight = 64; - cname = saliency.getClassName(); - validateattributes(cname, {'char'}, {'vector', 'nonempty'}); - img = imread(fullfile(mexopencv.root(),'test','balloon.jpg')); saliencyMap = saliency.computeSaliency(img); validateattributes(saliencyMap, {'single'}, ... diff --git a/samples/FileStorage_demo.m b/samples/FileStorage_demo.m index c25ab447e..31a626ffa 100644 --- a/samples/FileStorage_demo.m +++ b/samples/FileStorage_demo.m @@ -1,12 +1,14 @@ -%% FileStorage demo +%% Reading/Writing XML and YAML files % Demonstration of reading/writing from/to XML and YAML file storages. % -% +% Sources: +% +% * % %% Data % First we create some different variables to save. -% We have an integer, text string (calibration date), 2 matrices, +% We have an integer, a text string (calibration date), 2 matrices, % and a custom structure "feature" (struct-array), which includes feature % coordinates and LBP (local binary pattern) value. 
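%
% As a quick aside, the basic round-trip looks like this (a minimal sketch;
% it assumes that |cv.FileStorage| writes the fields of a scalar struct when
% given one, and returns a struct when reading, as this demo does below):
%
%   S = struct('myInt',int32(5), 'myMat',magic(3));  % data to persist
%   cv.FileStorage('test.yml', S);                   % write YAML file
%   S2 = cv.FileStorage('test.yml');                 % read it back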
frameCount = int32(5); diff --git a/samples/RotatedRect_demo.m b/samples/RotatedRect_demo.m index 4b6f2a08b..07e0aa4f3 100644 --- a/samples/RotatedRect_demo.m +++ b/samples/RotatedRect_demo.m @@ -1,4 +1,4 @@ -%% Rotated Rectangle demo +%% Rotated Rectangle % The sample below demonstrates how to use |cv.RotatedRect| % diff --git a/samples/akaze_match_demo.m b/samples/akaze_match_demo.m index 142b0234a..d71185589 100644 --- a/samples/akaze_match_demo.m +++ b/samples/akaze_match_demo.m @@ -5,9 +5,11 @@ % with given homography matrix, match them and count the number of inliers % (i.e. matches that fit in the given homography). % -% , -% -% +% Sources: +% +% * +% * +% * % %% Description @@ -18,7 +20,7 @@ % We are going to use images 1 and 3 from _Graffity_ sequence of Oxford % dataset. % -% <> +% <> % % Homography is given by a 3-by-3 matrix shown below. % diff --git a/samples/asift_demo.m b/samples/asift_demo.m index fd49082f0..e1dd02465 100644 --- a/samples/asift_demo.m +++ b/samples/asift_demo.m @@ -6,7 +6,9 @@ % implementation is based on SIFT, you can try to use SURF or ORB detectors % instead. Homography RANSAC is used to reject outliers. % -% +% Sources: +% +% * % function asift_demo() @@ -121,6 +123,9 @@ function asift_demo() function [kpts, descs] = affine_detect(detector, img, mask) %AFFINE_DETECT Affine-SIFT (ASIFT) algorithm % + % [kpts, descs] = affine_detect(detector, img) + % [kpts, descs] = affine_detect(detector, img, mask) + % % ## Input % * __detector__ detector object % * __img__ input image @@ -181,6 +186,8 @@ function asift_demo() function [img, mask, Ai] = affine_skew(tilt, phi, img, mask) %AFFINE_SKEW Transform image/mask by an affine distortion % + % [img, mask, Ai] = affine_skew(tilt, phi, img, mask) + % % ## Input % * __tilt__ tilt % * __phi__ rotation angle in degrees @@ -247,6 +254,9 @@ function asift_demo() function pos = select_poly(ax) %SELECT_POLY Select polygon area using the mouse % + % pos = select_poly() + % pos = select_poly(ax) + % % ## Input % * __ax__ axes handle, default gca % diff --git a/samples/backproject_demo_gui.m b/samples/backproject_demo_gui.m index 20c07f6fa..1d31c1dbc 100644 --- a/samples/backproject_demo_gui.m +++ b/samples/backproject_demo_gui.m @@ -6,10 +6,12 @@ % * How to use the OpenCV function |cv.calcBackProject| to calculate Back % Projection % -% , -% , -% , -% +% Sources: +% +% * +% * +% * +% * % %% Theory @@ -27,16 +29,16 @@ % (which we know represents a sample of skin tonality). You applied some mask % to capture only the histogram of the skin area: % -% <> +% <> % -% <> +% <> % % Now, let's imagine that you get another hand image (test Image) like the one % below, with its respective histogram: % -% <> +% <> % -% <> +% <> % % What we want to do is to use our _model histogram_ (that we know represents % a skin tonality) to detect skin areas in our test image. 
Here are the steps @@ -52,7 +54,7 @@ % * Applying the steps above, we get the following BackProjection image for % our test image: % -% <> +% <> % % * In terms of statistics, the values stored in _BackProjection_ represent % the _probability_ that a pixel in _test image_ belongs to a skin area, @@ -98,8 +100,8 @@ % source and target images im = { - 'http://docs.opencv.org/3.2.0/Histogram_Comparison_Source_0.jpg' - 'http://docs.opencv.org/3.2.0/Histogram_Comparison_Source_1.jpg' + 'https://docs.opencv.org/3.2.0/Histogram_Comparison_Source_0.jpg' + 'https://docs.opencv.org/3.2.0/Histogram_Comparison_Source_1.jpg' }; imgs = cell(size(im)); for i=1:numel(im) diff --git a/samples/caffe_googlenet_demo.m b/samples/caffe_googlenet_demo.m index c56be27b6..8d8dfb518 100644 --- a/samples/caffe_googlenet_demo.m +++ b/samples/caffe_googlenet_demo.m @@ -1,82 +1,66 @@ -%% Deep Neural Network Demo +%% Deep Neural Network with Caffe models % Load Caffe framework models. % % In this tutorial you will learn how to use DNN module for image % classification by using GoogLeNet trained network from % . % -% , -% +% Sources: +% +% * +% * +% * % %% BVLC GoogLeNet % First, we download GoogLeNet model files: % -% * |bvlc_googlenet.prototxt| and |bvlc_googlenet.caffemodel| +% * |deploy.prototxt| and |bvlc_googlenet.caffemodel| % * Also we need file with names of % % classes: |synset_words.txt|. % -dirDNN = fullfile(mexopencv.root(), 'test', 'dnn'); +dirDNN = fullfile(mexopencv.root(), 'test', 'dnn', 'GoogLeNet'); modelLabels = fullfile(dirDNN, 'synset_words.txt'); -modelTxt = fullfile(dirDNN, 'bvlc_googlenet.prototxt'); -modelBin = fullfile(dirDNN, 'bvlc_googlenet.caffemodel'); -if ~isdir(dirDNN) - mkdir(dirDNN); -end -if exist(modelLabels, 'file') ~= 2 - disp('Downloading...') - url = 'https://cdn.rawgit.com/opencv/opencv/3.3.0/samples/data/dnn/synset_words.txt'; - urlwrite(url, modelLabels); -end -if exist(modelTxt, 'file') ~= 2 - disp('Downloading...') - url = 'https://cdn.rawgit.com/opencv/opencv/3.3.0/samples/data/dnn/bvlc_googlenet.prototxt'; - urlwrite(url, modelTxt); -end -if exist(modelBin, 'file') ~= 2 - disp('Downloading...') - url = 'http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel'; - urlwrite(url, modelBin); - - % verify checksum of downloaded file - if ~mexopencv.isOctave() - sha1 = '405fc5acd08a3bb12de8ee5e23a96bec22f08204'; - md = java.security.MessageDigest.getInstance('SHA-1'); - fid = fopen(modelBin, 'rb'); - while true - [b,n] = fread(fid, 1024*512, '*int8'); % 512KB - if n <= 0, break; end - md.update(b); - end - fclose(fid); - hex = reshape(dec2hex(typecast(md.digest(),'uint8')).', 1, []); - assert(isequal(hex, sha1), 'Checksum failed'); +modelTxt = fullfile(dirDNN, 'deploy.prototxt'); +modelBin = fullfile(dirDNN, 'bvlc_googlenet.caffemodel'); % 51 MB file +files = {modelLabels, modelTxt, modelBin}; +urls = { + 'https://cdn.rawgit.com/opencv/opencv/3.3.1/samples/data/dnn/synset_words.txt'; + 'https://cdn.rawgit.com/opencv/opencv/3.3.1/samples/data/dnn/bvlc_googlenet.prototxt'; + 'http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel'; +}; +if ~isdir(dirDNN), mkdir(dirDNN); end +for i=1:numel(files) + if exist(files{i}, 'file') ~= 2 + disp('Downloading...') + urlwrite(urls{i}, files{i}); end end %% Load class labels -fid = fopen(modelLabels, 'rt'); -if mexopencv.isOctave() - %HACK: %[] format specifier not implemented in Octave - C = textscan(fid, '%s', 'Delimiter','\n'); - labels = regexprep(C{1}, '^\w+\s*', '', 'once'); +if ~mexopencv.isOctave() + fid = fopen(modelLabels, 
'rt'); + C = textscan(fid, '%*s %[^\n]'); + fclose(fid); + labels = C{1}; else - C = textscan(fid, '%s %[^\n]'); - labels = C{2}; + %HACK: textscan is buggy and unreliable in Octave! + labels = textread(modelLabels, '%s', 'Delimiter','\n'); + labels = regexprep(labels, '^\w+\s*', '', 'once'); end -fclose(fid); - fprintf('%d classes\n', numel(labels)); %% Create and initialize network from Caffe model -net = cv.Net(); -net.import('Caffe', modelTxt, modelBin); +net = cv.Net('Caffe', modelTxt, modelBin); assert(~net.empty(), 'Cant load network'); +if false + net.setPreferableTarget('OpenCL'); +end %% Prepare blob from input image % Read input image (BGR channel order) -img = cv.imread(fullfile(mexopencv.root(), 'test', 'cat.jpg'), ... +img = cv.imread(fullfile(mexopencv.root(), 'test', 'space_shuttle.jpg'), ... 'Color',true, 'FlipChannels',false); %% @@ -86,17 +70,17 @@ % (B: 104.0069879317889, G: 116.66876761696767, R: 122.6789143406786) if true blob = cv.Net.blobFromImages(img, ... - 'Size',[224 224], 'Mean',[104 117 123], 'SwapRB',true); + 'Size',[224 224], 'Mean',[104 117 123], 'SwapRB',false); else - % NOTE: blobFromImages does crop/resize to maintain aspect ratio of image - blob = cv.resize(img, [224 224]); % Size - blob = bsxfun(@plus, blob, uint8(cat(3,123,117,104))); % Mean (BGR) - blob = permute(blob, [4 3 1 2]); % HWCN -> NCHW - blob = single(blob); % CV_32F + % NOTE: blobFromImages does crop/resize to preserve aspect ratio of image + blob = cv.resize(img, [224 224]); % Size + blob = single(blob); % CV_32F + blob = bsxfun(@minus, blob, cat(3,104,117,123)); % Mean (BGR) + blob = permute(blob, [4 3 1 2]); % HWCN -> NCHW end %% Set the network input -% In |bvlc_googlenet.prototxt| the network input blob named as "data". +% In |deploy.prototxt| the network input blob named as "data". % Other blobs labeled as "name_of_layer.name_of_layer_output". net.setInput(blob, 'data'); @@ -104,7 +88,7 @@ % During the forward pass output of each network layer is computed, % but in this example we need output from "prob" layer only. tic -p = net.forward('prob'); % vector of length 1000 +p = net.forward('prob'); % 1x1000 vector toc %% Gather output of "prob" layer @@ -123,7 +107,8 @@ disp(t(1:5,:)); end -subplot(3,1,1:2), imshow(flip(img,3)), title('cat.jpg') +subplot(3,1,1:2), imshow(flip(img,3)) +title('space_shuttle.jpg', 'Interpreter','none') subplot(3,1,3), barh(1:5, p(ord(1:5))*100) set(gca, 'YTickLabel',labels(ord(1:5)), 'YDir','reverse') axis([0 100 0 6]), grid on diff --git a/samples/calibration_capture_demo.m b/samples/calibration_capture_demo.m index 50a624f8b..ec67d2eed 100644 --- a/samples/calibration_capture_demo.m +++ b/samples/calibration_capture_demo.m @@ -54,7 +54,7 @@ % % The following figure illustrates the pinhole camera model. % -% <> +% <> % % Real lenses usually have some distortion, mostly radial distortion and % slight tangential distortion. So, the above model is extended as: @@ -84,7 +84,7 @@ % distortion (typically $k_1 > 0$ and pincushion distortion (typically % $k_1 < 0$). % -% <> +% <> % % In some cases the image sensor may be tilted in order to focus an oblique % plane in front of the camera (Scheimpfug condition). 
This can be useful for @@ -156,10 +156,10 @@ pattern = 'chessboard'; switch pattern case 'chessboard' - % http://docs.opencv.org/3.2.0/pattern.png + % https://docs.opencv.org/3.3.1/pattern.png bsz = [9 6]; case 'acircles' - % http://docs.opencv.org/3.2.0/acircles_pattern.png + % https://docs.opencv.org/3.3.1/acircles_pattern.png % or https://nerian.com/support/resources/patterns/ bsz = [4 11]; case 'circles' @@ -169,6 +169,7 @@ %% % open webcam cap = cv.VideoCapture(0); +assert(cap.isOpened(), 'Failed to initialize camera capture'); img = cap.read(); sz = size(img); diff --git a/samples/calibration_demo.m b/samples/calibration_demo.m index 8dac403c8..3a6de8ffa 100644 --- a/samples/calibration_demo.m +++ b/samples/calibration_demo.m @@ -1,6 +1,6 @@ %% Camera Calibration % -% This example demonstrates camera calibration in Opencv. It shows usage of +% This example demonstrates camera calibration in OpenCV. It shows usage of % the following functions: % % * |cv.findChessboardCorners|, |cv.findCirclesGrid| @@ -34,11 +34,13 @@ % * about distortions in camera, intrinsic and extrinsic parameters of camera % * how to find these parameters, undistort images, etc. % -% , -% , -% , -% , -% +% Sources: +% +% * +% * +% * +% * +% * % %% Theory @@ -61,10 +63,10 @@ % shown below, where two edges of a chess board are marked with red lines. But % you can see that border is not a straight line and doesn't match with the % red line. All the expected straight lines are bulged out. See -% for more +% for more % details. % -% <> +% <> % % Tangential distortion occurs because the image taking lenses are not % perfectly parallel to the imaging plane. So some areas in image may look @@ -78,7 +80,7 @@ % So we have five distortion parameters which in OpenCV are presented as one % row matrix with 5 columns: % -% $$distortion\_coefficients=(k_1, k_2, p_1, p_2, k_3)$$ +% $$distortion\_coefficients = (k_1, k_2, p_1, p_2, k_3)$$ % % In addition to this, we need to find a few more information, like intrinsic % and extrinsic parameters of a camera. Intrinsic parameters are specific to a @@ -111,7 +113,7 @@ % * Symmetrical circle pattern % * Asymmetrical circle pattern % -% <> +% <> % % Basically, you need to take snapshots of these patterns with your camera and % let OpenCV find them. Each found pattern results in a new equation (we know @@ -168,9 +170,9 @@ % lens distortion. For example, you can see in the result below that all the % edges become straight. % -% <> +% <> % -% <> +% <> % %% Code @@ -615,7 +617,7 @@ function visualizeDistortion(calib, opts) D = calib.D; if numel(D) < 14, D(14) = 0; end - % http://docs.opencv.org/3.2.0/d9/d0c/group__calib3d.html#details + % https://docs.opencv.org/3.2.0/d9/d0c/group__calib3d.html#details nstep = 20; [u,v] = meshgrid(linspace(0,opts.imageSize(1)-1,nstep), ... linspace(0,opts.imageSize(2)-1,nstep)); diff --git a/samples/calibration_pose_demo.m b/samples/calibration_pose_demo.m index 715e58187..f1cfe51bb 100644 --- a/samples/calibration_pose_demo.m +++ b/samples/calibration_pose_demo.m @@ -3,8 +3,10 @@ % In this sample, we learn to exploit calib3d module to create some 3D effects % in images from a calibrated camera. % -% , -% +% Sources: +% +% * +% * % %% diff --git a/samples/camshift_demo_gui.m b/samples/camshift_demo_gui.m index ab06d775b..abaffd06f 100644 --- a/samples/camshift_demo_gui.m +++ b/samples/camshift_demo_gui.m @@ -1,4 +1,4 @@ -%% CAMShift Demo +%% CAMShift % % In this demo, we learn about Meanshift and Camshift algorithms to find and % track objects in videos. 
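%
% The core of the tracking loop that this demo builds up to has roughly the
% following shape (a minimal sketch; |prob| is the per-frame backprojection
% of the object's hue histogram, |win| is the current search window, and the
% 'Criteria' values shown are illustrative assumptions, not the demo's
% exact settings):
%
%    [box, win] = cv.CamShift(prob, win, 'Criteria',...
%        struct('type','Count+EPS', 'maxCount',10, 'epsilon',1));
%
% where |box| is the rotated rectangle drawn around the tracked object.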
@@ -7,10 +7,12 @@ % You select a color objects such as your face and it tracks it. % It reads from video camera by default. % -% , -% , -% , -% +% Sources: +% +% * +% * +% * +% * % %% Meanshift @@ -21,7 +23,7 @@ % to the area of maximum pixel density (or maximum number of points). It is % illustrated in the simple image given below: % -% <> +% <> % % The initial window is shown in blue circle with the name "C1". Its original % center is marked in blue rectangle, named "C1_o". But if you find the @@ -36,7 +38,7 @@ % see in image, it has maximum number of points. The whole process is % demonstrated on a static image below: % -% <> +% <> % % So we normally pass the histogram backprojected image and initial target % location. When the object moves, obviously the movement is reflected in @@ -67,7 +69,7 @@ % meanshift with new scaled search window and previous window location. The % process is continued until required accuracy is met. % -% <> +% <> % % In OpenCV, camshift is almost same as meanshift, but it returns a rotated % rectangle (that is our result) and box parameters (used to be passed as @@ -75,7 +77,7 @@ % % For additional resources, see: % -% * French Wikipedia page on . +% * French Wikipedia page on . % (The two animations are taken from there) % * Bradski, G.R., "Real time face and object tracking as a component of a % perceptual user interface," Applications of Computer Vision, 1998. diff --git a/samples/camshift_track_demo.m b/samples/camshift_track_demo.m index e0edea5b8..191587a8c 100644 --- a/samples/camshift_track_demo.m +++ b/samples/camshift_track_demo.m @@ -2,8 +2,10 @@ % % In this demo, we implement a simple face tracker applied on an input video. % -% , -% +% Sources: +% +% * +% * % %% Video diff --git a/samples/capture_demo_gui.m b/samples/capture_demo_gui.m index 19cabf0d7..68ab2155f 100644 --- a/samples/capture_demo_gui.m +++ b/samples/capture_demo_gui.m @@ -1,10 +1,12 @@ -%% Video Capture demo +%% Video Capture % We learn how to capture live stream from camera and display it, while % adjusting basic video color properties. % -% -% -% +% Sources: +% +% * +% * +% * % function varargout = capture_demo_gui() diff --git a/samples/clahe_demo_gui.m b/samples/clahe_demo_gui.m index 52d61ce97..4a6ee86b1 100644 --- a/samples/clahe_demo_gui.m +++ b/samples/clahe_demo_gui.m @@ -3,8 +3,10 @@ % In this demo, we will learn the concepts of histogram equalization and use % it to improve the contrast of our images. % -% , -% +% Sources: +% +% * +% * % %% Histogram Equalization @@ -16,14 +18,14 @@ % below image, from Wikipedia) and that is what Histogram Equalization does % (in simple words). This normally improves the contrast of the image. % -% <> +% <> % % Refer to the Wikipedia page on -% +% % for more details about it. It has a very good explanation with worked out % examples. % -% <> +% <> % % You can see histogram lies in brighter region. We need the full spectrum. % For that, we need a transformation function which maps the input pixels in @@ -37,7 +39,7 @@ % transform. Next we calculate its histogram and CDF as before, and result % would look like below : % -% <> +% <> % % Another important feature is that, even if the image was a darker image % (instead of a brighter one we used), after equalization we will get almost @@ -50,7 +52,7 @@ % OpenCV has a function to do this, |cv.equalizeHist|. Its input is just % grayscale image and output is our histogram equalized image. 
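% For example, as a one-liner (where |src| is any 8-bit grayscale image):
%
%    dst = cv.equalizeHist(src);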
% -% <> +% <> % % So now you can take different images with different light conditions, % equalize it and check the results. @@ -67,7 +69,7 @@ % of the image. In many cases, it is not a good idea. For example, below image % shows an input image and its result after global histogram equalization. % -% <> +% <> % % It is true that the background contrast has improved after histogram % equalization. But compare the face of statue in both images. We lost most of @@ -88,9 +90,9 @@ % See CLAHE result below and compare it with results above, especially the % statue region: % -% <> +% <> % -% Also see . +% Also see . % %% Code diff --git a/samples/cloning_demo.m b/samples/cloning_demo.m index fc5cd2759..3750bc4ec 100644 --- a/samples/cloning_demo.m +++ b/samples/cloning_demo.m @@ -1,4 +1,4 @@ -%% Seamless Cloning demo +%% Seamless Cloning % This tutorial demonstrates how to use OpenCV seamless cloning module. % % * Normal Cloning @@ -13,9 +13,11 @@ % % Test images are downloaded from |opencv_extra| on Github. % -% , -% , -% +% Sources: +% +% * +% * +% * % %% Test images diff --git a/samples/coherence_demo_gui.m b/samples/coherence_demo_gui.m index 27c2a3c3f..04a6e2d33 100644 --- a/samples/coherence_demo_gui.m +++ b/samples/coherence_demo_gui.m @@ -1,11 +1,13 @@ -%% Coherence-enhancing filtering +%% Coherence-Enhancing Filtering % % Inspired by: % % * Joachim Weickert, "Coherence-Enhancing Shock Filters" % % -% +% Sources: +% +% * % function varargout = coherence_demo_gui(im) diff --git a/samples/color_histogram_demo_gui.m b/samples/color_histogram_demo_gui.m index a4834b7d4..8e87b6548 100644 --- a/samples/color_histogram_demo_gui.m +++ b/samples/color_histogram_demo_gui.m @@ -1,11 +1,13 @@ -%% Video histogram +%% Video Histogram % % Demo to show live histogram of video, both 1D histograms of RGB channels % and 2D histogram of Hue-Saturation. % -% , -% , -% +% Sources: +% +% * +% * +% * % function varargout = color_histogram_demo_gui(varargin) diff --git a/samples/common/MOSSETracker.m b/samples/common/MOSSETracker.m new file mode 100644 index 000000000..445d6cf89 --- /dev/null +++ b/samples/common/MOSSETracker.m @@ -0,0 +1,327 @@ +classdef MOSSETracker < handle + %MOSSETRACKER MOSSE filter based tracker + % + % Correlation filter based tracking using MOSSE filters + % (Minimum Output Sum of Squared Error), described in [Bolme2010]. + % + % The target object appearance is modeled using adaptive correlation + % filters, and tracking is performed via convolution. + % + % The filters are learnt online in an adaptive manner for visual tracking. + % + % ## Sources: + % + % * https://github.com/opencv/opencv/blob/3.3.1/samples/python/mosse.py + % + % ## References + % [Bolme2010]: + % > David S. Bolme et al. + % > "Visual Object Tracking using Adaptive Correlation Filters" + % > [PDF](http://www.cs.colostate.edu/~draper/papers/bolme_cvpr10.pdf) + % + + properties + % criteria for successful tracking, psr > min_psr + % A value between [20,60] indicates very strong peaks, + % less than 7 indicates bad track quality. 
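+        % (For reference: the correlate method below computes
+        %  psr = (peak - mean(sidelobe)) / std(sidelobe),
+        %  where the sidelobe region excludes an 11x11 window
+        %  centered on the response peak.)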
+ min_psr = 8.0; + end + + properties (Access = private) + % Hanning window used when preprocessing images + win + % Fourier transform of 2D Gaussian shaped peak centered on target + G + % correlation filters/kernels + H1 + H2 + H + % last image of target + last_img + % last correlation response + last_resp + end + + properties (SetAccess = private) + % center [x,y] of tracked object rectangle + pos + % size [w,h] of tracked object rectangle + siz + % Peak-to-Sidelobe Ratio (PSR) + psr + end + + properties (Dependent, SetAccess = private) + % tracked object position, as a `[x y w h]` rectangle + bbox + end + + methods + function obj = MOSSETracker(frame, rect) + %MOSSETRACKER Constructor + % + % obj = MOSSETracker(frame, rect) + % + % ## Input + % * __frame__ first frame with target to track + % * __rect__ rectangle around target object `[x,y,w,h]` + % + + % center and size of object rectangle (after DFT optimal padding) + obj.siz = arrayfun(@(s) cv.getOptimalDFTSize(s), rect(3:4)); + obj.pos = floor((2*rect(1:2) + rect(3:4) - obj.siz) / 2); + obj.pos = obj.pos + 0.5 * (obj.siz - 1); + + % create Hanning window of same size as rect + obj.win = cv.createHanningWindow(obj.siz, 'Type','single'); + + % create Gaussian shaped peak, centered and of same size as rect + % (Kronecker delta with 1 at target center, 0 elsewhere) + g = zeros(obj.siz(2), obj.siz(1), 'single'); + g(floor(end/2),floor(end/2)) = 1; + g = cv.GaussianBlur(g, 'KSize',[-1 -1], 'SigmaX',2.0); + g = g / max(g(:)); + + % initialize correlation filters + obj.G = cv.dft(g, 'ComplexOutput',true); + obj.H1 = zeros(size(obj.G), class(obj.G)); + obj.H2 = zeros(size(obj.G), class(obj.G)); + + % train filters on a bunch of augmented images + % by applying small random affine perturbations + img = cv.getRectSubPix(frame, obj.siz, obj.pos); + for i=1:128 + a = obj.preprocess(random_warp(img)); + obj.update_kernels(a); + end + + % process first frame + obj.update(frame); + end + + function update(obj, frame, rate) + %UPDATE Track object in new frame and update filters + % + % obj.update(frame) + % obj.update(frame, rate) + % + % ## Input + % * __frame__ new frame in which to track object + % * __rate__ learning rate in [0,1] range, default 0.125 + % + + % crop and process image from current position + obj.last_img = cv.getRectSubPix(frame, obj.siz, obj.pos); + img = obj.preprocess(obj.last_img); + + % track target by correlating filter over image + [obj.last_resp, d, obj.psr] = obj.correlate(img); + + % check that response peak is strong enough (using PSR) + if obj.psr <= obj.min_psr + return; + end + + % update object position + obj.pos = obj.pos + d; + + % crop and process image from new position + obj.last_img = cv.getRectSubPix(frame, obj.siz, obj.pos); + img = obj.preprocess(obj.last_img); + + % online update (filter training) + if nargin < 3, rate = 0.125; end + obj.update_kernels(img, rate); + end + + function [img, f, resp] = visualize_state(obj) + %VISUALIZE_STATE Visualize tracker state + % + % [img, f, resp] = obj.visualize_state() + % + % ## Output + % * __img__ cropped object image + % * __f__ correlation filter, 0-freq centered and 8-bit normalized + % * __resp__ correlation response, 8-bit normalized and clipped + % + + % image + img = obj.last_img; + + % kernel + f = cv.dft(obj.H, 'Inverse',true, 'Scale',true, 'RealOutput',true); + f = circshift(f, floor(-[size(f,1) size(f,2)]/2)); + if true + f = (f - min(f(:))) / (max(f(:)) - min(f(:))); + else + f = cv.normalize(f, 'NormType','MinMax'); + end + f = uint8(255 * f); + + %
response + if true + resp = obj.last_resp / max(obj.last_resp(:)); + else + resp = cv.normalize(obj.last_resp, 'NormType','Inf'); + end + resp = uint8(255 * min(max(resp, 0), 1)); + end + + function vis = draw_object(obj, vis) + %DRAW_OBJECT Draw current location of tracked object + % + % vis = obj.draw_object(vis) + % + % ## Input + % * __vis__ input image + % + % ## Output + % * __vis__ output image with drawn object + % + + clr = {'Color',[0 0 255]}; + p1 = fix(obj.pos - 0.5 * obj.siz); + p2 = fix(obj.pos + 0.5 * obj.siz); + + % draw location + vis = cv.rectangle(vis, p1, p2, 'Thickness',2, clr{:}); + if obj.psr > obj.min_psr + % good track, draw center + vis = cv.circle(vis, fix(obj.pos), 2, 'Thickness',-1, clr{:}); + else + % bad track, draw cross + vis = cv.line(vis, [p1; p2(1) p1(2)], [p2; p1(1) p2(2)], clr{:}); + end + + % draw PSR value + vis = cv.putText(vis, sprintf('PSR: %.2f', obj.psr), ... + [p1(1) p2(2)+16], 'FontFace','HersheyPlain', ... + 'Thickness',2, 'LineType','AA', clr{:}); + end + + function bbox = get.bbox(obj) + p1 = obj.pos - 0.5 * obj.siz; + p2 = obj.pos + 0.5 * obj.siz; + bbox = cv.Rect.from2points(p1, p2); + end + end + + methods (Access = private) + function img = preprocess(obj, img) + %PREPROCESS Process input frame + % + % img = obj.preprocess(img) + % + % ## Input + % * __img__ input image + % + % ## Output + % * __img__ output processed image + % + + % log transform, normalize, and multiply by Hanning window + img = log(single(img) + 1); + if true + img = img / norm(img(:)); + elseif true + img = cv.normalize(img, 'NormType','L2'); + else + img = (img - mean(img(:))) / std(img(:)); + end + img = img .* obj.win; + end + + function [resp, d, psr] = correlate(obj, img) + %CORRELATE Correlate filter over image and find peak in response + % + % [resp, d, psr] = obj.correlate(img) + % + % ## Input + % * __img__ processed image + % + % ## Output + % * __resp__ correlation response + % * __d__ offset of peak location from center `[x,y]` + % * __psr__ PSR value, a measure of correlation peak strength. Can + % be used to stop the online update if PSR is too low, which is + % an indication that the object is occluded or tracking has + % failed. + % + + % correlation (performed in frequency domain) + resp = cv.mulSpectrums(cv.dft(img, 'ComplexOutput',true), ... + obj.H, 'ConjB',true); + resp = cv.dft(resp, 'Inverse',true, 'Scale',true, 'RealOutput',true); + + % max value location in correlation response + [mval, midx] = max(resp(:)); + [my,mx] = ind2sub(size(resp), midx); + mloc = [mx my]; %TODO: -1 ?? + + % offset from center + d = mloc - floor([size(resp,2) size(resp,1)] / 2); + + % compute PSR + sidelobe = cv.rectangle(resp, mloc-5, mloc+5, ... + 'Color',NaN, 'Thickness',-1); + sidelobe = sidelobe(~isnan(sidelobe)); + psr = (mval - mean(sidelobe)) / std(sidelobe); + end + + function update_kernels(obj, img, rate) + %UPDATE_KERNELS Update correlation kernels + % + % obj.update_kernels(img) + % obj.update_kernels(img, rate) + % + % ## Input + % * __img__ processed image + % * __rate__ learning rate + % + + F = cv.dft(img, 'ComplexOutput',true); + H_1 = cv.mulSpectrums(obj.G, F, 'ConjB',true); + H_2 = cv.mulSpectrums( F, F, 'ConjB',true); + + if nargin < 3 + % initialization phase + obj.H1 = obj.H1 + H_1; + obj.H2 = obj.H2 + H_2; + else + % update phase + obj.H1 = obj.H1 * (1 - rate) + H_1 * rate; + obj.H2 = obj.H2 * (1 - rate) + H_2 * rate; + end + + obj.H = complex(obj.H1(:,:,1), obj.H1(:,:,2)) ./ ...
+ complex(obj.H2(:,:,1), obj.H2(:,:,2)); + obj.H = cat(3, real(obj.H), -imag(obj.H)); + end + end +end + +function img = random_warp(img, coef) + %RANDOM_WARP Warp image using a random affine transformation + % + % img = random_warp(img) + % img = random_warp(img, coef) + % + % ## Input + % * __img__ input image + % * __coef__ randomness coefficient, default 0.2 + % + % ## Output + % * __img__ output warped image + % + + if nargin < 2, coef = 0.2; end + ang = (rand() - 0.5) * coef; + T = [cos(ang) -sin(ang); sin(ang) cos(ang)]; + T = T + (rand(2) - 0.5) * coef; + + sz = [size(img,2); size(img,1)]; + c = fix(sz / 2); + T(:,3) = c - T * c; + + img = cv.warpAffine(img, T, 'DSize',sz, 'BorderType','Reflect'); +end diff --git a/samples/common/PlaneTracker.m b/samples/common/PlaneTracker.m new file mode 100644 index 000000000..b3d16cb54 --- /dev/null +++ b/samples/common/PlaneTracker.m @@ -0,0 +1,205 @@ +classdef PlaneTracker < handle + %PLANETRACKER Multi-target planar tracker + % + % Performs tracking with homography matching using features2d framework. + % ORB features and FLANN matcher are used by default. + % + % ## Sources: + % + % * https://github.com/opencv/opencv/blob/3.3.1/samples/python/plane_tracker.py + % + + properties + % minimum number of keypoints detected to be considered as candidate + min_kpts_count = 10; % should be >= 2 + % used in 2-NN ratio test to be considered a good match + match_ratio = 0.75; + % minimum number of good matches, otherwise excluded + min_match_count = 10; % should be >= 4 + % minimum number of inlier points for a valid homography estimation + min_inliers = 10; + % RANSAC maximum reprojection error to treat a point pair as inliers + ransac_thresh = 3.0; + end + + properties (SetAccess = private) + % feature detector / descriptor extractor object + detector + % descriptor matcher object + matcher + % plane targets to track + % (struct array as described in PlaneTracker.addTarget) + targets + end + + methods + function obj = PlaneTracker(varargin) + %PLANETRACKER Constructor + % + % obj = PlaneTracker() + % obj = PlaneTracker(detector, matcher) + % + % ## Input + % * __detector__ feature detector, default `ORB` + % * __matcher__ descriptor matcher, default `FlannBasedMatcher` + % + + % feature detector + if nargin < 1 + obj.detector = cv.ORB('MaxFeatures',1000); + else + obj.detector = varargin{1}; + end + + % descriptor matcher + if nargin < 2 + obj.matcher = cv.DescriptorMatcher('FlannBasedMatcher', ... + 'Index',...
+ {'LSH', 'TableNumber',6, 'KeySize',12, 'MultiProbeLevel',1}); + else + obj.matcher = varargin{2}; + end + + % initialize plane targets struct array + obj.targets = struct('image',{}, 'rect',{}, 'kpts',{}, 'descs',{}); + end + + function varargout = addTarget(obj, img, win) + %ADDTARGET Add a new tracking target + % + % t = obj.addTarget(img, win) + % + % ## Input + % * __img__ target image, containing target to track + % * __win__ rectangle around plane target `[x,y,w,h]` + % + % ## Output + % * __t__ PlaneTarget struct with the following fields: + % * __image__ image in which target is tracked + % * __rect__ tracked rectangle `[x1,y1,x2,y2]` + % (as 2 opposing corners, top-left and bottom-right) + % * __kpts__ keypoints detected inside rectangle + % * __descs__ their descriptors + % + + % new target to track + t = struct(); + t.image = img; + t.rect = [cv.Rect.tl(win), cv.Rect.br(win)]; + + % create ROI mask + mask = zeros(size(img,1), size(img,2), 'uint8'); + mask = cv.rectangle(mask, win, 'Color',255, 'Thickness','Filled'); + + % detect and compute features of first frame inside ROI region + [t.kpts, t.descs] = obj.detector.detectAndCompute(img, 'Mask',mask); + if numel(t.kpts) < obj.min_kpts_count + disp('Not enough keypoints detected in target'); + return; + end + + % add descriptors to matcher training set + obj.matcher.add({t.descs}); + + % store target + obj.targets(end+1) = t; + if nargout > 0 + varargout{1} = t; + end + end + + function clear(obj) + %CLEAR Remove all tracked targets + % + % obj.clear() + % + + obj.detector.clear(); + obj.matcher.clear(); + obj.targets = obj.targets([]); + end + + function tracked = track(obj, img) + %TRACK Return a list of detected tracked objects + % + % tracked = obj.track(img) + % + % ## Input + % * __img__ new input image in which to track targets + % + % ## Output + % * __tracked__ TrackedTarget struct array with these fields: + % * __index__ index of tracked target (1-based index into + % `targets` array) + % * __target__ reference to the matching PlaneTarget + % * __pt0__ matched points coords in target image + % * __pt1__ matched points coords in new input frame + % * __H__ 3x3 homography matrix from `pt0` to `pt1` + % * __quad__ target boundary quad in new input frame + % (top-left, top-right, bottom-right, bottom-left) + % + + % initialize tracked target struct array + tr = struct('index',[], 'target',[], 'pt0',[], 'pt1',[], 'H',[], 'quad',[]); + tracked = tr([]); + + % check that addTarget was called + if isempty(obj.targets) + return; + end + + % detect and compute features in current frame + [kpts, descs] = obj.detector.detectAndCompute(img); + if numel(kpts) < obj.min_kpts_count + return; + end + + % match against first-frame features (all targets at once) using + % 2-NN matching with ratio test (if the closest match is MATCH_RATIO + % closer than the second closest one, then it's a good match) + matches = obj.matcher.knnMatch(descs, 2); + idx = cellfun(@(m) numel(m) == 2 && ...
+ (m(1).distance < obj.match_ratio * m(2).distance), matches); + matches = cellfun(@(m) m(1), matches(idx)); + if numel(matches) < obj.min_match_count + return; + end + + % loop over matches for each target + for i=1:numel(obj.targets) + % matches by target id + m = matches([matches.imgIdx] == (i-1)); + if numel(m) < obj.min_match_count + continue; + end + + tr.index = i; + tr.target = obj.targets(i); + + % estimate homography using RANSAC + tr.pt0 = cat(1, tr.target.kpts([m.trainIdx] + 1).pt); + tr.pt1 = cat(1, kpts([m.queryIdx] + 1).pt); + [tr.H, inliers] = cv.findHomography(tr.pt0, tr.pt1, ... + 'Method','Ransac', 'RansacReprojThreshold',obj.ransac_thresh); + inliers = logical(inliers); + if isempty(tr.H) || nnz(inliers) < obj.min_inliers + continue; + end + % keep only inlier points + tr.pt0 = tr.pt0(inliers,:); + tr.pt1 = tr.pt1(inliers,:); + + % project object bounding box using homography to locate it in new frame + tr.quad = tr.target.rect([1 2; 3 2; 3 4; 1 4]); % [TL; TR; BR; BL] + tr.quad = cv.perspectiveTransform(tr.quad, tr.H); + + % append to tracked targets + tracked(end+1) = tr; + end + + % sort tracked targets by number of matched points + [~,ord] = sort(arrayfun(@(tr) size(tr.pt0,1), tracked), 'descend'); + tracked = tracked(ord); + end + end +end diff --git a/samples/common/RectSelector.m b/samples/common/RectSelector.m new file mode 100644 index 000000000..298b5352c --- /dev/null +++ b/samples/common/RectSelector.m @@ -0,0 +1,246 @@ +classdef RectSelector < handle + %RECTSELECTOR Select a rectangle by drawing a box using the mouse + % + % You can set a callback function to be invoked when a selection is made + % (asynchronous). You can also block and wait for a selection to be made + % (synchronous). + % + % ## Sources: + % + % * https://github.com/opencv/opencv/blob/3.3.1/samples/python/common.py + % * https://github.com/opencv/opencv_contrib/blob/3.2.0/modules/tracking/src/roiSelector.cpp + % * https://github.com/opencv/opencv/blob/3.3.1/modules/highgui/src/roiSelector.cpp + % + % See also: imrect, rbbox, dragrect + % + + properties + clip % logical, clip rectangle coordinates to image limits + crosshair % logical, whether to draw crosshairs inside rectangle + callback % function handle, called when a rectangle is selected + end + + properties (Access = private) + waiting % keep track of uiwait/uiresume state + lh % event listener object + handles % struct of graphics handles (image, line, axes, figure) + lims % x/y axis limits of image [xmn xmx ymn ymx] + pt % selection starting point [x y] + rect % selection rectangle [x1 y1 x2 y2] (2 opposing corners TL/BR) + end + + methods + function obj = RectSelector(himg) + %RECTSELECTOR Constructor + % + % obj = RectSelector(himg) + % + % ## Input + % * __himg__ handle to image graphics object + % + + assert(ishghandle(himg) && strcmp(get(himg, 'Type'), 'image')); + + % initialize state + obj.clip = false; + obj.crosshair = false; + obj.callback = ''; + obj.waiting = false; + obj.pt = []; + obj.rect = []; + obj.lims = [get(himg, 'XData'), get(himg, 'YData')]; + + % create a line object to plot dragging box + hax = ancestor(himg, 'axes'); + hfig = ancestor(hax, 'figure'); + hlin = line('XData',NaN, 'YData',NaN, 'Parent',hax, ... 
+ 'Color','g', 'LineWidth',2); + + % store graphics handles + obj.handles = struct('img',himg, 'lin',hlin, 'ax',hax, 'fig',hfig); + + % attach mouse press event handlers + % (we could also use iptaddcallback/iptremovecallback which + % support having multiple callbacks without overriding existing + % ones if any) + set(hfig, 'WindowButtonDownFcn',@obj.onMouseDown, ... + 'Interruptible','off', 'BusyAction','cancel'); + + % call destructor when paired graphics object is destroyed + obj.lh = event.listener(hfig, 'ObjectBeingDestroyed', ... + @(~,~) obj.delete()); + end + + function delete(obj) + %DELETE Destructor + % + % obj.delete() + % + + % remove event listener + if ~isempty(obj.lh) + delete(obj.lh); + end + + % detach mouse press event handler + set(obj.handles.fig, 'WindowButtonDownFcn',''); + + % delete line object + delete(obj.handles.lin); + + % resume execution if waiting + resume(obj); + end + + function setLineProps(obj, varargin) + %SETLINEPROPS Set line properties + % + % obj.setLineProps(...) + % + % See also: set, line + % + + set(obj.handles.lin, varargin{:}); + end + + function bool = isDragging(obj) + %ISDRAGGING Return whether we are currently in dragging mode + % + % bool = obj.isDragging() + % + % ## Output + % * __bool__ true or false + % + + bool = ~isempty(obj.pt); + end + + function pos = getPosition(obj) + %GETPOSITION Get last position of selected rectangle + % + % pos = obj.getPosition() + % + % ## Output + % * __pos__ last rectangle position `[x y w h]`, empty otherwise + % + + if ~isempty(obj.rect) + pos = cv.Rect.from2points(obj.rect(1:2), obj.rect(3:4)); + else + pos = []; + end + end + + function pos = wait(obj) + %WAIT Block and wait for a rectangle selection to be made + % + % pos = obj.wait() + % + % ## Output + % * __pos__ rectangle position `[x y w h]` + % + % See also: uiwait, waitfor + % + + pos = []; + if ~obj.waiting + obj.waiting = true; + uiwait(obj.handles.fig); + + % destructor could have been called if figure was closed + % (by the ObjectBeingDestroyed event listener) + if isvalid(obj) + obj.waiting = false; + pos = obj.getPosition(); + end + end + end + + function resume(obj) + %RESUME Resume, useful to cancel waiting from UI callbacks + % + % obj.resume() + % + % See also: uiresume + % + + if obj.waiting + obj.waiting = false; + uiresume(obj.handles.fig); + end + end + end + + methods (Access = private) + function cp = currentPoint(obj) + %CURRENTPOINT Get current axis point + % + % cp = obj.currentPoint() + % + % ## Output + % * __cp__ current axis point (in data units) `[x,y]` + % + + % get current point + cp = get(obj.handles.ax, 'CurrentPoint'); + cp = cp(1,1:2); + + % clamp to within image coordinates + if obj.clip + cp = min(max(cp, obj.lims([1 3])), obj.lims([2 4])); + end + end + + function onMouseDown(obj, ~, ~) + %ONMOUSEDOWN Event handler for figure mouse press + + % starting point of rectangle + obj.pt = currentPoint(obj); + + % show dragging box + set(obj.handles.lin, 'XData',obj.pt(1), 'YData',obj.pt(2)); + + % attach mouse move/release event handlers, and change pointer + set(obj.handles.fig, 'Pointer','cross', ... + 'WindowButtonMotionFcn',@obj.onMouseMove, ...
+ 'WindowButtonUpFcn',@obj.onMouseUp); + end + + function onMouseMove(obj, ~, ~) + %ONMOUSEMOVE Event handler for figure mouse move + + % compute top-left and bottom-right corners of rectangle + cp = currentPoint(obj); + obj.rect = [min(cp, obj.pt), max(cp, obj.pt)]; + + % show dragging box + x = [1 3 3 1 1]; + y = [2 2 4 4 2]; + if obj.crosshair + x = [x 3 1 3]; + y = [y 4 4 2]; + end + set(obj.handles.lin, 'XData',obj.rect(x), 'YData',obj.rect(y)); + end + + function onMouseUp(obj, ~, ~) + %ONMOUSEUP Event handler for figure mouse release + + % detach mouse move/release event handlers, and restore pointer + set(obj.handles.fig, 'Pointer','arrow', ... + 'WindowButtonMotionFcn','', 'WindowButtonUpFcn',''); + + % reset dragging box + obj.pt = []; + set(obj.handles.lin, 'XData',NaN, 'YData',NaN); + + % evaluate user callback function + if ~isempty(obj.callback) + feval(obj.callback, obj.getPosition()); + end + + % resume execution if waiting + resume(obj); + end + end +end diff --git a/samples/common/VideoSynthAruco.m b/samples/common/VideoSynthAruco.m new file mode 100644 index 000000000..c514e7374 --- /dev/null +++ b/samples/common/VideoSynthAruco.m @@ -0,0 +1,217 @@ +classdef VideoSynthAruco < VideoSynthBase + %VIDEOSYNTHARUCO Aruco synthetic video class + % + % ### Sources: + % + % * https://github.com/opencv/opencv/blob/3.3.0/samples/python/video.py + % + % See also: VideoSynthBase, createVideoCapture + % + + properties (SetAccess = protected) + time % current time + time_step % time step + cam_mat % camera matrix (intrinsic parameters) + dist_coeff % lens distortion coefficients + fg % foreground image (aruco imag) + end + + methods + function this = VideoSynthAruco(varargin) + %VIDEOSYNTHARUCO Constructor + % + % cap = VideoSynthAruco('OptionName', optionValue, ...) + % + % ## Options + % Accepts the same options as the open method. + % + % See also: VideoSynthAruco.open + % + + % explicitly call superclass constructor with specified arguments + this = this@VideoSynthBase(varargin{:}); + end + + function release(this) + %RELEASE Clears allocated data + % + % cap.release() + % + + % call superclass RELEASE method + release@VideoSynthBase(this); + + this.time = 0; + this.time_step = 1; + this.cam_mat = eye(3,3); + this.dist_coeff = zeros(1,4); + this.fg = []; + end + + function [retval, p] = open(this, varargin) + %OPEN Opens a synthetic video stream + % + % retval = cap.open('OptionName',optionValue, ...) 
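+            % [retval, p] = cap.open('OptionName',optionValue, ...)
+            % (the second output is the input parser, used by subclasses)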
+ % + % ## Output + % * __retval__ bool, true if successful + % + % ## Options + % Same as base class options, in addition to: + % * __TimeStep__ time step, default 1/30 + % * __DistCoeffs__ radial distortion coefficients, + % default [-0.2 0.1 0 0] + % * __FG__ aruco board image, or one of the following presets: + % * __ArUcoMarker__ see cv.drawMarkerAruco + % * __ChArUcoDiamond__ see cv.drawCharucoDiamond + % * __ArUcoBoard__ see cv.drawPlanarBoard + % * __ChArUcoBoard__ see cv.drawCharucoBoard + % + % See also: VideoSynthBase.open + % + + % call superclass OPEN method + [retval, p] = open@VideoSynthBase(this, varargin{:}); + if ~retval, return; end + + % time variable + this.time = 0; + this.time_step = p.Results.TimeStep; + + % intrinsic camera parameters + fx = 0.9; % focal lengths expressed in pixel-related units + this.cam_mat = [ + fx*this.w 0 (this.w-1)/2 ; + 0 fx*this.h (this.h-1)/2 ; + 0 0 1 + ]; + this.dist_coeff = p.Results.DistCoeffs; + + % aruco image + if ischar(p.Results.FG) + % one of presets + this.fg = create_aruco_image(p.Results.FG); + else + % any image really + this.fg = p.Results.FG; + end + end + end + + methods (Access = protected) + function p = optionsParser(this) + %OPTIONSPARSER Input arguments parser + % + % p = cap.optionsParser() + % + % See also: VideoSynthBase.optionsParser + % + + p = optionsParser@VideoSynthBase(this); + p.addParameter('TimeStep', 1/30); + p.addParameter('DistCoeffs', [-0.2 0.1 0 0]); + p.addParameter('FG', 'ArucoBoard'); + end + + function img = render(this, img) + %RENDER Renders additional layers as needed on top of background + % + % img = cap.render(img) + % + % See also: VideoSynthBase.render + % + + fsz = [size(this.fg,2) size(this.fg,1)]; + + % get current pose (rotation/translation vectors) + [rvec, tvec] = getSyntheticRT(this.time, fsz); + this.time = this.time + this.time_step; + + % aruco image corners + p3 = [0 0; fsz(1) 0; fsz(1) fsz(2); 0 fsz(2)]; + p3(:,3) = 0; % Z=0 + + % project corners from 3D to 2D + p2 = cv.projectPoints(p3, rvec, tvec, ... + this.cam_mat, 'DistCoeffs',this.dist_coeff); + + % compute perspective transform (between 4 matching points) + if true + T = cv.getPerspectiveTransform(p3(:,1:2), p2); + else + T = cv.findHomography(p3(:,1:2), p2); + end + + % apply transform to aruco image projected on top of background image + img = cv.warpPerspective(flip(this.fg, 2), T, ... + 'Dst',img, 'DSize',this.sz, 'BorderType','Transparent'); + end + end +end + +%% Helper functions + +function img = create_aruco_image(config) + % draw aruco/charuco marker or board from one of predefined configurations + config = validatestring(config, ... + {'ArUcoMarker', 'ChArUcoDiamond', 'ArUcoBoard', 'ChArUcoBoard'}); + dict = {'Predefined', '6x6_250'}; + switch config + case 'ArUcoMarker' + id = randi(250) - 1; + img = cv.drawMarkerAruco(dict, id, 200); + img = cv.copyMakeBorder(img, [25 25 25 25], ... 
+ 'BorderType','Constant', 'Value',255); + case 'ChArUcoDiamond' + ids = randi(250, [1 4]) - 1; + img = cv.drawCharucoDiamond(dict, ids, 120, 60, 'MarginSize',30); + case 'ArUcoBoard' + board = {'GridBoard', 5, 7, 60, 15, dict}; + sz = [board{2:3}] * sum([board{4:5}]) - board{5} + 2 * board{5}; + img = cv.drawPlanarBoard(board, sz, 'MarginSize',board{5}); + case 'ChArUcoBoard' + board = {5, 7, 60, 30, dict}; + margins = board{3} - board{4}; + sz = [board{1:2}] * board{3} + 2 * margins; + img = cv.drawCharucoBoard(board, sz, 'MarginSize',margins); + end + img = cv.cvtColor(img, 'GRAY2RGB'); +end + +function [rvec, tvec] = getSyntheticRT(t, sz) + % compute eye/target positions for transformed image + center = [0.5*sz 0]; + phi = pi/3 + sin(t*3)*pi/8; + ofs = [sin(1.2*t) cos(1.8*t) 0] * sz(1) * 0.2; + eye_pos = center + [cos(t)*cos(phi) sin(t)*cos(phi) sin(phi)] * 15*60 + ofs; + target_pos = center + ofs; + + % compute rotation matrix, translation vector (extrinsic parameters) + [R, tvec] = lookat(eye_pos(:), target_pos(:)); + rvec = mtx2rvec(R); +end + +function [R, tvec] = lookat(eyePos, targetPos, up) + if nargin < 3, up = [0; 0; 1]; end + fwd = (targetPos - eyePos); + fwd = fwd / norm(fwd); + right = cross(fwd, up); + right = right / norm(right); + down = cross(fwd, right); + R = [right, down, fwd]'; + tvec = -R * eyePos(:); +end + +function rvec = mtx2rvec(R) + % rotation vector (compact representation of a rotation matrix) + if true + rvec = cv.Rodrigues(R); + else + [U,S,V] = svd(R - eye(3)); + p = V(:,1) + U(:,1)*S(1,1); + c = dot(V(:,1), p); + s = dot(V(:,2), p); + ax = cross(V(:,1), V(:,2)); + rvec = ax * atan2(s, c); + end +end diff --git a/samples/common/VideoSynthBase.m b/samples/common/VideoSynthBase.m new file mode 100644 index 000000000..3823ee87b --- /dev/null +++ b/samples/common/VideoSynthBase.m @@ -0,0 +1,293 @@ +classdef VideoSynthBase < handle + %VIDEOSYNTHBASE Base synthetic video class + % + % A stub/mock of the cv.VideoCapture class. + % + % ### Sources: + % + % * https://github.com/opencv/opencv/blob/3.3.0/samples/python/video.py + % + % See also: cv.VideoCapture, createVideoCapture + % + + properties (SetAccess = protected) + iter % frame counter + max_iter % max number of frames + bg % background image (always RGB) + end + properties (Dependent, SetAccess = private) + w % frame width + h % frame height + sz % frame size [w,h] + end + properties + noise % noise level [0,1] + end + + methods + function this = VideoSynthBase(varargin) + %VIDEOSYNTHBASE Constructor + % + % cap = VideoSynthBase('OptionName', optionValue, ...) + % + % ## Options + % Accepts the same options as the open method. + % + % See also: VideoSynthBase.open + % + + this.release(); + this.open(varargin{:}); + end + + function release(this) + %RELEASE Clears allocated data + % + % cap.release() + % + + this.iter = 0; + this.max_iter = Inf; + this.bg = []; + this.noise = 0.0; + end + + function [retval, p] = open(this, varargin) + %OPEN Opens a synthetic video stream + % + % retval = cap.open('OptionName',optionValue, ...) 
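+            % [retval, p] = cap.open('OptionName',optionValue, ...)
+            % (the second output is the input parser, used by subclasses)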
+ % + % ## Output + % * __retval__ bool, true if successful + % + % ## Options + % * __FrameCount__ max number of frames to produce, default Inf + % * __BG__ background image, default 640x480 gray image + % * __Size__ width/height of frames `[w,h]`, default [0,0] + % * __Noise__ amount of noise to add (number in the range [0,1]), + % default 0 + % + % See also: VideoSynthBase.optionsParser + % + + % parse input params + p = this.optionsParser(); + p.parse(varargin{:}); + this.max_iter = p.Results.FrameCount; + this.bg = p.Results.BG; + this.noise = p.Results.Noise; + sz = p.Results.Size; + + % background image + if isempty(this.bg) + if all(sz==0), sz = [640 480]; end + this.bg = 128 * ones([sz(2) sz(1) 3], 'uint8'); + elseif all(sz~=0) + this.bg = cv.resize(this.bg, sz); + end + + % success return value + retval = true; + end + + function retval = isOpened(this) + %ISOPENED Returns true if has been successfully initialized + % + % retval = cap.isOpened() + % + % ## Output + % * __retval__ true if object has been successfully initialized. + % + % See also: VideoSynthBase.open + % + + retval = this.isvalid(); + end + + function img = read(this) + %READ Grabs and returns the next video frame + % + % frame = cap.read() + % + % ## Output + % * __frame__ output image + % + % See also: cv.VideoCapture.read + % + + if this.iter >= this.max_iter + img = uint8([]); + else + % increment counter + this.iter = this.iter + 1; + + % start with the background image, + % and render additional layers as needed on top + img = this.render(this.bg); + + % add noise + if this.noise > 0 + img = this.addNoise(img); + end + end + end + + function value = get(this, prop) + %GET Returns the specified property + % + % value = cap.get(prop) + % + % ## Input + % * __prop__ Property identifier. + % + % ## Output + % * __value__ Value for the specified property. + % + % This method is partly implemented, to expose + % cv.VideoCapture compatible API. + % + % See also: VideoSynthBase.set, cv.VideoCapture.get + % + + switch upper(prop) + case 'FRAMEWIDTH', value = this.w; + case 'FRAMEHEIGHT', value = this.h; + case 'FOURCC', value = 'FAKE'; + case 'POSFRAMES', value = this.iter; + case 'FRAMECOUNT', value = this.max_iter; + case 'POSAVIRATIO', value = this.iter / this.max_iter; + otherwise + value = -1; % 0 + end + end + + function set(this, prop, value) + %SET Sets a property + % + % cap.set(prop, value) + % + % ## Input + % * __prop__ Property identifier. + % * __value__ Value of the property. + % + % This method is partly implemented, to expose + % cv.VideoCapture compatible API. 
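+            % For example, cap.set('PosFrames', 0) rewinds the synthetic
+            % stream; only the properties handled in the switch below
+            % (FrameWidth, FrameHeight, PosFrames, FrameCount) have an
+            % effect, all others are silently ignored.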
+ % + % See also: VideoSynthBase.get, cv.VideoCapture.set + % + + switch upper(prop) + case 'FRAMEWIDTH' + if ~isempty(this.bg) + this.bg = cv.resize(this.bg, [value this.h]); + end + case 'FRAMEHEIGHT' + if ~isempty(this.bg) + this.bg = cv.resize(this.bg, [this.w value]); + end + case 'POSFRAMES' + this.iter = value; + case 'FRAMECOUNT' + this.max_iter = value; + end + end + + function w = get.w(this) + w = size(this.bg,2); + end + + function h = get.h(this) + h = size(this.bg,1); + end + + function sz = get.sz(this) + sz = [size(this.bg,2) size(this.bg,1)]; + end + end + + methods (Access = protected) + function p = optionsParser(this) + %OPTIONSPARSER Input arguments parser + % + % p = cap.optionsParser() + % + % ## Output + % * __p__ input parser object + % + % See also: inputParser + % + + p = inputParser(); + p.addParameter('FrameCount', Inf); + p.addParameter('BG', []); + p.addParameter('Size', [0 0]); + p.addParameter('Noise', 0); + end + + function img = render(this, img) + %RENDER Renders additional layers as needed on top of background + % + % img = cap.render(img) + % + % ## Input + % * __img__ input frame + % + % ## Output + % * __img__ output frame + % + % See also: VideoSynthBase.read + % + + % nothing here, method can be overridden in subclasses + end + + function img = addNoise(this, img) + %ADDNOISE Adds Gaussian noise to image (mean=0, std=noise) + % + % img = cap.addNoise(img) + % + % ## Input + % * __img__ input frame + % + % ## Output + % * __img__ output frame + % + % See also: imnoise + % + + %HACK: try/catch faster than testing mexopencv.require('images') + try + img = imnoise(img, 'gaussian', 0, this.noise^2); + catch + img = my_imnoise(img, this.noise); + end + end + end +end + +function img = my_imnoise(img, noise_sd) + %MY_IMNOISE Adds noise to image + % + % img = my_imnoise(img, noise_sd) + % + % See also: imnoise + % + + % handle integer vs double/single images + clss = class(img); + if isinteger(img) + mxVal = double(intmax(clss)); + img = double(img) / mxVal; + changedClass = true; + else + mxVal = 1.0; + changedClass = false; + end + + img = img + randn(size(img)) * noise_sd; + img = max(min(img, 1), 0); % clip to [0,1] + + if changedClass + img = cast(round(img * mxVal), clss); + end +end diff --git a/samples/common/VideoSynthChess.m b/samples/common/VideoSynthChess.m new file mode 100644 index 000000000..0379d9303 --- /dev/null +++ b/samples/common/VideoSynthChess.m @@ -0,0 +1,230 @@ +classdef VideoSynthChess < VideoSynthBase + %VIDEOSYNTHCHESS Chess synthetic video class + % + % ### Sources: + % + % * https://github.com/opencv/opencv/blob/3.3.0/samples/python/video.py + % + % See also: VideoSynthBase, createVideoCapture + % + + properties (SetAccess = protected) + time % current time + time_step % time step + cam_mat % camera matrix (intrinsic parameters) + dist_coeff % lens distortion coefficients + grid_size % checkerboard grid size + end + properties (Access = protected) + quads_w % white quads of grid + quads_b % black quads of grid + end + + methods + function this = VideoSynthChess(varargin) + %VIDEOSYNTHCHESS Constructor + % + % cap = VideoSynthChess('OptionName', optionValue, ...) + % + % ## Options + % Accepts the same options as the open method.
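+            % Example (illustrative values):
+            %    cap = VideoSynthChess('Size',[640 480], 'Noise',0.1);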
+ % + % See also: VideoSynthChess.open + % + + % explicitly call superclass constructor with specified arguments + this = this@VideoSynthBase(varargin{:}); + end + + function release(this) + %RELEASE Clears allocated data + % + % cap.release() + % + + % call superclass RELEASE method + release@VideoSynthBase(this); + + this.time = 0; + this.time_step = 1; + this.cam_mat = eye(3,3); + this.dist_coeff = zeros(1,4); + this.grid_size = [0 0]; + this.quads_w = {}; + this.quads_b = {}; + end + + function [retval, p] = open(this, varargin) + %OPEN Opens a synthetic video stream + % + % retval = cap.open('OptionName',optionValue, ...) + % + % ## Output + % * __retval__ bool, true if successful + % + % ## Options + % Same as base class options, in addition to: + % * __TimeStep__ time step, default 1/30 + % * __DistCoeffs__ radial distortion coefficients, + % default [-0.2 0.1 0 0] + % * __GridSize__ grid size, default [10 7] + % + % See also: VideoSynthBase.open + % + + % call superclass OPEN method + [retval, p] = open@VideoSynthBase(this, varargin{:}); + if ~retval, return; end + + % time variable + this.time = 0; + this.time_step = p.Results.TimeStep; + + % intrinsic camera parameters + fx = 0.9; % focal lengths expressed in pixel-related units + this.cam_mat = [ + fx*this.w 0 (this.w-1)/2 ; + 0 fx*this.h (this.h-1)/2 ; + 0 0 1 + ]; + this.dist_coeff = p.Results.DistCoeffs; + + % grid quads + this.grid_size = p.Results.GridSize; + [this.quads_w, this.quads_b] = create_grid_quads(this.grid_size); + end + end + + methods (Access = protected) + function p = optionsParser(this) + %OPTIONSPARSER Input arguments parser + % + % p = cap.optionsParser() + % + % See also: VideoSynthBase.optionsParser + % + + p = optionsParser@VideoSynthBase(this); + p.addParameter('TimeStep', 1/30); + p.addParameter('DistCoeffs', [-0.2 0.1 0 0]); + p.addParameter('GridSize', [10 7]); + end + + function img = render(this, img) + %RENDER Renders additional layers as needed on top of background + % + % img = cap.render(img) + % + % See also: VideoSynthBase.render + % + + % get current pose (rotation/translation vectors) + [rvec, tvec] = getSyntheticRT(this.time, this.grid_size); + this.time = this.time + this.time_step; + + % draw checkerboard quads on top of image + img = draw_quads(img, this.quads_w, rvec, tvec, ... + this.cam_mat, this.dist_coeff, [255 255 255]-10); + img = draw_quads(img, this.quads_b, rvec, tvec, ... + this.cam_mat, this.dist_coeff, [0 0 0]+10); + end + end +end + +%% Helper functions + +function [white_quads, black_quads] = create_grid_quads(sz) + % build grid base-points (with Z=0) + [X,Y,Z] = meshgrid(0:sz(2)-1, 0:sz(1)-1, 0); + XYZ = [Y(:) X(:) Z(:)]; + + % assignment of quads to white/black <-> 1/2 + wbIdx = mod(sum(XYZ, 2), 2) + 1; + + % generate the quad points from base points + inc = [0 0 0; 1 0 0; 1 1 0; 0 1 0]; % BL, BR, TR, TL + XYZ = cellfun(@(c) bsxfun(@plus, c, inc), ... 
+                num2cell(XYZ,2), 'UniformOutput',false);
+
+    % split them into alternating white/black
+    white_quads = XYZ(wbIdx == 1);
+    black_quads = XYZ(wbIdx == 2);
+end
+
+function [rvec, tvec] = getSyntheticRT(t, sz)
+    % compute eye/target positions for transformed checkerboard
+    center = [0.5*sz 0];
+    phi = pi/3 + sin(t*3)*pi/8;
+    ofs = [sin(1.2*t) cos(1.8*t) 0] * sz(1) * 0.2;
+    eye_pos = center + [cos(t)*cos(phi) sin(t)*cos(phi) sin(phi)] * 15 + ofs;
+    target_pos = center + ofs;
+
+    % compute rotation matrix, translation vector (extrinsic parameters)
+    [R, tvec] = lookat(eye_pos(:), target_pos(:));
+    rvec = mtx2rvec(R);
+end
+
+function [R, tvec] = lookat(eyePos, targetPos, up)
+    if nargin < 3, up = [0; 0; 1]; end
+    fwd = (targetPos - eyePos);
+    fwd = fwd / norm(fwd);
+    right = cross(fwd, up);
+    right = right / norm(right);
+    down = cross(fwd, right);
+    R = [right, down, fwd]';
+    tvec = -R * eyePos(:);
+end
+
+function rvec = mtx2rvec(R)
+    % rotation vector (compact representation of a rotation matrix)
+    if true
+        rvec = cv.Rodrigues(R);
+    else
+        [U,S,V] = svd(R - eye(3));
+        p = V(:,1) + U(:,1)*S(1,1);
+        c = dot(V(:,1), p);
+        s = dot(V(:,2), p);
+        ax = cross(V(:,1), V(:,2));
+        rvec = ax * atan2(s, c);
+    end
+end
+
+function img = draw_quads(img, quads, rvec, tvec, cam_mat, dist_coeff, clr)
+    %DRAW_QUADS Render quads on top of image with specified color
+    %
+    %     img = draw_quads(img, quads, rvec, tvec, cam_mat, dist_coeff, clr)
+    %
+    % ## Input
+    % * __img__ input frame
+    % * __quads__ cell array of quad corners `{[x y z; ..], ..}`,
+    %   four corners per quad
+    % * __rvec__ rotation vector
+    % * __tvec__ translation vector
+    % * **cam_mat** camera matrix
+    % * **dist_coeff** distortion coefficients
+    % * __clr__ color
+    %
+    % ## Output
+    % * __img__ output frame
+    %
+
+    if nargin < 7, clr = [128 128 128]; end
+
+    % project all points (concatenated) from 3D to 2D
+    img_quads = cv.projectPoints(cat(1, quads{:}), rvec, tvec, ...
+        cam_mat, 'DistCoeffs',dist_coeff);
+
+    % draw quads
+    num = size(img_quads,1) / 4;
+    if true
+        % separate back each 4 points in a cell
+        q = mat2cell(img_quads, repmat(4,1,num), 2);
+        img = cv.fillPoly(img, q, 'Color',clr, 'LineType','AA');
+    else
+        % slower (note: take the 4 rows of the i-th quad, with coordinates
+        % scaled by 2^Shift = 4 for the fixed-point 'Shift' option)
+        for i=1:num
+            img = cv.fillConvexPoly(img, img_quads((i-1)*4+(1:4),:)*4, ...
+                'Color',clr, 'LineType','AA', 'Shift',2);
+        end
+    end
+end
diff --git a/samples/common/VideoSynthScene.m b/samples/common/VideoSynthScene.m
new file mode 100644
index 000000000..6f07694b7
--- /dev/null
+++ b/samples/common/VideoSynthScene.m
@@ -0,0 +1,155 @@
+classdef VideoSynthScene < VideoSynthBase
+    %VIDEOSYNTHSCENE Scene synthetic video class
+    %
+    % ### Sources:
+    %
+    % * https://github.com/opencv/opencv/blob/3.3.0/samples/python/video.py
+    % * https://github.com/opencv/opencv/blob/3.3.0/samples/python/tst_scene_render.py
+    %
+    % See also: VideoSynthBase, createVideoCapture
+    %
+
+    properties (SetAccess = protected)
+        time        % current time
+        time_step   % time step
+        speed       % animation speed
+        deform      % whether to deform the foreground polygon
+        fg          % foreground image
+    end
+    properties (Access = protected)
+        radius      % [rx ry]
+        center      % [x y]
+        pts         % [x y; x y; x y; x y] (four corner points)
+    end
+    properties (Dependent, GetAccess = protected, SetAccess = private)
+        offset      % [x,y]
+    end
+
+    methods
+        function this = VideoSynthScene(varargin)
+            %VIDEOSYNTHSCENE Constructor
+            %
+            %     cap = VideoSynthScene('OptionName', optionValue, ...)
+            %
+            % ## Options
+            % Accepts the same options as the open method.
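+            %
+            % ### Example
+            % A minimal usage sketch; the option values here are merely
+            % illustrative (see the open method for defaults and the
+            % full list of options):
+            %
+            %     cap = VideoSynthScene('Speed',1, 'Deformation',true);
+            %     img = cap.read();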
+            %
+            % See also: VideoSynthScene.open
+            %
+
+            % explicitly call superclass constructor with specified arguments
+            this = this@VideoSynthBase(varargin{:});
+        end
+
+        function release(this)
+            %RELEASE Clears allocated data
+            %
+            %     cap.release()
+            %
+
+            % call superclass RELEASE method
+            release@VideoSynthBase(this);
+
+            this.time = 0;
+            this.time_step = 1;
+            this.speed = 1;
+            this.deform = true;
+            this.fg = [];
+            this.radius = [0 0];
+            this.center = [0 0];
+            this.pts = zeros(4,2); % four [x y] corner points
+        end
+
+        function [retval, p] = open(this, varargin)
+            %OPEN Opens a synthetic video stream
+            %
+            %     retval = cap.open('OptionName',optionValue, ...)
+            %
+            % ## Output
+            % * __retval__ bool, true if successful
+            %
+            % ## Options
+            % Same as base class options, in addition to:
+            % * __TimeStep__ time step, default 1/30
+            % * __Speed__ animation speed, default 0.25
+            % * __Deformation__ whether to deform the foreground polygon,
+            %   default false
+            % * __FG__ foreground image
+            %
+            % See also: VideoSynthBase.open
+            %
+
+            % call superclass OPEN method
+            [retval, p] = open@VideoSynthBase(this, varargin{:});
+            if ~retval, return; end
+
+            % animation params
+            this.time = 0;
+            this.time_step = p.Results.TimeStep;
+            this.speed = p.Results.Speed;
+            this.deform = p.Results.Deformation;
+
+            % foreground image
+            if ~isempty(p.Results.FG)
+                this.fg = p.Results.FG;
+                fsz = [size(this.fg,2) size(this.fg,1)]; % [w,h]
+                this.center = fix((this.sz - fsz)/2);
+                this.radius = this.sz - (this.center + fsz);
+            else
+                this.pts = fix(bsxfun(@plus, this.sz/2, ...
+                    [0 0; this.w 0; this.w this.h; 0 this.h]/10));
+                this.radius = this.sz * 7/20; % [30 50]
+            end
+        end
+
+        function offset = get.offset(this)
+            t = this.time * this.speed;
+            offset = fix(this.radius .* [cos(t) sin(t)]);
+        end
+    end
+
+    methods (Access = protected)
+        function p = optionsParser(this)
+            %OPTIONSPARSER Input arguments parser
+            %
+            %     p = cap.optionsParser()
+            %
+            % See also: VideoSynthBase.optionsParser
+            %
+
+            p = optionsParser@VideoSynthBase(this);
+            p.addParameter('TimeStep', 1/30);
+            p.addParameter('Speed', 0.25);
+            p.addParameter('Deformation', false);
+            p.addParameter('FG', []);
+        end
+
+        function [img, rect] = render(this, img)
+            %RENDER Renders additional layers as needed on top of background
+            %
+            %     img = cap.render(img)
+            %
+            % See also: VideoSynthBase.render
+            %
+
+            if ~isempty(this.fg)
+                fsz = [size(this.fg,2) size(this.fg,1)];  % [w,h]
+                c = this.center + this.offset;            % [x,y]
+                rect = [c, fsz];                          % [x,y,w,h]
+                if true
+                    img(rect(2)+1:rect(2)+rect(4),rect(1)+1:rect(1)+rect(3),:) = this.fg;
+                else
+                    % slower
+                    img = cv.Rect.crop(img, rect, this.fg);
+                end
+            else
+                p = bsxfun(@plus, this.pts, this.offset);    % 4x2
+                rect = cv.Rect.from2points(p(1,:), p(3,:));  % [x,y,w,h]
+                if this.deform
+                    p(2:3,:) = p(2:3,:) + this.h/20*cos(this.time);
+                end
+                img = cv.fillConvexPoly(img, p, 'Color',[0 0 255]);
+            end
+            this.time = this.time + this.time_step;
+        end
+    end
+end
diff --git a/samples/common/createVideoCapture.m b/samples/common/createVideoCapture.m
new file mode 100644
index 000000000..2a98ec302
--- /dev/null
+++ b/samples/common/createVideoCapture.m
@@ -0,0 +1,197 @@
+function cap = createVideoCapture(source, fallback)
+    %CREATEVIDEOCAPTURE Create video capture object (real class or synthetic)
+    %
+    %     cap = createVideoCapture(devid)
+    %     cap = createVideoCapture(filename)
+    %     cap = createVideoCapture(synth)
+    %     cap = createVideoCapture(..., fallback)
+    %
+    % ## Input
+    % * __devid__ integer, id of the video capturing device to open.
+    %   Passed to cv.VideoCapture class.
+    % * __filename__ string, name of video file.
Passed to cv.VideoCapture + % * __synth__ procedural synthetic video. A string of the form + % 'synth|key=value|key=value'. Supported parameters: + % * __class__ one of: + % * __base__ noisy BG image (default if no class specified) + % * __scene__ noisy BG image with a rotating FG image in front + % * __chess__ noisy BG image with a moving chess board in front + % * __aruco__ noisy BG image with a moving aruco board in front + % * __framecount__ maximum number of frames of video stream + % * __bg__ path to background image file name + % * __size__ string in the form WxH specifying width/height of frames + % * __noise__ amount of noise to add (number in the range [0,1]) + % * __timestep__, __speed__, __deformation__, __fg__ see VideoSynthScene.open + % * __timestep__, __distcoeffs__, __gridsize__ see VideoSynthChess.open + % * __timestep__, __distcoeffs__, __fg__ see VideoSynthAruco.open + % * __fallback__ fallback in case devid or filename fail to open capture. + % One of the following predefined presets: + % * __lena__ noisy lena image + % * __chess__ noisy lena image with a moving 10x7 chess board + % * __aruco__ noisy lena image with a moving ArUco board + % * __charuco__ noisy lena image with a moving ChArUco board + % * __book__ box image rotating in front of graffiti image + % * __cube__ cube moving in front of a background image + % * anything else, 640x480 white noise + % + % ### Example + % + % cap = createVideoCapture('synth|class=chess|bg=../test/fruits.jpg|noise=0.3|size=320x240'); + % img = cap.read(); + % h = imshow(img); + % while ishghandle(h) + % img = cap.read(); + % set(h, 'CData',img) + % drawnow + % end + % + % ### Example + % + % % open first webcam, if fails fallback to fake lena video + % cap = createVideoCapture(0, 'lena'); + % assert(cap.isOpened()); + % cap.release(); + % + % ### Sources: + % + % * https://github.com/opencv/opencv/blob/3.3.0/samples/python/video.py + % + % See also: cv.VideoCapture, VideoSynthBase, VideoSynthScene, + % VideoSynthChess, VideoSynthAruco + % + + narginchk(0,2); + if nargin < 1, source = 0; end + if nargin < 2, fallback = 'chess'; end + if isempty(source) + % presets + cap = createVideoCapture(synth_presets(fallback)); + elseif isnumeric(source) || ~strncmpi(source, 'synth|', 6) + % devid or a filename + cap = cv.VideoCapture(source); + if ~cap.isOpened() + cap = createVideoCapture(synth_presets(fallback)); + end + else + % parse 'synth|key=value|key=value' string + [params, klass] = parse_string(source); + + % create VideoSynth of the specified class + switch klass + case 'aruco' + cap = VideoSynthAruco(params); + case 'chess' + cap = VideoSynthChess(params); + case 'scene' + cap = VideoSynthScene(params); + otherwise + cap = VideoSynthBase(params); + end + end +end + +function [params, klass] = parse_string(source) + % parse key/value pairs + params = regexp(source(7:end), '\|', 'split'); + params = cellfun(@(s) regexp(s, '=', 'split'), params, 'UniformOutput',false); + params = [params{:}]; + params(cellfun(@isempty, params)) = []; + if mod(numel(params), 2) == 0 + params(1:2:end) = lower(params(1:2:end)); + end + + % convert to struct, and parse string values + params = struct(params{:}); + params = structfun(@parse_value, params, 'UniformOutput',false); + + % get class param + if isfield(params, 'class') + klass = validatestring(params.class, {'base', 'scene', 'chess', 'aruco'}); + params = rmfield(params, 'class'); + else + klass = 'base'; + end +end + +function val = parse_value(str) + str = strtrim(str); + + % string as-is 
(fallback case) + val = str; + if isempty(val) + return; + end + + % numeric + tok = str2double(str); + if ~isnan(tok) + val = tok; + return; + end + + % size + tok = regexp(str, '^(\d+)x(\d+)$', 'tokens', 'once', 'ignorecase'); + if ~isempty(tok) + val = str2double(tok); + return; + end + + % logical + tok = regexp(str, '^(true|false)$', 'tokens', 'once', 'ignorecase'); + if ~isempty(tok) + val = strcmpi(tok, 'true'); + return; + end + + % image file path + [~,~,ext] = fileparts(str); + fmts = imformats(); + fmts = strcat('.', [fmts.ext]); + if any(strcmpi(ext, fmts)) && exist(str, 'file') == 2 + val = cv.imread(which(str), 'Color',true); + return; + end +end + +function source = synth_presets(name) + source = {'synth', 'noise=0.1'}; + %source{end+1} = 'framecount=300'; + %source{end+1} = 'size=320x320'; + switch lower(name) + case 'lena' + source{end+1} = ['bg=' mcv_which('lena.jpg')]; + case 'chess' + source{end+1} = 'class=chess'; + source{end+1} = ['bg=' mcv_which('lena.jpg')]; + case 'aruco' + source{end+1} = 'class=aruco'; + source{end+1} = ['bg=' mcv_which('lena.jpg')]; + source{end+1} = 'fg=ArUcoBoard'; + case 'charuco' + source{end+1} = 'class=aruco'; + source{end+1} = ['bg=' mcv_which('lena.jpg')]; + source{end+1} = 'fg=ChArUcoBoard'; + case 'box' + source{end+1} = 'class=aruco'; + source{end+1} = ['bg=' mcv_which('lena.jpg')]; + source{end+1} = ['fg=' mcv_which('box.png')]; + case 'book' + source{end+1} = 'class=scene'; + source{end+1} = ['bg=' mcv_which('graf1.png')]; + source{end+1} = ['fg=' mcv_which('box.png')]; + source{end+1} = 'size=640x480'; + source{end+1} = 'speed=1'; + case 'cube' + source{end+1} = 'class=scene'; + source{end+1} = ['bg=' mcv_which('fruits.jpg')]; + source{end+1} = 'speed=1'; + source{end+1} = 'deformation=true'; + otherwise + source{end+1} = 'size=640x480'; + end + source = strjoin(source, '|'); +end + +function p = mcv_which(fname) + p = fullfile(mexopencv.root(), 'test', fname); +end diff --git a/samples/connected_components_demo_gui.m b/samples/connected_components_demo_gui.m index bf173ed6e..4c703a5d9 100644 --- a/samples/connected_components_demo_gui.m +++ b/samples/connected_components_demo_gui.m @@ -1,6 +1,8 @@ -%% Connected Components demo +%% Connected Components % -% +% Sources: +% +% * % function varargout = connected_components_demo_gui(im) diff --git a/samples/contours2_demo.m b/samples/contours2_demo.m index b53f12c5c..e6f297b0b 100644 --- a/samples/contours2_demo.m +++ b/samples/contours2_demo.m @@ -2,8 +2,10 @@ % This program illustrates the use of |cv.findContours| and |cv.drawContours|. % The original image is put up along with the image of drawn contours. % -% , -% +% Sources: +% +% * +% * % %% Draw faces diff --git a/samples/contours_demo.m b/samples/contours_demo.m index e13316952..bca0167c1 100644 --- a/samples/contours_demo.m +++ b/samples/contours_demo.m @@ -37,7 +37,7 @@ % convert labels to one-based floating-point indices, background being label 1 labels = double(labels) + 1; if mexopencv.require('stats') - tabulate(labels(:)) + tabulate(labels(:)) end %% diff --git a/samples/contours_hierarchy_demo.m b/samples/contours_hierarchy_demo.m index 5db4d871c..7b3bd6543 100644 --- a/samples/contours_hierarchy_demo.m +++ b/samples/contours_hierarchy_demo.m @@ -8,8 +8,10 @@ % * Learn to find and draw contours using the functions |cv.findContours| and % |cv.drawContours|. 
% -% , -% +% Sources: +% +% * +% * % %% Contours @@ -71,7 +73,7 @@ % shows points what you got with |None| (734 points) and second image shows % the one with |Simple| (only 4 points). See how much memory it saves! % -% <> +% <> % % image with a simple rectangular shape @@ -110,7 +112,7 @@ % % Consider an example image below : % -% <> +% <> % % In this image, there are a few shapes numbered from *0-5*. *2* and *2a* % denote the external and internal contours of the outermost box. @@ -257,7 +259,7 @@ % labelled in red color and the hierarchy they belongs to, in green color % (either 1 or 2). The order is same as the order OpenCV detects contours. % -% <> +% <> % % So consider first contour, i.e *contour-0*. It is *hierarchy-1*. It has two % holes, contours *1,2*, and they belong to *hierarchy-2*. So for *contour-0*, @@ -297,7 +299,7 @@ % contours as per the result given by OpenCV and analyze it. Again, red % letters give the contour number and green letters give the hierarchy order. % -% <> +% <> % % Take *contour-0*: It is in *hierarchy-0*. Next contour in same hierarchy is % *contour-7*. No previous contours. Child is *contour-1*. And no parent. So diff --git a/samples/convexhull_demo.m b/samples/convexhull_demo.m index 03e131c98..24898d245 100644 --- a/samples/convexhull_demo.m +++ b/samples/convexhull_demo.m @@ -1,7 +1,9 @@ -%% Convex Hull demo +%% Convex Hull % This sample program demonstrates the use of the |cv.convexHull| function. % -% +% Sources: +% +% * % %% Points diff --git a/samples/corner_subpixels_demo_gui.m b/samples/corner_subpixels_demo_gui.m index 7a8859daf..2c98a25d7 100644 --- a/samples/corner_subpixels_demo_gui.m +++ b/samples/corner_subpixels_demo_gui.m @@ -9,10 +9,12 @@ % * to find more exact corner % positions (more exact than integer pixels). % -% , -% , -% , -% +% Sources: +% +% * +% * +% * +% * % function varargout = corner_subpixels_demo_gui(im) diff --git a/samples/create_mask_demo_gui.m b/samples/create_mask_demo_gui.m index 68cb0ca1f..11221a937 100644 --- a/samples/create_mask_demo_gui.m +++ b/samples/create_mask_demo_gui.m @@ -5,7 +5,9 @@ % The program takes as input a source image and outputs its corresponding % mask image. % -% +% Sources: +% +% * % function varargout = create_mask_demo_gui(im) diff --git a/samples/create_mask_ipt_demo.m b/samples/create_mask_ipt_demo.m index 3db2641ac..c24bb3c37 100644 --- a/samples/create_mask_ipt_demo.m +++ b/samples/create_mask_ipt_demo.m @@ -3,10 +3,12 @@ % % This demo uses Image Processing Toolbox functions. % -% -% % See also: impoly, getline, roipoly % +% Sources: +% +% * +% % load an image, and display it src = imread(fullfile(mexopencv.root(),'test','fruits.jpg')); diff --git a/samples/dbt_face_detection_demo.m b/samples/dbt_face_detection_demo.m index 1740d5145..8022daa74 100644 --- a/samples/dbt_face_detection_demo.m +++ b/samples/dbt_face_detection_demo.m @@ -1,6 +1,8 @@ %% DBT Face Detection % -% +% Sources: +% +% * % %% Create detector diff --git a/samples/delaunay2_demo.m b/samples/delaunay2_demo.m index 1e9dfbb8a..2a7d8f590 100644 --- a/samples/delaunay2_demo.m +++ b/samples/delaunay2_demo.m @@ -5,7 +5,9 @@ % It draws a random set of points in an image and then % delaunay triangulates them. % -% +% Sources: +% +% * % function delaunay2_demo() diff --git a/samples/demos.xml b/samples/demos.xml new file mode 100644 index 000000000..9b86a3909 --- /dev/null +++ b/samples/demos.xml @@ -0,0 +1,1000 @@ + + + mexopencv + toolbox + HelpIcon.DEMOS + Examples to demonstrate how to use OpenCV in MATLAB. 
+ Homepage + + + + + + video + web('https://www.youtube.com/watch?v=RQHQQWTqT8g&index=1&list=PLmcMMZCV897pofUfG1yH-8fcjpUTFUSaJ','-browser') + + + + video + web('https://www.youtube.com/watch?v=kRu1dLrJoMo&index=2&list=PLmcMMZCV897pofUfG1yH-8fcjpUTFUSaJ','-browser') + + + + video + web('https://www.youtube.com/watch?v=9VLZ3FpmdjM&index=3&list=PLmcMMZCV897pofUfG1yH-8fcjpUTFUSaJ','-browser') + + + + video + web('https://www.youtube.com/watch?v=IWNcFuAHHyY&index=4&list=PLmcMMZCV897pofUfG1yH-8fcjpUTFUSaJ','-browser') + + + + + + + + + M-file + RotatedRect_demo + + + + M-file + FileStorage_demo + + + + M-file + file_storage_demo + + + + M-GUI + linear_blending_demo_gui + + + + M-file + image_bitwise_demo + + + + M-GUI + linear_transform_demo_gui + + + + M-GUI + padding_demo_gui + + + + M-GUI + threshold_inrange_demo_gui + + + + M-file + dft_demo + + + + + + + + + M-file + drawing_demo + + + + M-file + drawing_basic_demo + + + + M-file + draw_text_demo + + + + M-GUI + falsecolor_demo_gui + + + + M-GUI + delaunay2_demo + + + + M-GUI + hull_demo_gui + + + + M-file + contours_demo + + + + M-file + contours2_demo + + + + M-GUI + watershed_demo_gui + + Image Processing Toolbox + + + + M-file + contours_hierarchy_demo + + + + M-GUI + findContours_demo_gui + + + + M-GUI + ffilldemo_gui + + + + M-file + floodfill_demo + + + + M-file + thresholding_demo + + Image Processing Toolbox + + + + M-GUI + threshold_demo_gui + + + + M-file + smoothing_demo + + + + M-file + gausian_median_blur_bilateral_filter + + Statistics and Machine Learning Toolbox + + + + M-GUI + polar_transforms_demo + + + + M-GUI + morphology_demo_gui1 + + + + M-GUI + morphology_demo_gui2 + + + + M-file + morphology_demo_3 + + + + M-file + hitmiss_demo + + + + M-file + generalContours_demo1 + + + + M-file + generalContours_demo2 + + + + M-GUI + fitellipse_demo_gui + + + + M-GUI + fitline_demo_gui + + + + M-GUI + moments_demo_gui + + + + M-GUI + gabor_filter_gui + + + + M-file + gabor_demo + + Image Processing Toolbox + + + + M-file + lsd_lines_demo + + + + M-file + hough_lines_demo + + + + + + M-file + hough_circles_demo + + Image Processing Toolbox + + + + M-file + generalized_hough_demo + + + + M-GUI + grabcut_demo_gui + + Image Processing Toolbox + + + + M-file + pyramids_blending + + + + M-GUI + lap_pyr_demo_gui + + + + M-GUI + warp_perspective_demo_gui + + + + M-GUI + perspective_transform_gui + + Image Processing Toolbox + + + + M-file + image_alignment_demo + + Computer Vision System Toolbox + + + + M-file + generic_corner_detector_demo + + + + M-GUI + corner_subpixels_demo_gui + + + + M-file + mask_tmpl_demo + + Image Processing Toolbox + + + + M-GUI + phase_corr_demo + + + + M-file + convexhull_demo + + + + M-file + minarea_demo + + + + M-GUI + connected_components_demo_gui + + + + + + M-GUI + edge_demo_gui + + + + M-GUI + laplace_demo_gui + + + + M-GUI + create_mask_demo_gui + + Image Processing Toolbox + + + + M-file + create_mask_ipt_demo + + Image Processing Toolbox + + + + M-file + mouse_and_match_demo + + + + + + M-GUI + distrans_demo_gui + + + + M-GUI + hist_demo_gui + + + + M-file + histogram_calculation_demo + + + + + + M-file + histogram_equalization_demo + + Image Processing Toolbox + + + + M-file + histogram_comparison_demo + + + + M-file + histogram_2d_demo + + + + + + + + M-GUI + color_histogram_demo_gui + + + + M-GUI + backproject_demo_gui + + + + M-GUI + camshift_demo_gui + + + + M-file + camshift_track_demo + + Computer Vision System Toolbox + + + + + + M-GUI + clahe_demo_gui + + + + M-file + remap_demo + + + + 
M-file + remap_fun_demo + + + + M-file + warp_affine_demo + + + + M-GUI + pyramids_demo_gui + + + + M-file + sobel_derivatives_demo + + + + M-file + laplace_operator_demo + + + + M-file + pointPolygonTest_demo + + + + M-file + points_polygon_demo + + Statistics and Machine Learning Toolbox + + + + M-file + watershed_segmentation_demo + + Statistics and Machine Learning Toolbox + + + + M-GUI + weiner_deconvolution_demo_gui + + + + M-file + texture_flow_demo + + + + M-GUI + coherence_demo_gui + + + + M-file + turing_patterns_demo + + + + M-GUI + hi_res_browse_demo + + Image Processing Toolbox + + + + M-file + squares_detector_demo + + + + + + + + + M-file + image_similarity_demo + + + + + + + + + M-GUI + capture_demo_gui + + + + M-file + video_write_demo + + + + M-file + synth_video_demo + + + + + + + + + M-GUI + segment_objects_demo + + + + M-GUI + kalman_demo + + + + M-file + tvl1_optical_flow_demo + + Image Processing Toolbox + + + + M-GUI + fback_demo + + + + M-GUI + opt_flow_demo + + + + + + M-GUI + lucas_kanade_demo + + + + + + M-GUI + pyrlk_optical_flow_demo + + + + + + M-GUI + lk_demo_gui + + + + M-GUI + lk_track_demo + + + + + + M-GUI + lk_homography_demo + + + + + + + + + + + M-file + calibration_capture_demo + + + + M-file + calibration_demo + + + + M-file + calibration_pose_demo + + + + M-file + stereo_calibration_demo + + + + + + M-file + stereo_match_demo + + Computer Vision System Toolbox + + + + M-file + epipolar_geometry_demo + + Image Processing Toolbox + + + + + + + + + M-file + feature_detector_fast_demo + + + + M-file + orb_demo + + + + M-file + feature_homography_demo + + + + M-file + asift_demo + + Image Processing Toolbox + + + + M-file + akaze_match_demo + + + + M-file + planar_tracker_demo + + Image Processing Toolbox + + + + M-file + descriptors_compute_match_demo + + + + M-file + detect_blob_demo + + + + M-file + mser_demo + + + + M-file + plane_tracker_demo + + + + M-file + feature_homography_track_demo + + + + M-file + plane_ar_demo + + + + M-file + mosse_demo + + + + + + + + + M-GUI + facedetect + + + + M-GUI + face_eyes_detect_demo + + + + + + M-file + facial_features_demo + + + + M-GUI + smiledetect_demo + + + + + + M-GUI + dbt_face_detection_demo + + + + M-GUI + peopledetect_demo + + + + + + + + + M-file + caffe_googlenet_demo + + + + M-file + fcn_semsegm_demo + + + + M-file + dnn_image_classification_demo + + + + M-file + dnn_semantic_segmentation_demo + + + + M-file + dnn_object_detection_demo + + + + M-file + dnn_face_detector + + + + + + + + + M-file + kmeans_demo + + Statistics and Machine Learning Toolbox + + + + M-file + em_demo + + Statistics and Machine Learning Toolbox + + + + M-file + gaussian_mix_demo + + Statistics and Machine Learning Toolbox + + + + M-file + kmeans_color_quantize_demo + + + + M-file + classification_demo + + + + M-file + ml_demo + + Statistics and Machine Learning Toolbox + + + + M-file + logistic_regression_demo + + Image Processing Toolbox + + Statistics and Machine Learning Toolbox + + + + M-file + svm_intro_demo + + Statistics and Machine Learning Toolbox + + + + M-file + knn_demo + + Statistics and Machine Learning Toolbox + + + + M-file + pca_intro_demo + + + + M-file + pca_demo + + Image Processing Toolbox + + + + M-GUI + train_svmsgd_demo_gui + + + + M-file + knn_ocr_digits_demo + + Image Processing Toolbox + + + + M-file + knn_ocr_letters_demo + + Statistics and Machine Learning Toolbox + + + + M-file + svm_hog_ocr_digits_demo + + Computer Vision System Toolbox + + Statistics and Machine Learning Toolbox + + Image 
Processing Toolbox + + + + + + + + + M-file + hdr_imaging_demo + + Image Processing Toolbox + + + + M-GUI + inpaint_demo + + Image Processing Toolbox + + + + M-file + npr_demo + + + + M-file + cloning_demo + + + + M-file + non_local_means_demo + + Image Processing Toolbox + + + + + + + + + M-file + stitching_demo + + + + M-file + stitching_detailed_demo + + Computer Vision System Toolbox + + Image Processing Toolbox + + + + + + + + + M-file + shape_context_demo + + + + + + + + + M-file + super_resolution_demo + + + + + + + + + M-file + videostab_demo + + Computer Vision System Toolbox + + + + diff --git a/samples/descriptors_compute_match_demo.m b/samples/descriptors_compute_match_demo.m index ed852e5a8..dc19bb3a0 100644 --- a/samples/descriptors_compute_match_demo.m +++ b/samples/descriptors_compute_match_demo.m @@ -2,7 +2,9 @@ % This program demonstrates how to detect, compute, and match descriptors % using various algorithms: ORB, BRISK, and AKAZE. % -% +% Sources: +% +% * % %% diff --git a/samples/detect_blob_demo.m b/samples/detect_blob_demo.m index bb2573e1c..88f064da1 100644 --- a/samples/detect_blob_demo.m +++ b/samples/detect_blob_demo.m @@ -1,7 +1,9 @@ -%% Blob Detection demo +%% Blob Detection % This program demonstrates how to use BLOB to detect and filter region. % -% +% Sources: +% +% * % %% Image diff --git a/samples/dft_demo.m b/samples/dft_demo.m index e31d83719..fb806ea32 100644 --- a/samples/dft_demo.m +++ b/samples/dft_demo.m @@ -7,10 +7,12 @@ % * What is a Fourier transform and why use it? % * Usage of OpenCV functions such as: |cv.dft| and |cv.getOptimalDFTSize|. % -% , -% , -% , -% +% Sources: +% +% * +% * +% * +% * % %% Theory @@ -23,7 +25,8 @@ % images Fourier transform is: % % $$F(k,l) = \displaystyle\sum\limits_{i=0}^{N-1}\sum\limits_{j=0}^{N-1} -% f(i,j) e^{-i2\pi(\frac{ki}{N} + \frac{lj}{N})}$$, +% f(i,j) e^{-i2\pi(\frac{ki}{N} + \frac{lj}{N})}$$ +% % $$e^{ix} = \cos{x} + i\sin {x}$$ % % Here $f$ is the image value in its spatial domain and $F$ in its frequency @@ -158,11 +161,11 @@ % % In case of the horizontal text: % -% <> +% <> % % In case of a rotated text: % -% <> +% <> % % You can see that the most influential components of the frequency domain % (brightest dots on the magnitude image) follow the geometric rotation of diff --git a/samples/distrans_demo_gui.m b/samples/distrans_demo_gui.m index 075f78c62..b4e3d50b3 100644 --- a/samples/distrans_demo_gui.m +++ b/samples/distrans_demo_gui.m @@ -1,4 +1,4 @@ -%% Distance Transform Demo +%% Distance Transform % Program to demonstrate the use of the distance transform function % between edge images. % @@ -20,8 +20,10 @@ % * |CComp|: switch to Voronoi diagram mode. % * |Pixel|: switch to pixel-based Voronoi diagram mode. % -% , -% +% Sources: +% +% * +% * % function varargout = distrans_demo_gui(im) diff --git a/samples/dnn_face_detector.m b/samples/dnn_face_detector.m new file mode 100644 index 000000000..3e2753246 --- /dev/null +++ b/samples/dnn_face_detector.m @@ -0,0 +1,110 @@ +%% DNN: Face Detection +% Face detector based on SSD framework (Single Shot MultiBox Detector), +% using a reduced ResNet-10 model. 
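+%
+% Each detection row consumed below follows the SSD output layout (a note
+% added for clarity, using the field names from the object detection demo;
+% box coordinates are relative to the image size):
+%
+%    [img_id, class_id, confidence, xLeftBottom, yLeftBottom, xRightTop, yRightTop]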
+% +% Sources: +% +% * +% + +%% +% import network +[net, blobOpts] = ResNetSSD_FaceDetector(); +assert(~net.empty()); + +%% +% minimum confidence threshold of detections to show +confThreshold = 0.5; + +%% +% prepare video input +cap = cv.VideoCapture(); +pause(1); +assert(cap.isOpened()); + +%% +% prepare figure +frame = cap.read(); +assert(~isempty(frame)); +hImg = imshow(frame); +sz = [size(frame,2) size(frame,1)]; + +%% +% video feed +while ishghandle(hImg) + % read frame + frame = cap.read(); + if isempty(frame), break; end + + % detect faces + net.setInput(cv.Net.blobFromImages(flip(frame,3), blobOpts{:})); + detections = net.forward(); % SSD output is 1-by-1-by-ndetections-by-7 + detections = permute(detections, [3 4 2 1]); + + % draw detections + for i=1:size(detections,1) + % only strong detections + d = detections(i,:); + if d(2) == 1 && d(3) > confThreshold % 0: background, 1: face + % plot bounding boxes (coordinates are relative to image size) + frame = cv.rectangle(frame, d(4:5).*sz, d(6:7).*sz, ... + 'Color',[0 255 0], 'Thickness',2); + frame = cv.putText(frame, sprintf('conf = %3.0f%%', d(3)*100), ... + d(4:5).*sz - [0 4], 'Color',[255 0 0], 'FontScale',0.5); + end + end + + % show inference timing + [~,t] = net.getPerfProfile(); + t = double(t) / cv.TickMeter.getTickFrequency(); + frame = cv.putText(frame, sprintf('time = %g sec', t), [10 20], ... + 'Color',[255 255 0], 'FontScale',0.5); + + % update plot + set(hImg, 'CData',frame); + drawnow; +end +cap.release(); + +%% +% Helper function + +function dname = get_dnn_dir(dname) + %GET_DNN_DIR Path to model files, and show where to get them if missing + + dname = fullfile(mexopencv.root(), 'test', 'dnn', dname); + b = isdir(dname); + if ~b + % display help of calling function + % (assumed to be a local function in current file) + st = dbstack(1); + help([mfilename() filemarker() st(1).name]) + end + assert(b, 'Missing model: %s', dname); +end + +function [net, blobOpts] = ResNetSSD_FaceDetector() + %RESNETSSD_FACEDETECTOR face detector based on SSD framework with reduced ResNet-10 backbone + % + % homepage = https://github.com/opencv/opencv/blob/3.3.1/samples/dnn/face_detector/how_to_train_face_detector.txt + % + % ## Model + % + % file = test/dnn/ResNetSSD_FaceDetector/deploy.prototxt + % url = https://github.com/opencv/opencv/raw/3.3.1/samples/dnn/face_detector/deploy.prototxt + % hash = 5fd52177a483cbac12fd61e9ecd87c762829ecbe + % + % ## Weights + % + % file = test/dnn/ResNetSSD_FaceDetector/res10_300x300_ssd_iter_140000.caffemodel + % url = https://github.com/opencv/opencv_3rdparty/raw/dnn_samples_face_detector_20170830/res10_300x300_ssd_iter_140000.caffemodel + % hash = 15aa726b4d46d9f023526d85537db81cbc8dd566 + % size = 10.1 MB + % + + dname = get_dnn_dir('ResNetSSD_FaceDetector'); + net = cv.Net('Caffe', ... + fullfile(dname, 'deploy.prototxt'), ... 
+ fullfile(dname, 'res10_300x300_ssd_iter_140000.caffemodel')); + blobOpts = {'SwapRB',false, 'Crop',false, 'Size',[300 300], 'Mean',[104 117 123]}; +end diff --git a/samples/dnn_image_classification_demo.m b/samples/dnn_image_classification_demo.m new file mode 100644 index 000000000..2ff704df4 --- /dev/null +++ b/samples/dnn_image_classification_demo.m @@ -0,0 +1,352 @@ +%% DNN Image Classification +% +% * +% * +% * +% * +% * +% * +% * +% +% Sources: +% +% * +% * +% * +% * +% + +function dnn_image_classification_demo(im, name, crop) + % input image (BGR channel order) + if nargin < 1 || isempty(im) + im = fullfile(mexopencv.root(), 'test', 'space_shuttle.jpg'); + end + img = cv.imread(im, 'Color',true, 'FlipChannels',false); + + % import pretrained model + if nargin < 2, name = 'GoogLeNet'; end + fprintf('Load model... '); tic; + switch lower(name) + case 'alexnet' + % ImageNet ILSVRC 2012 + [net, labels, blobOpts] = AlexNet(); + case 'caffenet' + % ImageNet ILSVRC 2012 + [net, labels, blobOpts] = CaffeNet(); + case 'vggnet' + % ImageNet ILSVRC 2012 + [net, labels, blobOpts] = VGGNet(); + case 'inception' + % ImageNet ILSVRC 2012 + [net, labels, blobOpts] = Inception(); + case 'googlenet' + % ImageNet ILSVRC 2012 + [net, labels, blobOpts] = GoogLeNet(); + case 'resnet' + % ImageNet ILSVRC 2012 + [net, labels, blobOpts] = ResNet(); + case 'squeezenet' + % ImageNet ILSVRC 2012 + [net, labels, blobOpts] = SqueezeNet('v1.1'); + otherwise + error('Unrecognized model %s', name) + end + toc; + assert(~net.empty(), 'Failed to read network %s', name); + + % feed image to network + if nargin < 3, crop = true; end + blobOpts = ['Crop',crop, blobOpts]; + blob = cv.Net.blobFromImages(img, blobOpts{:}); + net.setInput(blob); + + % run forward pass + fprintf('Forward pass... '); tic; + prob = net.forward(); % 1-by-nclasses-by-1-by-1 + toc; + + % prepare output image + out = flip(img, 3); % BGR to RGB + + % prediction: image classification and top-5 predictions + [~,ord] = sort(prob, 'descend'); + disp('Top-5 predictions:') + for i=1:5 + fprintf('%6.2f%% = %s\n', prob(ord(i))*100, labels{ord(i)}); + end + imshow(out), title('Image Classification') + xlabel(labels{ord(1)}) +end + +% --- Helper functions --- + +function dname = get_dnn_dir(dname) + %GET_DNN_DIR Path to model files, and show where to get them if missing + + dname = fullfile(mexopencv.root(), 'test', 'dnn', dname); + b = isdir(dname); + if ~b + % display help of calling function + % (assumed to be a local function in current file) + st = dbstack(1); + help([mfilename() filemarker() st(1).name]) + end + assert(b, 'Missing model: %s', dname); +end + +function labels = readLabels(labelsFile, skipFirstWord) + if nargin < 2, skipFirstWord = false; end + if ~mexopencv.isOctave() + fid = fopen(labelsFile, 'rt'); + C = textscan(fid, '%s', 'Delimiter','\n'); + fclose(fid); + labels = C{1}; + else + %HACK: textscan is buggy and unreliable in Octave! 
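+        % (textread is not recommended in newer MATLAB versions but still
+        % available in Octave; with '%s' and a newline delimiter it returns
+        % one string per line of the file)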
+ labels = textread(labelsFile, '%s', 'Delimiter','\n'); + end + if skipFirstWord + labels = regexprep(labels, '^\w+\s*', '', 'once'); + end +end + +% --- Pretrained models --- +% See also: https://github.com/opencv/opencv_extra/blob/3.3.1/testdata/dnn/download_models.py + +function [net, labels, blobOpts] = AlexNet() + %ALEXNET BAIR/BVLC AlexNet Model [Caffe] + % + % homepage = https://github.com/BVLC/caffe/tree/master/models/bvlc_alexnet + % + % ## Model + % + % file = test/dnn/AlexNet/deploy.prototxt + % url = https://github.com/BVLC/caffe/raw/master/models/bvlc_alexnet/deploy.prototxt + % hash = cb77655eb4db32c9c47699c6050926f9e0fc476a + % + % ## Weights + % + % file = test/dnn/AlexNet/bvlc_alexnet.caffemodel + % url = http://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel + % hash = 9116a64c0fbe4459d18f4bb6b56d647b63920377 + % size = 232 MB + % + % ## Classes + % + % file = test/dnn/AlexNet/synset_words.txt + % url = https://github.com/opencv/opencv/raw/3.3.1/samples/data/dnn/synset_words.txt + % + + dname = get_dnn_dir('AlexNet'); + net = cv.Net('Caffe', ... + fullfile(dname, 'deploy.prototxt'), ... + fullfile(dname, 'bvlc_alexnet.caffemodel')); + labels = readLabels(fullfile(dname, 'synset_words.txt'), true); + blobOpts = {'SwapRB',false, 'Size',[227 227], 'Mean',[104 117 123]}; +end + +function [net, labels, blobOpts] = CaffeNet() + %CAFFENET BAIR/BVLC CaffeNet Model, a replication of AlexNet with some modification [Caffe] + % + % homepage = https://github.com/BVLC/caffe/tree/master/models/bvlc_reference_caffenet + % + % ## Model + % + % file = test/dnn/CaffeNet/deploy.prototxt + % url = https://github.com/BVLC/caffe/raw/master/models/bvlc_reference_caffenet/deploy.prototxt + % hash = 6a40fd8b77233afee8fd525ce59bb4ccaae78d58 + % + % ## Weights + % + % file = test/dnn/CaffeNet/bvlc_reference_caffenet.caffemodel + % url = http://dl.caffe.berkeleyvision.org/bvlc_reference_caffenet.caffemodel + % hash = 4c8d77deb20ea792f84eb5e6d0a11ca0a8660a46 + % size = 232 MB + % + % ## Classes + % + % file = test/dnn/CaffeNet/synset_words.txt + % url = https://github.com/opencv/opencv/raw/3.3.1/samples/data/dnn/synset_words.txt + % + + dname = get_dnn_dir('CaffeNet'); + net = cv.Net('Caffe', ... + fullfile(dname, 'deploy.prototxt'), ... + fullfile(dname, 'bvlc_reference_caffenet.caffemodel')); + labels = readLabels(fullfile(dname, 'synset_words.txt'), true); + blobOpts = {'SwapRB',false, 'Size',[227 227], 'Mean',[104 117 123]}; +end + +function [net, labels, blobOpts] = VGGNet() + %VGGNET VGG team ILSVRC-2014 16-layer model [Caffe] + % + % homepage = http://www.robots.ox.ac.uk/~vgg/research/very_deep/ + % + % ## Model + % + % file = test/dnn/VGGNet/VGG_ILSVRC_16_layers_deploy.prototxt + % url = https://gist.githubusercontent.com/ksimonyan/211839e770f7b538e2d8/raw/0067c9b32f60362c74f4c445a080beed06b07eb3/VGG_ILSVRC_16_layers_deploy.prototxt + % hash = 2734e5500f1445bd7c9fee540c99f522485247bd + % + % ## Weights + % + % file = test/dnn/VGGNet/VGG_ILSVRC_16_layers.caffemodel + % url = http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_16_layers.caffemodel + % hash = 9363e1f6d65f7dba68c4f27a1e62105cdf6c4e24 + % size = 527 MB + % + % ## Classes + % + % file = test/dnn/VGGNet/synset_words.txt + % url = https://github.com/opencv/opencv/raw/3.3.1/samples/data/dnn/synset_words.txt + % + + dname = get_dnn_dir('VGGNet'); + net = cv.Net('Caffe', ... + fullfile(dname, 'VGG_ILSVRC_16_layers_deploy.prototxt'), ... 
+ fullfile(dname, 'VGG_ILSVRC_16_layers.caffemodel')); + labels = readLabels(fullfile(dname, 'synset_words.txt'), true); + blobOpts = {'SwapRB',false, 'Size',[224 224], 'Mean',[103.939 116.779 123.68]}; +end + +function [net, labels, blobOpts] = Inception() + %INCEPTION Google Inception V1 model [TensorFlow] + % + % homepage = https://github.com/tensorflow/tensorflow + % + % ## Model + Weights + Classes + % + % file = test/dnn/Inception/tensorflow_inception_graph.pb + % file = test/dnn/Inception/imagenet_comp_graph_label_strings.txt + % url = https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip + % hash = e3b84c7e240ce8025b30d868f5e840b4bba9761d + % size = 47.6 MB + % + + dname = get_dnn_dir('Inception'); + net = cv.Net('Tensorflow', ... + fullfile(dname, 'tensorflow_inception_graph.pb')); + labels = readLabels(fullfile(dname, 'imagenet_comp_graph_label_strings.txt'), false); + blobOpts = {'SwapRB',true, 'Size',[224 224], 'Mean',[117 117 117]}; +end + +function [net, labels, blobOpts] = GoogLeNet() + %GOOGLENET BAIR/BVLC GoogleNet Model, a Caffe implementation of Inception model [Caffe] + % + % homepage = https://github.com/BVLC/caffe/tree/master/models/bvlc_googlenet + % + % ## Model + % + % file = test/dnn/GoogLeNet/deploy.prototxt + % url = https://github.com/BVLC/caffe/raw/master/models/bvlc_googlenet/deploy.prototxt + % hash = 7060345c8012294baa60eeb5901d2d3fd89d75fc + % + % ## Weights + % + % file = test/dnn/GoogLeNet/bvlc_googlenet.caffemodel + % url = http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel + % hash = 405fc5acd08a3bb12de8ee5e23a96bec22f08204 + % size = 51 MB + % + % ## Classes + % + % file = test/dnn/GoogLeNet/synset_words.txt + % url = https://github.com/opencv/opencv/raw/3.3.1/samples/data/dnn/synset_words.txt + % + + dname = get_dnn_dir('GoogLeNet'); + net = cv.Net('Caffe', ... + fullfile(dname, 'deploy.prototxt'), ... + fullfile(dname, 'bvlc_googlenet.caffemodel')); + labels = readLabels(fullfile(dname, 'synset_words.txt'), true); + blobOpts = {'SwapRB',false, 'Size',[224 224], 'Mean',[104 117 123]}; +end + +function [net, labels, blobOpts] = ResNet() + %RESNET Deep Residual Networks, ResNet-50 [Caffe] + % + % homepage = https://github.com/KaimingHe/deep-residual-networks + % + % ## Model + % + % file = test/dnn/ResNet/ResNet-50-deploy.prototxt + % url = https://onedrive.live.com/?authkey=%21AAFW2-FVoxeVRck&id=4006CBB8476FF777%2117887&cid=4006CBB8476FF777 + % hash = 5d6fd5aeadd8d4684843c5028b4e5672b9e51638 + % + % ## Weights + % + % file = test/dnn/ResNet/ResNet-50-model.caffemodel + % url = https://onedrive.live.com/?authkey=%21AAFW2-FVoxeVRck&id=4006CBB8476FF777%2117887&cid=4006CBB8476FF777 + % hash = b7c79ccc21ad0479cddc0dd78b1d20c4d722908d + % size = 97.7 MB + % + % ## Classes + % + % file = test/dnn/ResNet/synset_words.txt + % url = https://github.com/opencv/opencv/raw/3.3.1/samples/data/dnn/synset_words.txt + % + + dname = get_dnn_dir('ResNet'); + net = cv.Net('Caffe', ... + fullfile(dname, 'ResNet-50-deploy.prototxt'), ... 
+ fullfile(dname, 'ResNet-50-model.caffemodel')); + labels = readLabels(fullfile(dname, 'synset_words.txt'), true); + blobOpts = {'SwapRB',false, 'Size',[224 224], 'Mean',[104 117 123]}; + %TODO: mean image from ResNet_mean.binaryproto +end + +function [net, labels, blobOpts] = SqueezeNet(v) + %SQUEEZENET SqueezeNet [Caffe] + % + % homepage = https://github.com/DeepScale/SqueezeNet + % + % # SqueezeNet v1.0 + % + % ## Model + % + % file = test/dnn/SqueezeNet/v1.0/deploy.prototxt + % url = https://github.com/DeepScale/SqueezeNet/raw/master/SqueezeNet_v1.0/deploy.prototxt + % hash = 733249be856b9cd28ce929cd7c41874cf817c3c6 + % + % ## Weights + % + % file = test/dnn/SqueezeNet/v1.0/squeezenet_v1.0.caffemodel + % url = https://github.com/DeepScale/SqueezeNet/raw/master/SqueezeNet_v1.0/squeezenet_v1.0.caffemodel + % hash = 579d0beb658e43c45937bf8bb5e4034fea4e1f69 + % size = 4.76 MB + % + % # SqueezeNet v1.1 + % + % ## Model + % + % file = test/dnn/SqueezeNet/v1.1/deploy.prototxt + % url = https://github.com/DeepScale/SqueezeNet/raw/master/SqueezeNet_v1.1/deploy.prototxt + % hash = c226bbaaa4d83e2a3d3727618091e897e5f2e3aa + % + % ## Weights + % + % file = test/dnn/SqueezeNet/v1.1/squeezenet_v1.1.caffemodel + % url = https://github.com/DeepScale/SqueezeNet/raw/master/SqueezeNet_v1.1/squeezenet_v1.1.caffemodel + % hash = 3397f026368a45ae236403ccc81cfcbe8ebe1bd0 + % size = 4.72 MB + % + % # Classes + % + % file = test/dnn/SqueezeNet/v1.x/synset_words.txt + % url = https://github.com/opencv/opencv/raw/3.3.1/samples/data/dnn/synset_words.txt + % + + v = validatestring(v, {'v1.1', 'v1.0'}); + dname = get_dnn_dir(fullfile('SqueezeNet', v)); + if strcmp(v, 'v1.1') + net = cv.Net('Caffe', ... + fullfile(dname, 'deploy.prototxt'), ... + fullfile(dname, 'squeezenet_v1.1.caffemodel')); + else + net = cv.Net('Caffe', ... + fullfile(dname, 'deploy.prototxt'), ... + fullfile(dname, 'squeezenet_v1.0.caffemodel')); + end + labels = readLabels(fullfile(dname, 'synset_words.txt'), true); + blobOpts = {'SwapRB',false, 'Size',[227 227], 'Mean',[104 117 123]}; +end diff --git a/samples/dnn_object_detection_demo.m b/samples/dnn_object_detection_demo.m new file mode 100644 index 000000000..d2fcd496e --- /dev/null +++ b/samples/dnn_object_detection_demo.m @@ -0,0 +1,504 @@ +%% DNN Object Detection +% +% This sample uses Single-Shot Detector (SSD) or You Only Look Once (YOLO) to +% detect objects on image (produces bounding boxes and corresponding labels). +% +% * +% * +% * +% * +% * +% * +% +% Sources: +% +% * +% * +% * +% * +% * +% + +function dnn_object_detection_demo(im, name, crop, min_conf) + % input image (BGR channel order) + if nargin < 1 || isempty(im) + im = fullfile(mexopencv.root(), 'test', 'rgb.jpg'); + end + img = cv.imread(im, 'Color',true, 'FlipChannels',false); + + % import pretrained model + if nargin < 2, name = 'MobileNetSSD'; end + fprintf('Load model... 
'); tic; + switch lower(name) + case 'vggnetssd' + % PASCAL VOC or ImageNet ILSVRC 2016 + [net, labels, blobOpts] = VGGNetSSD('VOC'); + case 'mobilenetssd' + % PASCAL VOC or Microsoft COCO + [net, labels, blobOpts] = MobileNetSSD('VOC'); + case 'yolo' + % PASCAL VOC or Microsoft COCO + [net, labels, blobOpts] = YOLO('VOC', true); + otherwise + error('Unrecognized model %s', name) + end + toc; + assert(~net.empty(), 'Failed to read network %s', name); + + % feed image to network + if nargin < 3, crop = false; end + blobOpts = ['Crop',crop, blobOpts]; + opts = parseBlobOpts(blobOpts{:}); + blob = cv.Net.blobFromImages(img, blobOpts{:}); + net.setInput(blob); + + % run forward pass + fprintf('Forward pass... '); tic; + detections = net.forward(); + toc; + + % prepare output image + if opts.Crop + % center cropped as fed to network + out = cropImage(img, opts); + else + if false + % resized image (squashed) as fed to network + out = imageFromBlob(blob, opts); + else + % unmodified original image + out = img; + end + end + out = flip(out, 3); % BGR to RGB + + % build detections struct (adjust relative bounding boxes to image size) + detections = processOutput(detections, name, [size(out,2) size(out,1)]); + + % filter-out weak detections according to a minimum confidence threshold + if nargin < 4, min_conf = 0.2; end + detections = detections([detections.confidence] >= min_conf); + + % localization: show bounding boxes + for i=1:numel(detections) + d = detections(i); + idx = find([labels.id] == d.class_id, 1, 'first'); + if isempty(idx), continue; end + out = insertAnnotation(out, d.rect, ... + sprintf('%s: %.2f', labels(idx).name, d.confidence), ... + 'Color',labels(idx).color, 'TextColor',[255 255 255], ... + 'Thickness',2, 'FontScale',0.3); + fprintf('Image %d: (%3.0f%%) %11s at [%3d %3d %3d %3d]\n', ... 
+ d.img_id, d.confidence*100, labels(idx).name, d.rect); + end + imshow(out), title('Object Detection') +end + +% --- Helper functions --- + +function dname = get_dnn_dir(dname) + %GET_DNN_DIR Path to model files, and show where to get them if missing + + dname = fullfile(mexopencv.root(), 'test', 'dnn', dname); + b = isdir(dname); + if ~b + % display help of calling function + % (assumed to be a local function in current file) + st = dbstack(1); + help([mfilename() filemarker() st(1).name]) + end + assert(b, 'Missing model: %s', dname); +end + +function labels = readLabelsColors(labelsFile, addBG) + if nargin < 2, addBG = false; end + fid = fopen(labelsFile, 'rt'); + C = textscan(fid, '%s %d %d %d', 'CollectOutput',true); + fclose(fid); + name = C{1}; + color = uint8(C{2}); + if addBG + name = ['background'; name]; + color = [0 0 0; color]; + end + id = 0:(numel(name) - 1); % first label 0 corresponds to background + labels = struct('id',num2cell(id(:),2), 'name',name, 'color',num2cell(color,2)); +end + +function labels = readLabels(labelsFile, addBG) + if nargin < 2, addBG = true; end + fid = fopen(labelsFile, 'rt'); + C = textscan(fid, '%s'); + fclose(fid); + name = C{1}; + if addBG + name = ['background'; name]; + end + id = 0:(numel(name) - 1); % first label 0 corresponds to background + color = uint8([0 0 0; 255 * lines(numel(name)-1)]); + labels = struct('id',num2cell(id(:),2), 'name',name, 'color',num2cell(color,2)); +end + +function labels = readLabelsProtoTxt(labelsFile, addBG) + if nargin < 2, addBG = true; end + % item { + % name: "abc" + % label: 123 OR id: 123 + % display_name: "xyz" + % } + fid = fopen(labelsFile, 'rt'); + C = textscan(fid, '%*s %*c\n %*s %*q\n %*s %d\n %*s %q\n %*c'); + fclose(fid); + id = C{1}; + name = C{2}; + if addBG + id = [0; id]; + name = ['background'; name]; + end + color = uint8([0 0 0; 255 * lines(numel(name)-1)]); + labels = struct('id',num2cell(id,2), 'name',name, 'color',num2cell(color,2)); + [~,ord] = sort(id); + labels = labels(ord); +end + +function opts = parseBlobOpts(varargin) + p = inputParser(); + p.addParameter('ScaleFactor', 1.0); + p.addParameter('Size', [0 0]); % [w,h] + p.addParameter('Mean', [0 0 0]); % [r,g,b] + p.addParameter('SwapRB', true); + p.addParameter('Crop', true); + p.parse(varargin{:}); + opts = p.Results; +end + +function img = imageFromBlob(blob, opts) + img = permute(blob, [3 4 2 1]); % NCHW -> HWCN + img = img / opts.ScaleFactor; + if false && opts.SwapRB + opts.Mean([1 3]) = opts.Mean([3 1]); + end + img = bsxfun(@plus, img, reshape(opts.Mean, 1, 1, [])); + img = uint8(round(img)); +end + +function img = cropImage(img, opts) + % https://github.com/opencv/opencv/blob/3.3.1/modules/dnn/src/dnn.cpp#L95-L176 + imgSz = [size(img,2) size(img,1)]; + if ~isequal(imgSz, opts.Size) + if opts.Crop + % resize (preserving aspect-ratio) with center-cropping + sf = max(opts.Size ./ imgSz); + img = cv.resize(img, sf, sf); + imgSz = [size(img,2) size(img,1)]; + r = [fix((imgSz - opts.Size)/2) opts.Size]; + img = cv.Rect.crop(img, r); + else + % direct resize (stretched) without cropping + img = cv.resize(img, opts.Size); + end + end +end + +function detections = processOutput(output, name, sz) + isYOLO = strcmpi(name, 'yolo'); + if isYOLO + % YOLO output is already ndetections-by-25-by-1-by-1 + % (20+5 for VOC, 80+5 for COCO) + else + % SSD output is 1-by-1-by-ndetections-by-7 + output = permute(output, [3 4 2 1]); + end + num = size(output,1); + + % note: bounding boxes returned are percentages relative to image size + 
detections = struct('img_id',[], 'class_id',[], 'confidence',[], 'rect',[]); + detections = repmat(detections, num, 1); + for i=1:num + if isYOLO + % (center_x, center_y, width, height, unused_t0, probability_for_each_class[20]) + rrect = struct('center',output(i,1:2) .* sz, ... + 'size',output(i,3:4) .* sz, 'angle',0); + detections(i).rect = cv.RotatedRect.boundingRect(rrect); + [detections(i).confidence, detections(i).class_id] = max(output(i,6:end)); + detections(i).img_id = 0; + else + % (img_id, class_id, confidence, xLeftBottom, yLeftBottom, xRightTop, yRightTop) + detections(i).img_id = output(i,1); + detections(i).class_id = output(i,2); + detections(i).confidence = output(i,3); + detections(i).rect = round(cv.Rect.from2points(... + output(i,4:5) .* sz, output(i,6:7) .* sz)); + end + end +end + +function img = insertAnnotation(img, rect, str, varargin) + % See also: insertObjectAnnotation, insertShape, insertText + p = inputParser(); + p.addParameter('Alpha', 0.6); + p.addParameter('Thickness', 1); + p.addParameter('Color', [255 255 0]); + p.addParameter('TextColor', [0 0 0]); + p.addParameter('FontFace', 'HersheySimplex'); + p.addParameter('FontScale', 0.4); + p.addParameter('AntiAlias', true); + p.addParameter('Shape', 'rectangle'); + p.parse(varargin{:}); + opts = p.Results; + opts.Shape = validatestring(opts.Shape, {'rectangle','circle'}); + thick = 1; + + [sz,b] = cv.getTextSize(str, 'Thickness',thick, ... + 'FontFace',opts.FontFace, 'FontScale',opts.FontScale); + txt_rect = [rect(1), rect(2)-sz(2)-b, sz(1), sz(2)+b]; + txt_orig = [rect(1), rect(2)-b]; + + if opts.AntiAlias + alias = {'LineType','AA'}; + else + alias = {'LineType',8}; + end + + overlay = img; + if strcmp(opts.Shape, 'rectangle') + overlay = cv.rectangle(overlay, rect, ... + 'Color',opts.Color, 'Thickness',opts.Thickness, alias{:}); + else + c = rect(1:2) + rect(3:4)/2; + r = max(rect(3:4)/2); + overlay = cv.circle(overlay, c, r, ... + 'Color',opts.Color, 'Thickness',opts.Thickness, alias{:}); + end + overlay = cv.rectangle(overlay, txt_rect, ... + 'Color',opts.Color, 'Thickness','Filled', alias{:}); + if opts.Thickness > 1 + overlay = cv.rectangle(overlay, txt_rect, ... + 'Color',opts.Color, 'Thickness',opts.Thickness, alias{:}); + end + overlay = cv.putText(overlay, str, txt_orig, ... + 'FontFace',opts.FontFace, 'FontScale',opts.FontScale, ... 
+ 'Color',opts.TextColor, 'Thickness',thick, alias{:}); + + img = cv.addWeighted(img,1-opts.Alpha, overlay,opts.Alpha, 0); +end + +% --- Pretrained models --- +% See also: https://github.com/opencv/opencv_extra/blob/3.3.1/testdata/dnn/download_models.py + +function [net, labels, blobOpts] = VGGNetSSD(imageset) + %VGGNETSSD Single Shot MultiBox Detector, VGGNet-SSD + % + % homepage = https://github.com/weiliu89/caffe/tree/ssd + % + % # SSD300 (VGG16) PASCAL VOC 07+12 [Caffe] + % + % ## Model + Weights + % + % file = test/dnn/VGGNetSSD/VOC/deploy.prototxt + % file = test/dnn/VGGNetSSD/VOC/VGG_VOC0712_SSD_300x300_iter_120000.caffemodel + % url = https://drive.google.com/open?id=0BzKzrI_SkD1_WVVTSmQxU0dVRzA + % hash = 3ba17aa493f045cd5e7452b93f159a383635b614 + % size = 93.0 MB + % + % ## Classes + % + % file = test/dnn/VGGNetSSD/VOC/pascal-classes.txt + % url = https://github.com/opencv/opencv/raw/3.3.1/samples/data/dnn/pascal-classes.txt + % + % # SSD300 (VGG16) ILSVRC 2016 [Caffe] + % + % ## Model + Weights + % + % file = test/dnn/VGGNetSSD/ILSVRC/deploy.prototxt + % file = test/dnn/VGGNetSSD/ILSVRC/VGG_ILSVRC2016_SSD_300x300_iter_440000.caffemodel + % url = https://drive.google.com/open?id=0BzKzrI_SkD1_a2NKQ2d1d043VXM + % hash = f77a8cac14e73a6b053fea51521ecea418ef2bf1 + % size = 178 MB + % + % ## Classes + % + % file = test/dnn/VGGNetSSD/ILSVRC/labelmap_ilsvrc_det.prototxt + % url = https://github.com/weiliu89/caffe/raw/ssd/data/ILSVRC2016/labelmap_ilsvrc_det.prototxt + % hash = 2f052e8260efb8eeca1ff6a64ce56d4e71b4a8f8 + % + + % (VOC: 20 classes, ILSVRC: 200 classes, http://image-net.org/challenges/LSVRC/2016/browse-det-synsets) + imageset = validatestring(imageset, {'VOC', 'ILSVRC'}); + dname = get_dnn_dir(fullfile('VGGNetSSD', imageset)); + blobOpts = {'SwapRB',false, 'Size',[300 300], 'Mean',[104 117 123]}; + if strcmp(imageset, 'VOC') + net = cv.Net('Caffe', ... + fullfile(dname, 'deploy.prototxt'), ... + fullfile(dname, 'VGG_VOC0712_SSD_300x300_iter_120000.caffemodel')); + labels = readLabelsColors(fullfile(dname, 'pascal-classes.txt'), false); + else + net = cv.Net('Caffe', ... + fullfile(dname, 'ssd_vgg16.prototxt'), ... 
+ fullfile(dname, 'VGG_ILSVRC2016_SSD_300x300_iter_440000.caffemodel')); + labels = readLabelsProtoTxt(fullfile(dname, 'labelmap_ilsvrc_det.prototxt'), false); + end +end + +function [net, labels, blobOpts] = MobileNetSSD(imageset) + %MOBILENETSSD Single-Shot Detector, MobileNet-SSD + % + % # A Caffe implementation of MobileNet-SSD, PASCAL VOC 07+12 [Caffe] + % + % homepage = https://github.com/chuanqi305/MobileNet-SSD + % + % ## Model + % + % file = test/dnn/MobileNetSSD/MobileNetSSD_deploy.prototxt + % url = https://github.com/chuanqi305/MobileNet-SSD/raw/master/MobileNetSSD_deploy.prototxt + % hash = d77c9cf09619470d49b82a9dd18704813a2043cd + % + % ## Weights + % + % file = test/dnn/MobileNetSSD/MobileNetSSD_deploy.caffemodel + % url = https://drive.google.com/open?id=0B3gersZ2cHIxRm5PMWRoTkdHdHc + % hash = 994d30a8afaa9e754d17d2373b2d62a7dfbaaf7a + % size = 22.0 MB + % + % ## Classes + % + % file = test/dnn/MobileNetSSD/pascal-classes.txt + % url = https://github.com/opencv/opencv/raw/3.3.1/samples/data/dnn/pascal-classes.txt + % + % # MobileNet-SSD, trained on COCO dataset [TensorFlow] + % + % homepage = https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md + % + % ## Model + % + % file = test/dnn/MobileNetSSD_COCO/ssd_mobilenet_v1_coco.pbtxt + % url = https://github.com/opencv/opencv_extra/raw/3.3.1/testdata/dnn/ssd_mobilenet_v1_coco.pbtxt + % hash = f58916645baac2511f521332fbd574a71b8f80bf + % + % ## Weights + % + % file = test/dnn/MobileNetSSD_COCO/frozen_inference_graph.pb + % url = http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_coco_11_06_2017.tar.gz + % hash = a88a18cca9fe4f9e496d73b8548bfd157ad286e2 + % size = 122 MB + % + % ## Classes + % + % file = test/dnn/MobileNetSSD_COCO/mscoco_label_map.pbtxt + % url = https://github.com/tensorflow/models/blob/master/research/object_detection/data/mscoco_label_map.pbtxt + % + + % Caffe model trained on VOC (20 classes), TensorFlow trained on COCO (80 classes) + imageset = validatestring(imageset, {'VOC', 'COCO'}); + dname = get_dnn_dir(fullfile('MobileNetSSD', imageset)); + blobOpts = {'Size',[300 300], 'Mean',[127.5 127.5 127.5], 'ScaleFactor',1/127.5}; + if strcmp(imageset, 'VOC') + net = cv.Net('Caffe', ... + fullfile(dname, 'MobileNetSSD_deploy.prototxt'), ... + fullfile(dname, 'MobileNetSSD_deploy.caffemodel')); + labels = readLabelsColors(fullfile(dname, 'pascal-classes.txt'), false); + blobOpts = ['SwapRB',false, blobOpts]; + else + %TODO: sometimes crashes from OpenCL execution path, workaround: + % cv.Utils.setUseOptimized(false); net.setPreferableTarget('CPU'); + net = cv.Net('Tensorflow', ... + fullfile(dname, 'frozen_inference_graph.pb'), ... 
+ fullfile(dname, 'ssd_mobilenet_v1_coco.pbtxt')); + labels = readLabelsProtoTxt(fullfile(dname, 'mscoco_label_map.pbtxt'), true); + blobOpts = ['SwapRB',true, blobOpts]; + end +end + +function [net, labels, blobOpts] = YOLO(imageset, isTiny) + %YOLO You Only Look Once, YOLO v2 [Darknet] + % + % homepage = https://pjreddie.com/darknet/yolo/ + % + % # Tiny YOLO (COCO) + % + % ## Model + % + % file = test/dnn/YOLO/tiny-yolo.cfg + % url = https://github.com/pjreddie/darknet/raw/master/cfg/tiny-yolo.cfg + % hash = 8d281f1f80162e44d4af5d0911bfdf5667f8f20d + % + % ## Weights + % + % file = test/dnn/YOLO/tiny-yolo.weights + % url = https://pjreddie.com/media/files/tiny-yolo.weights + % hash = b6c3ba64aa71af2fa78f964ce68e509ed0e75383 + % size = 42.8 MB + % + % # Tiny YOLO (VOC) + % + % ## Model + % + % file = test/dnn/YOLO/tiny-yolo-voc.cfg + % url = https://github.com/pjreddie/darknet/raw/master/cfg/tiny-yolo-voc.cfg + % hash = d26e2408ce4e20136278411760ba904d744fe5b5 + % + % ## Weights + % + % file = test/dnn/YOLO/tiny-yolo-voc.weights + % url = https://pjreddie.com/media/files/tiny-yolo-voc.weights + % hash = 24b4bd049fc4fa5f5e95f684a8967e65c625dff9 + % size = 60.5 MB + % + % # YOLO (COCO) + % + % ## Model + % + % file = test/dnn/YOLO/yolo.cfg + % url = https://github.com/pjreddie/darknet/raw/master/cfg/yolo.cfg + % hash = 6ee14a8b854ce37c55ef4f9def90303a7cb53906 + % + % ## Weights + % + % file = test/dnn/YOLO/yolo.weights + % url = https://pjreddie.com/media/files/yolo.weights + % hash = a2248b25c50307b4e2ba298473e05396e79df9d1 + % size = 194 MB + % + % # YOLO (VOC) + % + % ## Model + % + % file = test/dnn/YOLO/yolo-voc.cfg + % url = https://github.com/pjreddie/darknet/raw/master/cfg/yolo-voc.cfg + % hash = 0150a4b4f018955aa98f2e38ef29ba2104ba74ea + % + % ## Weights + % + % file = test/dnn/YOLO/yolo-voc.weights + % url = https://pjreddie.com/media/files/yolo-voc.weights + % hash = 1cc1a7f8ad12d563d85b76e9de025dc28ac397bb + % size = 193 MB + % + % # Classes + % + % file = test/dnn/YOLO/coco.names + % url = https://github.com/pjreddie/darknet/raw/master/data/coco.names + % + % file = test/dnn/YOLO/voc.names + % url = https://github.com/pjreddie/darknet/raw/master/data/voc.names + % + + % (VOC: 20 classes, COCO: 80 classes) + dname = get_dnn_dir('YOLO'); + imageset = validatestring(imageset, {'VOC', 'COCO'}); + if isTiny + prefix = 'tiny-'; + else + prefix = ''; + end + if strcmp(imageset, 'VOC') + suffix = '-voc'; + else + suffix = ''; + end + net = cv.Net('Darknet', ... + fullfile(dname, [prefix 'yolo' suffix '.cfg']), ... + fullfile(dname, [prefix 'yolo' suffix '.weights'])); + labels = readLabels(fullfile(dname, [lower(imageset) '.names']), true); + blobOpts = {'SwapRB',false, 'Size',[416 416], 'ScaleFactor',1/255}; +end diff --git a/samples/dnn_semantic_segmentation_demo.m b/samples/dnn_semantic_segmentation_demo.m new file mode 100644 index 000000000..69cdd4dd1 --- /dev/null +++ b/samples/dnn_semantic_segmentation_demo.m @@ -0,0 +1,227 @@ +%% DNN Semantic Segmentation +% +% This sample demonstrates semantic segmentation, where we label each pixel +% in the image with a category label. +% +% * +% * +% +% Sources: +% +% * +% * +% + +function dnn_semantic_segmentation_demo(im, name, crop) + % input image (BGR channel order) + if nargin < 1 || isempty(im) + im = fullfile(mexopencv.root(), 'test', 'rgb.jpg'); + end + img = cv.imread(im, 'Color',true, 'FlipChannels',false); + + % import pretrained model + if nargin < 2, name = 'FCN'; end + fprintf('Load model... 
'); tic; + switch lower(name) + case 'fcn' + % PASCAL VOC + [net, labels, blobOpts] = FCN(); + case 'enet' + % Cityscapes + [net, labels, blobOpts] = ENet(); + otherwise + error('Unrecognized model %s', name) + end + toc; + assert(~net.empty(), 'Failed to read network %s', name); + + % feed image to network + if nargin < 3, crop = true; end + blobOpts = ['Crop',crop, blobOpts]; + opts = parseBlobOpts(blobOpts{:}); + blob = cv.Net.blobFromImages(img, blobOpts{:}); + net.setInput(blob); + + % run forward pass + fprintf('Forward pass... '); tic; + score = net.forward(); % 1-by-nclasses-by-nrows-by-ncols + toc; + + % prepare output image + if opts.Crop + % center cropped as fed to network + out = cropImage(img, opts); + else + % resized image (squashed) as fed to network + out = imageFromBlob(blob, opts); + end + out = flip(out, 3); % BGR to RGB + + % pixel-wise segmentation (predict class with max score) + score = permute(score, [3 4 2 1]); % H-by-W-by-nclasses + [S,L] = max(score, [], 3); + + % show count of pixels per class + if ~mexopencv.isOctave() && mexopencv.require('stats') + disp('Pixel Segmentation Summary:') + tabulate({labels(L(:)).name}) + end + + % show segmentation with color-coded classes + rgb = reshape(cat(1, labels(L(:)).color), [size(L) 3]); % label2rgb + out = cv.addWeighted(out, 0.3, rgb, 0.7, 0.0); + imshow(out), title('Semantic Segmentation') + if ~mexopencv.isOctave() + % show label/score of current pixel in tooltips + disp('Move data cursor over pixels to see segmentation labels') + hDCM = datacursormode(gcf); + set(hDCM, 'Enable','on', 'SnapToDataVertex','on', ... + 'UpdateFcn',@(~,e) { + sprintf('Label: %s', labels(L(e.Position(2), e.Position(1))).name) + sprintf('Score: %g', S(e.Position(2), e.Position(1))) + }); + end + + % show legend of color-coded classes + lgnd = createLabelsLegend(labels); + figure, imshow(lgnd), title('Class Labels') +end + +% --- Helper functions --- + +function dname = get_dnn_dir(dname) + %GET_DNN_DIR Path to model files, and show where to get them if missing + + dname = fullfile(mexopencv.root(), 'test', 'dnn', dname); + b = isdir(dname); + if ~b + % display help of calling function + % (assumed to be a local function in current file) + st = dbstack(1); + help([mfilename() filemarker() st(1).name]) + end + assert(b, 'Missing model: %s', dname); +end + +function labels = readLabelsColors(labelsFile, addBG) + if nargin < 2, addBG = false; end + fid = fopen(labelsFile, 'rt'); + C = textscan(fid, '%s %d %d %d', 'CollectOutput',true); + fclose(fid); + name = C{1}; + color = uint8(C{2}); + if addBG + name = ['background'; name]; + color = [0 0 0; color]; + end + id = 0:(numel(name) - 1); % first label 0 corresponds to background + labels = struct('id',num2cell(id(:),2), 'name',name, 'color',num2cell(color,2)); +end + +function opts = parseBlobOpts(varargin) + p = inputParser(); + p.addParameter('ScaleFactor', 1.0); + p.addParameter('Size', [0 0]); % [w,h] + p.addParameter('Mean', [0 0 0]); % [r,g,b] + p.addParameter('SwapRB', true); + p.addParameter('Crop', true); + p.parse(varargin{:}); + opts = p.Results; +end + +function img = imageFromBlob(blob, opts) + img = permute(blob, [3 4 2 1]); % NCHW -> HWCN + img = img / opts.ScaleFactor; + if false && opts.SwapRB + opts.Mean([1 3]) = opts.Mean([3 1]); + end + img = bsxfun(@plus, img, reshape(opts.Mean, 1, 1, [])); + img = uint8(round(img)); +end + +function img = cropImage(img, opts) + % https://github.com/opencv/opencv/blob/3.3.1/modules/dnn/src/dnn.cpp#L95-L176 + imgSz = [size(img,2) 
size(img,1)]; + if ~isequal(imgSz, opts.Size) + if opts.Crop + % resize (preserving aspect-ratio) with center-cropping + sf = max(opts.Size ./ imgSz); + img = cv.resize(img, sf, sf); + imgSz = [size(img,2) size(img,1)]; + r = [fix((imgSz - opts.Size)/2) opts.Size]; + img = cv.Rect.crop(img, r); + else + % direct resize (stretched) without cropping + img = cv.resize(img, opts.Size); + end + end +end + +function img = createLabelsLegend(labels) + img = cell(numel(labels),1); + for i=1:numel(labels) + img{i} = repmat(reshape(labels(i).color, [1 1 3]), 20, 120, 1); + img{i} = cv.putText(img{i}, labels(i).name, [0 15], ... + 'Color',[1 1 1]*255, 'FontFace','HersheySimplex', 'FontScale',0.5); + end + img = cat(1, img{:}); +end + +% --- Pretrained models --- +% See also: https://github.com/opencv/opencv_extra/blob/3.3.1/testdata/dnn/download_models.py + +function [net, labels, blobOpts] = FCN() + %FCN Fully Convolutional Networks, FCN-8s PASCAL VOC [Caffe] + % + % homepage = https://github.com/shelhamer/fcn.berkeleyvision.org + % + % ## Model + % + % file = test/dnn/FCN/fcn8s-heavy-pascal.prototxt + % url = https://github.com/opencv/opencv/raw/3.3.1/samples/data/dnn/fcn8s-heavy-pascal.prototxt + % + % ## Weights + % + % file = test/dnn/FCN/fcn8s-heavy-pascal.caffemodel + % url = http://dl.caffe.berkeleyvision.org/fcn8s-heavy-pascal.caffemodel + % hash = c449ea74dd7d83751d1357d6a8c323fcf4038962 + % size = 513 MB + % + % ## Classes + % + % file = test/dnn/FCN/pascal-classes.txt + % url = https://github.com/opencv/opencv/raw/3.3.1/samples/data/dnn/pascal-classes.txt + % + + dname = get_dnn_dir('FCN'); + net = cv.Net('Caffe', ... + fullfile(dname, 'fcn8s-heavy-pascal.prototxt'), ... + fullfile(dname, 'fcn8s-heavy-pascal.caffemodel')); + labels = readLabelsColors(fullfile(dname, 'pascal-classes.txt'), false); + blobOpts = {'SwapRB',false, 'Size',[500 500], 'Mean',[104.00699, 116.66877, 122.67892]}; +end + +function [net, labels, blobOpts] = ENet() + %ENET ENet on Cityscapes dataset [Torch] + % + % homepage = https://github.com/e-lab/ENet-training + % + % ## Model + Weights + % + % file = test/dnn/ENet/model-cityscapes.net + % url = https://github.com/e-lab/ENet-training/releases/download/v1.cs/model-cityscapes.net + % hash = b4123a73bf464b9ebe9cfc4ab9c2d5c72b161315 + % size = 3.08 MB + % + % ## Classes + % + % file = test/dnn/ENet/enet-classes.txt + % url = https://github.com/opencv/opencv/raw/3.3.1/samples/data/dnn/enet-classes.txt + % + + dname = get_dnn_dir('ENet'); + net = cv.Net('Torch', ... + fullfile(dname, 'model-cityscapes.net')); + labels = readLabelsColors(fullfile(dname, 'enet-classes.txt'), false); + blobOpts = {'SwapRB',true, 'Size',[1024 512], 'ScaleFactor',1/255}; +end diff --git a/samples/drawing_basic_demo.m b/samples/drawing_basic_demo.m index 49c4437d8..8b7772aa4 100644 --- a/samples/drawing_basic_demo.m +++ b/samples/drawing_basic_demo.m @@ -1,4 +1,4 @@ -%% Basic Drawing +%% Basic Geometric Drawing % In this demo, we show how to: % % * Draw a line by using the function |cv.line| @@ -7,8 +7,10 @@ % * Draw a circle by using the function |cv.circle| % * Draw a filled polygon by using the function |cv.fillPoly| % -% , -% +% Sources: +% +% * +% * % %% diff --git a/samples/drawing_demo.m b/samples/drawing_demo.m index e6c3c09b8..f26884bc1 100644 --- a/samples/drawing_demo.m +++ b/samples/drawing_demo.m @@ -1,10 +1,12 @@ -%% Drawing demo +%% Demonstration of drawing functions % This program demonstrates OpenCV drawing and text output functions. 
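In essence, each primitive in these two drawing demos is a single mexopencv call; a minimal sketch (the canvas size, coordinates, and colors below are illustrative values, not taken from the samples):

img = zeros(300, 400, 3, 'uint8');    % blank color canvas
img = cv.line(img, [20 20], [380 20], 'Color',[255 0 0], 'Thickness',2);
img = cv.rectangle(img, [20 40 100 60], 'Color',[0 255 0]);   % rect as [x y w h]
img = cv.circle(img, [200 150], 40, 'Color',[0 0 255]);
img = cv.fillPoly(img, {[250 220; 350 220; 300 280]}, 'Color',[255 255 0]);
img = cv.putText(img, 'mexopencv', [20 290], 'FontFace','HersheySimplex', ...
    'FontScale',0.8, 'Color',[255 255 255]);
imshow(img)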
% -% , -% , -% , -% +% Sources: +% +% * +% * +% * +% * % %% Options @@ -78,7 +80,7 @@ %% Polygons for i=1:NUMBER pts = arrayfun(@(~) [randi(xlims, [3 1]) randi(ylims, [3 1])], 1:2, ... - 'Uniform',false); + 'UniformOutput',false); thick = randi([1 10]); img = cv.polylines(img, pts, ... @@ -91,7 +93,7 @@ %% Filled Polygons for i=1:NUMBER pts = arrayfun(@(~) [randi(xlims, [3 1]) randi(ylims, [3 1])], 1:2, ... - 'Uniform',false); + 'UniformOutput',false); img = cv.fillPoly(img, pts, ... 'Color',clr(), lineType{:}); diff --git a/samples/edge_demo_gui.m b/samples/edge_demo_gui.m index 733b6fafd..eb1014944 100644 --- a/samples/edge_demo_gui.m +++ b/samples/edge_demo_gui.m @@ -1,14 +1,16 @@ -%% Canny Edge Detection demo +%% Canny Edge Detection % This sample demonstrates Canny edge detection. % % In this demo, we show how to use the OpenCV function |cv.Canny| to implement % the Canny Edge Detector. % -% , -% , -% , -% , -% +% Sources: +% +% * +% * +% * +% * +% * % %% Theory @@ -46,7 +48,8 @@ % -1 & 0 & +1 \cr % -2 & 0 & +2 \cr % -1 & 0 & +1 -% }}\right]$$; +% }}\right]$$ +% % $$G_{y} = \left[{\matrix{ % -1 & -2 & -1 \cr % 0 & 0 & 0 \cr @@ -57,7 +60,7 @@ % one of four possible angles (namely 0, 45, 90 or 135) % % $$G = \sqrt{ G_{x}^{2} + G_{y}^{2} }$$ -% and +% % $$\theta = \arctan(\frac{ G_{y} }{ G_{x} })$$ % % 3) _Non-maximum suppression_ is applied. This removes pixels that are not diff --git a/samples/em_demo.m b/samples/em_demo.m index 5c54ae2e7..e527cff22 100644 --- a/samples/em_demo.m +++ b/samples/em_demo.m @@ -1,6 +1,8 @@ -%% EM Clustering demo +%% EM Clustering % -% +% Sources: +% +% * % %% Options diff --git a/samples/epipolar_geometry_demo.m b/samples/epipolar_geometry_demo.m index 216762204..5fe643e1f 100644 --- a/samples/epipolar_geometry_demo.m +++ b/samples/epipolar_geometry_demo.m @@ -5,7 +5,9 @@ % * We will learn about the basics of multiview geometry % * We will see what is epipole, epipolar lines, epipolar constraint etc. % -% +% Sources: +% +% * % %% Theory @@ -25,7 +27,7 @@ % the image below which shows a basic setup with two cameras taking the image % of same scene. % -% <> +% <> % % If we are using only the left camera, we can't find the 3D point % corresponding to the point $x$ in image because every point on the line @@ -62,7 +64,7 @@ % relative to the first in global coordinates. See the image below % (Image courtesy: _Learning OpenCV_ by Gary Bradsky): % -% <> +% <> % % But we prefer measurements to be done in pixel coordinates, right? % Fundamental Matrix contains the same information as Essential Matrix in @@ -92,7 +94,7 @@ % % Below is the result we get: % -% <> +% <> % % You can see in the left image that all epilines are converging at a point % outside the image at right side. That meeting point is the epipole. @@ -188,6 +190,8 @@ function epipolar_geometry_demo() function [img1,img2] = drawlines(img1, img2, lines, pts1, pts2, clrs) %DRAWLINES Draw epilines and points on images % + % [img1,img2] = drawlines(img1, img2, lines, pts1, pts2, clrs) + % % ## Input % * __img1__ first image % * __img2__ second image diff --git a/samples/face_eyes_detect_demo.m b/samples/face_eyes_detect_demo.m index 83b426c20..3d52a2a7d 100644 --- a/samples/face_eyes_detect_demo.m +++ b/samples/face_eyes_detect_demo.m @@ -1,21 +1,24 @@ -%% Face and Eyes Detection demo +%% Face and Eyes Detection % % In this demo, we will learn the basics of face detection using Haar % Feature-based Cascade Classifiers, and how the same extends for eye % detection, etc. 
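Stripped of UI and timing code, that detection workflow reduces to a short sketch; the cascade file name and test image below are placeholders (any OpenCV-compatible cascade XML works), and the detect option names are assumed to follow OpenCV's detectMultiScale parameters:

cascadeF = cv.CascadeClassifier('haarcascade_frontalface_alt.xml');  % placeholder path
gray = cv.imread('people.jpg', 'Grayscale',true);                    % placeholder image
faces = cascadeF.detect(gray, 'ScaleFactor',1.1, ...
    'MinNeighbors',3, 'MinSize',[30 30]);    % cell array of [x y w h] rects
out = cv.cvtColor(gray, 'GRAY2RGB');
for i=1:numel(faces)
    out = cv.rectangle(out, faces{i}, 'Color',[0 255 0], 'Thickness',2);
end
imshow(out)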
%
-% This program demonstrates the cascade classifier, with either Haar or LBP
-% features. This classifier can detect many kinds of rigid objects, once the
-% appropriate classifier is trained. It's most known use is for faces.
+% This program demonstrates the use of the |cv.CascadeClassifier| class to
+% detect objects (face + eyes). You can use Haar or LBP features. This
+% classifier can detect many kinds of rigid objects, once the appropriate
+% classifier is trained. Its best-known use is for faces.
%
-% ,
-% ,
-% ,
-% ,
-% ,
-% ,
-% ,
-%
+% Sources:
+%
+% *
+% *
+% *
+% *
+% *
+% *
+% *
+% *
%
%% Theory
@@ -35,7 +38,7 @@
% subtracting sum of pixels under white rectangle from sum of pixels under
% black rectangle.
%
-% <>
+% <>
%
% Now all possible sizes and locations of each kernel is used to calculate
% plenty of features. (Just imagine how much computation it needs? Even a
@@ -54,7 +57,7 @@
% place is irrelevant. So how do we select the best features out of 160000+
% features? It is achieved by *Adaboost*.
%
-% <>
+% <>
%
% For this, we apply each and every feature on all the training images. For
% each feature, it finds the best threshold which will classify the faces to
@@ -215,7 +218,7 @@
faces = cascadeF.detect(gray, detectOpts{:});
if tryflip
faces2 = cascadeF.detect(cv.flip(gray, 1), detectOpts{:});
- faces2 = cellfun(@(r) [w-r(1)-r(3) r(2:4)], faces2, 'Uniform',false);
+ faces2 = cellfun(@(r) [w-r(1)-r(3) r(2:4)], faces2, 'UniformOutput',false);
faces = [faces(:); faces2(:)];
end
toc
diff --git a/samples/facedetect.m b/samples/facedetect.m
index 604e3135e..2bb507616 100644
--- a/samples/facedetect.m
+++ b/samples/facedetect.m
@@ -1,4 +1,4 @@
-%% Face Detection demo
+%% Face Detection
% Here is an example that illustrates how to detect faces
% in a live video stream.
%
diff --git a/samples/facial_features_demo.m b/samples/facial_features_demo.m
index dc01dd379..85e2abc4a 100644
--- a/samples/facial_features_demo.m
+++ b/samples/facial_features_demo.m
@@ -16,7 +16,9 @@
%
% *
%
-%
+% Sources:
+%
+% *
%
function facial_features_demo(im)
diff --git a/samples/falsecolor_demo_gui.m b/samples/falsecolor_demo_gui.m
index 66deee8ae..5ccc954b8 100644
--- a/samples/falsecolor_demo_gui.m
+++ b/samples/falsecolor_demo_gui.m
@@ -2,7 +2,9 @@
%
% Sample shows how to apply false color on a grayscale image.
%
-%
+% Sources:
+%
+% *
%
function varargout = falsecolor_demo_gui(im)
@@ -18,12 +20,12 @@
im = fullfile(mexopencv.root(), 'test', 'HappyFish.jpg');
img = cv.imread(im, 'Grayscale',true);
end
+ img = createRandomImage();
elseif ischar(im)
img = cv.imread(im, 'Grayscale',true);
else
img = im;
end
- img = createRandomImage();
% create the UI
h = buildGUI(img);
@@ -31,7 +33,7 @@
end
function img = createRandomImage(thick, num)
- %CREATERANDOMIMAGE Creates random image
+ %CREATERANDOMIMAGE Creates image with random shapes
if nargin < 1, thick = 2; end
if nargin < 2, num = 256; end
diff --git a/samples/fback_demo.m b/samples/fback_demo.m
index d3dd68cd5..66b0c9839 100644
--- a/samples/fback_demo.m
+++ b/samples/fback_demo.m
@@ -1,9 +1,11 @@
-%% Farneback Optical Flow demo
+%% Farneback Optical Flow
% This program demonstrates dense optical flow algorithm by Gunnar Farneback,
% mainly the function |cv.calcOpticalFlowFarneback|.
% It captures from the camera by default.
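With capture and visualization removed, the per-frame work is a single call; a minimal sketch using two consecutive camera frames and the algorithm's default parameters:

cap = cv.VideoCapture(0);                        % default camera
prev = cv.cvtColor(cap.read(), 'RGB2GRAY');
next = cv.cvtColor(cap.read(), 'RGB2GRAY');
flow = cv.calcOpticalFlowFarneback(prev, next);  % H-by-W-by-2, per-pixel [dx,dy]
mag = hypot(flow(:,:,1), flow(:,:,2));           % flow magnitude
imshow(mag, []), title('flow magnitude')
cap.release();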
% -% +% Sources: +% +% * % %% Input video diff --git a/samples/fcn_semsegm_demo.m b/samples/fcn_semsegm_demo.m index 0275e1d59..f320e28be 100644 --- a/samples/fcn_semsegm_demo.m +++ b/samples/fcn_semsegm_demo.m @@ -1,22 +1,23 @@ -%% Fully-Convolutional Networks Sematic Segmentation Demo +%% Fully-Convolutional Networks Semantic Segmentation Demo % % "Fully Convolutional Models for Semantic Segmentation", Jonathan Long, % Evan Shelhamer and Trevor Darrell, CVPR, 2015. % % -% +% Sources: +% +% * % %% Model files -dirDNN = fullfile(mexopencv.root(), 'test', 'dnn'); +dirDNN = fullfile(mexopencv.root(), 'test', 'dnn', 'FCN'); modelLabels = fullfile(dirDNN, 'pascal-classes.txt'); modelTxt = fullfile(dirDNN, 'fcn8s-heavy-pascal.prototxt'); -modelBin = fullfile(dirDNN, 'fcn8s-heavy-pascal.caffemodel'); % 513MB file - +modelBin = fullfile(dirDNN, 'fcn8s-heavy-pascal.caffemodel'); % 513 MB file files = {modelLabels, modelTxt, modelBin}; urls = { - 'https://cdn.rawgit.com/opencv/opencv/3.3.0/samples/data/dnn/pascal-classes.txt'; - 'https://cdn.rawgit.com/opencv/opencv/3.3.0/samples/data/dnn/fcn8s-heavy-pascal.prototxt'; + 'https://cdn.rawgit.com/opencv/opencv/3.3.1/samples/data/dnn/pascal-classes.txt'; + 'https://cdn.rawgit.com/opencv/opencv/3.3.1/samples/data/dnn/fcn8s-heavy-pascal.prototxt'; 'http://dl.caffe.berkeleyvision.org/fcn8s-heavy-pascal.caffemodel'; }; if ~isdir(dirDNN), mkdir(dirDNN); end @@ -24,7 +25,7 @@ if exist(files{i}, 'file') ~= 2 disp('Downloading...') if i==3 - choice = questdlg({'Downloading a large Caffe model (>500MB).', ... + choice = questdlg({'Downloading a large Caffe model (513 MB).', ... 'Continue?'}, 'Download', 'OK', 'Cancel', 'OK'); assert(strcmp(choice, 'OK'), 'Download cancelled'); end @@ -36,64 +37,62 @@ fid = fopen(modelLabels, 'rt'); C = textscan(fid, '%s %d %d %d', 'CollectOutput',true); fclose(fid); -labels = C{1}; -colors = uint8(C{2}); +labels = C{1}; % (20 classes + background) +colors = uint8(C{2}); % 21x3 fprintf('%d classes\n', numel(labels)); %% Create and initialize network from Caffe model -net = cv.Net(); -net.import('Caffe', modelTxt, modelBin); +net = cv.Net('Caffe', modelTxt, modelBin); assert(~net.empty(), 'Cant load network'); %% Prepare blob % Set the network input (VOC-FCN8s was trained on 500x500 BGR-images) +crop = true; img = cv.imread(fullfile(mexopencv.root(), 'test', 'rgb.jpg'), ... 'Color',true, 'FlipChannels',false); -if true - blob = cv.Net.blobFromImages(img, 'Size',[500 500]); -elseif true - blob = cv.resize(img, [500 500]); - blob = permute(blob, [4 3 1 2]); - blob = single(blob); -else - blob = img; - blob = permute(blob, [4 3 1 2]); - blob = single(blob); -end +blob = cv.Net.blobFromImages(img, 'Size',[500 500], ... 
+ 'Mean',[104.00699, 116.66877, 122.67892], 'SwapRB',false, 'Crop',crop); net.setInput(blob, 'data'); %% Make forward pass % computes output tic -score = net.forward('score'); +score = net.forward('score'); % 1x21x500x500 toc %% Gather output score = permute(score, [3 4 2 1]); % num,cn,row,col -> row,col,cn,num % max scores and corresponding labels -[S,L] = max(score, [], 3); - -% free memory -%clear net +[S,L] = max(score, [], 3); % score is 500x500x21 %% Result -% for display, recover image from blob as fed to network -if true - rgb = permute(uint8(round(blob)), [3 4 2 1]); -elseif true - rgb = flip(cv.resize(img, [500 500]), 3); +% prepare output image +if crop + if true + % recover image from blob as fed to network + rgb = permute(blob, [3 4 2 1]); + rgb = bsxfun(@plus, rgb, cat(3, 104.00699, 116.66877, 122.67892)); + rgb = uint8(round(rgb)); + else + % take input image and do center-crop + resize ourselves + sz = [size(img,2) size(img,1)]; + rgb = cv.getRectSubPix(img, [min(sz) min(sz)], sz/2); + rgb = cv.resize(rgb, [500 500]); + end else - rgb = flip(img, 3); + % direct resize of original image + rgb = cv.resize(img, [500 500]); end +rgb = flip(rgb, 3); % BGR to RGB % colorize segmentations using label colors and overlay on top of image out = reshape(colors(L(:),:), [size(L) 3]); out = cv.addWeighted(rgb, 0.4, out, 0.6, 0.0); % highlight segmented objects -for lbl=2:numel(labels) % skip 1, background label +for lbl=2:numel(labels) % skip 1 (background label) % regionprops BW = (L == lbl); if nnz(BW) == 0, continue; end @@ -105,16 +104,16 @@ centroids(idx,:) = []; % show bounding box around objects + show name + str = labels{lbl}; clr = colors(lbl,:); - label = labels{lbl}; + clr = uint8(brighten(double(clr)/255, 0.6) * 255); for i=1:size(stats,1) - boundingBox = double(stats(i,1:4)); - out = cv.rectangle(out, boundingBox, 'Color',clr, 'Thickness',1); - out = cv.putText(out, label, boundingBox(1:2)-[0 6], ... - 'Color',clr, 'FontScale',0.5); + bbox = double(stats(i,1:4)); + out = cv.rectangle(out, bbox, 'Color',clr, 'Thickness',1); + out = cv.putText(out, str, bbox(1:2)-[0 6], 'Color',clr, 'FontScale',0.5); end end % display -figure(1), imshow(out), title('segmentation') -figure(2), imshow(S,[]), title('score'), colorbar +figure, imshow(out), title('segmentation') +figure, imshow(S,[]), title('max score'), colorbar diff --git a/samples/feature_detector_fast_demo.m b/samples/feature_detector_fast_demo.m index efe29544d..940dabfe0 100644 --- a/samples/feature_detector_fast_demo.m +++ b/samples/feature_detector_fast_demo.m @@ -5,7 +5,9 @@ % * understand the basics of FAST algorithm % * find corners using OpenCV functionalities for FAST algorithm. % -% +% Sources: +% +% * % %% Theory @@ -30,7 +32,7 @@ % * Consider a circle of 16 pixels around the pixel under test (See the image % below) % -% <> +% <> % % * Now the pixel $p$ is a corner if there exists a set of $n$ contiguous % pixels in the circle (of 16 pixels) which are all brighter than $I_p + t$, @@ -66,7 +68,7 @@ % * Each pixel (say $x$) in these 16 pixels can have one of the following % three states: % -% <> +% <> % % * Depending on these states, the feature vector $P$ is subdivided into 3 % subsets, $P_d$, $P_s$, $P_b$. diff --git a/samples/feature_homography_demo.m b/samples/feature_homography_demo.m index 2b714f54a..5ede93d65 100644 --- a/samples/feature_homography_demo.m +++ b/samples/feature_homography_demo.m @@ -10,10 +10,12 @@ % to map the % points. 
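As a sketch of that pipeline (the image file names are placeholders; keypoint indices returned by the matcher are 0-based, hence the +1 when indexing MATLAB arrays):

img1 = cv.imread('object.png', 'Grayscale',true);   % planar object (query)
img2 = cv.imread('scene.png', 'Grayscale',true);    % scene to search (train)
orb = cv.ORB();
[k1, d1] = orb.detectAndCompute(img1);
[k2, d2] = orb.detectAndCompute(img2);
matcher = cv.DescriptorMatcher('BruteForce-Hamming');
m = matcher.match(d1, d2);                 % struct array of matches
pts1 = cat(1, k1([m.queryIdx] + 1).pt);    % matched point locations
pts2 = cat(1, k2([m.trainIdx] + 1).pt);
[H, inliers] = cv.findHomography(pts1, pts2, 'Method','Ransac');
[h, w] = size(img1);
quad = cv.perspectiveTransform([0 0; w 0; w h; 0 h], H);  % object corners mapped into the scene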
% -% , -% , -% , -% +% Sources: +% +% * +% * +% * +% * % %% diff --git a/samples/feature_homography_track_demo.m b/samples/feature_homography_track_demo.m new file mode 100644 index 000000000..2446765ce --- /dev/null +++ b/samples/feature_homography_track_demo.m @@ -0,0 +1,178 @@ +%% Feature homography based planar tracking +% +% Example of using features2d framework for interactive video homography +% matching. ORB features and FLANN matcher are used. The actual tracking is +% implemented by |PlaneTracker| class. +% +% Inspired by +% +% Video: +% +% Select a textured planar object to track by drawing a box with a mouse. +% +% Sources: +% +% * +% + +function feature_homography_track_demo(vid) + % video file, and a default target to track [x,y,w,h] + win = []; + if nargin < 1 + vid = fullfile(mexopencv.root(), 'test', 'blais.mp4'); + assert(exist(vid, 'file') == 2, 'Missing video file'); + if true + win = [135 165 285 175]; % face + else + win = [136 0 366 433]; % book + end + elseif isempty(vid) + vid = 0; + end + + % open video feed, and get first frame + cap = cv.VideoCapture(vid); + pause(1); + assert(cap.isOpened(), 'Failed to open video'); + frame = cap.read(); + assert(~isempty(frame), 'Failed to read frames'); + + % prepare plot + paused = false; + tframe = zeros(size(frame), class(frame)); % target frame + drawings + hImg = imshow([frame, tframe]); + + % create and initialize tracker + tracker = PlaneTracker(); + if ~isempty(win) + onRect(win); + end + + % create ROI region selector + if ~mexopencv.isOctave() + onHelp(); + roi = RectSelector(hImg); + roi.clip = true; + roi.callback = @onRect; + else + %HACK: RectSelector not Octave compatible + %HACK: function handle to nested function not supported in Octave + roi = struct('isDragging',@()false); + end + + % listen to keyboard input + if ~mexopencv.isOctave() + %HACK: function handle to nested function not supported in Octave + set(ancestor(hImg,'figure'), 'WindowKeyPressFcn',@onType); + end + + % main loop + while ishghandle(hImg) + playing = ~paused && ~roi.isDragging(); + if playing + % read new frame + frame = cap.read(); + if isempty(frame), break; end + end + out = [frame, tframe]; + + % track and draw keypoints and boundary of target in new frame + if playing + tracked = tracker.track(frame); + if ~isempty(tracked) + tr = tracked(1); + out = cv.circle(out, tr.pt1, 2, 'Color',[255 0 0]); + out = cv.polylines(out, tr.quad, 'Closed',true, ... + 'Color',[0 255 0], 'Thickness',2); + % draw matches + out = cv.line(out, ... + bsxfun(@plus, tr.pt0, [size(frame,2) 0]), tr.pt1, ... + 'Color',[0 0 255]); + end + end + + % display result + set(hImg, 'CData',out); + if playing + drawnow; + else + pause(0.1); % slow down a bit if paused + end + end + cap.release(); + if isobject(roi), delete(roi); end + + % --- Callback functions --- + + function onRect(rect) + %ONRECT Callback for ROI selector + % + % onRect(rect) + % + % ## Input + % * __rect__ selected rectangle [x,y,w,h], or empty + % + + if isempty(rect), return; end + + % selection must be made in left image (current frame) + rect = cv.Rect.intersect(rect, [0 0 size(frame,2) size(frame,1)]); + if cv.Rect.area(rect) < 1, return; end + + % track new target + disp('New target...') + tracker.clear(); + tracker.addTarget(frame, rect); + + % draw keypoints and boundary in target frame + if ~isempty(tracker.targets) + t = tracker.targets(1); + tframe = cv.drawKeypoints(t.image, t.kpts, 'Color',[255 0 0]); + tframe = cv.rectangle(tframe, t.rect(1:2), t.rect(3:4), ... 
+ 'Color',[0 255 0], 'Thickness',2);
+ else
+ tframe(:) = 0;
+ end
+
+ % un-pause
+ paused = false;
+ end
+
+ function onType(hfig, e)
+ %ONTYPE Event handler for key press on figure
+
+ switch e.Key
+ case {'q', 'escape'}
+ close(hfig);
+
+ case 'h'
+ onHelp();
+
+ case {'space', 'p'}
+ disp('Toggle pause...');
+ paused = ~paused;
+
+ case {'c', 'r'}
+ disp('Clearing tracker...');
+ tracker.clear();
+ tframe(:) = 0;
+ end
+ end
+
+ function onHelp()
+ %ONHELP Display usage help dialog
+
+ h = helpdlg({
+ 'Select object(s) to track using the mouse.'
+ 'Hot keys:'
+ ' q - quit'
+ ' h - help'
+ ' p - pause'
+ ' c - clear targets'
+ });
+
+ % wait for user to accept dialog
+ set(h, 'WindowStyle','modal');
+ waitfor(h);
+ end
+end
diff --git a/samples/ffilldemo_gui.m b/samples/ffilldemo_gui.m
index 6415d4751..5c5fe0573 100644
--- a/samples/ffilldemo_gui.m
+++ b/samples/ffilldemo_gui.m
@@ -5,8 +5,10 @@
%
% *
%
-% ,
-%
+% Sources:
+%
+% *
+% *
%
function varargout = ffilldemo_gui(im)
diff --git a/samples/file_storage_demo.m b/samples/file_storage_demo.m
index 020562e20..addb6eb38 100644
--- a/samples/file_storage_demo.m
+++ b/samples/file_storage_demo.m
@@ -1,4 +1,4 @@
-%% Serialization demo
+%% Serialization Functionality
% Demonstrate the usage of OpenCV serialization functionality.
%
% This program demonstrates the use of |cv.FileStorage| for serialization in
@@ -6,7 +6,9 @@
% the file end type. You should try using different file extensions
% (e.g. |.yaml|, |.yml|, |.xml|, |.xml.gz|, |.yaml.gz|, etc.)
%
-%
+% Sources:
+%
+% *
%
%% Data
diff --git a/samples/findContours_demo_gui.m b/samples/findContours_demo_gui.m
index 74adc5439..408709746 100644
--- a/samples/findContours_demo_gui.m
+++ b/samples/findContours_demo_gui.m
@@ -6,8 +6,10 @@
% *
% *
%
-%
-%
+% Sources:
+%
+% *
+% *
%
function varargout = findContours_demo_gui(im)
diff --git a/samples/fitellipse_demo_gui.m b/samples/fitellipse_demo_gui.m
index cc49578db..81e3a7eb0 100644
--- a/samples/fitellipse_demo_gui.m
+++ b/samples/fitellipse_demo_gui.m
@@ -1,18 +1,24 @@
%% Fit ellipses demo
% This program is demonstration for ellipse fitting. The program finds
-% contours and approximate them by ellipses.
+% contours and approximate them by ellipses using one of three methods:
+%
+% # OpenCV's original method, which implements the Fitzgibbon 1995 method.
+% # The Approximate Mean Square (AMS) method proposed by Taubin 1991.
+% # The Direct least squares (Direct) method proposed by Fitzgibbon 1999.
%
% Trackbar specify threshold parameter.
%
-% White lines are contours. Red lines are fitting ellipses.
+% White lines are contour points. Red lines are fitting ellipses.
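Per contour, the fitting step is a single call; a minimal sketch on synthetic points (cv.fitEllipse needs at least 5 points; 'Linear' is the classic method, with 'AMS' and 'Direct' as the alternatives listed above):

t = linspace(0, 2*pi, 50)';
pts = [100 + 60*cos(t), 100 + 30*sin(t)] + randn(50, 2);  % noisy ellipse samples
rrect = cv.fitEllipse(pts, 'Method','Direct');            % rotated-rect struct
img = zeros(200, 200, 3, 'uint8');
img = cv.ellipse(img, rrect, 'Color',[255 0 0], 'LineType','AA');
imshow(img)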
+% +% Sources: % -% +% * % function varargout = fitellipse_demo_gui(im) % load source image if nargin < 1 - im = fullfile(mexopencv.root(),'test','stuff.jpg'); + im = fullfile(mexopencv.root(),'test','ellipses.jpg'); src = cv.imread(im, 'Grayscale',true); elseif ischar(im) src = cv.imread(im, 'Grayscale',true); @@ -31,32 +37,39 @@ function onChange(~,~,h) %ONCHANGE Event handler for UI controls % retrieve current values from UI controls + algIdx = get(h.pop, 'Value'); + algs = get(h.pop, 'String'); thresh = round(get(h.slid, 'Value')); set(h.txt, 'String',sprintf('Threshold: %3d',thresh)); - % threshold image and compute find contours - bimg = uint8(h.src >= thresh) * 255; + % threshold image and find contours + bimg = uint8(cv.blur(h.src) >= thresh) * 255; contours = cv.findContours(bimg, 'Mode','List', 'Method','None'); + % filter out contours that are too simple, probably not an ellipse + % (note: fitEllipse requires at least 5 points) + contours(cellfun(@numel, contours) < 50) = []; + + % {{[x y], [x y], ..}, ..} -> {[x y; x y; ..], ..} + contours = cellfun(@(c) cat(1,c{:}), contours, 'UniformOutput',false); + + % draw all contours points + if true + cimg = cv.cvtColor(h.src * 0.3, 'GRAY2RGB'); + else + cimg = zeros([size(bimg) 3], 'uint8'); + end + cimg = cv.drawContours(cimg, contours, 'Color',[255 255 255]); + % for each contour - cimg = zeros([size(bimg) 3], 'uint8'); for i=1:numel(contours) - if numel(contours{i}) < 6 - % skip: contour too simple, probably not an ellipse - continue; - end - % approximate by an ellipse - rrect = cv.fitEllipse(contours{i}); + rrect = cv.fitEllipse(contours{i}, 'Method',algs{algIdx}); if max(rrect.size) > min(rrect.size)*30 - % skip: rectangle too tall/wide + % skip if rectangle is too tall/wide continue; end - % draw contour - cimg = cv.drawContours(cimg, contours, 'ContourIdx',i-1, ... - 'Color',[255 255 255]); - % draw ellipse if true cimg = cv.ellipse(cimg, rrect, 'Color',[255 0 0], 'LineType','AA'); @@ -104,14 +117,16 @@ function onChange(~,~,h) axes(h.ax); h.img = imshow(img); end + h.pop = uicontrol('Parent',h.fig, 'Style','popupmenu', ... + 'Position',[5 5 70 20], 'String',{'Linear', 'Direct', 'AMS'}); h.txt = uicontrol('Parent',h.fig, 'Style','text', 'FontSize',11, ... - 'Position',[5 5 130 20], 'String',sprintf('Threshold: %3d',thresh)); + 'Position',[75 5 120 20], 'String',sprintf('Threshold: %3d',thresh)); h.slid = uicontrol('Parent',h.fig, 'Style','slider', 'Value',thresh, ... - 'Min',0, 'Max',max_thresh, 'SliderStep',[1 10]./(max_thresh-0), ... - 'Position',[135 5 sz(2)-135-5 20]); + 'Min',1, 'Max',max_thresh, 'SliderStep',[1 10]./(max_thresh-1), ... + 'Position',[200 5 sz(2)-200-5 20]); % hook event handlers, and trigger default start - set(h.slid, 'Callback',{@onChange,h}, ... + set([h.slid, h.pop], 'Callback',{@onChange,h}, ... 'Interruptible','off', 'BusyAction','cancel'); onChange([],[],h); end diff --git a/samples/fitline_demo_gui.m b/samples/fitline_demo_gui.m index ea41fdbaf..b29674df7 100644 --- a/samples/fitline_demo_gui.m +++ b/samples/fitline_demo_gui.m @@ -5,7 +5,9 @@ % Switch between the different M-estimator functions and see, how well the % robust functions fit the line even in case of ~50% of outliers. 
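A sketch of the underlying call on synthetic data; the 'DistType' values are assumed to follow the OpenCV cv::fitLine distance types, and for 2D input the result is taken to be [vx vy x0 y0], a unit direction plus a point on the line:

pts = [(1:50)' + randn(50,1)*2, (1:50)'];   % noisy line samples
pts = [pts; rand(25, 2) * 50];              % add ~33% outliers
lin = cv.fitLine(pts, 'DistType','Huber');  % robust M-estimator fit
img = zeros(60, 60, 3, 'uint8');
p0 = lin(3:4); v = lin(1:2);
img = cv.line(img, round(p0 - 60*v), round(p0 + 60*v), 'Color',[0 0 255]);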
% -% +% Sources: +% +% * % function varargout = fitline_demo_gui() @@ -17,11 +19,14 @@ function pts = sample_line(p1, p2, num, noise) %SAMPLE_LINE Sample points from line segment % + % pts = sample_line(p1, p2, num) + % pts = sample_line(p1, p2, num, noise) + % % ## Input % * __p1__ first line point % * __p2__ second line point % * __num__ number of points to sample - % * __noise__ gaussian noise added + % * __noise__ gaussian noise added, default 0 % % ## Output % * __pts__ points matrix num-by-2 diff --git a/samples/gabor_demo.m b/samples/gabor_demo.m index ff125ed78..57a1d4749 100644 --- a/samples/gabor_demo.m +++ b/samples/gabor_demo.m @@ -4,7 +4,9 @@ % get % image effect. % -% +% Sources: +% +% * % %% diff --git a/samples/gausian_median_blur_bilateral_filter.m b/samples/gausian_median_blur_bilateral_filter.m index 4c7b5ebf5..b3910ac54 100644 --- a/samples/gausian_median_blur_bilateral_filter.m +++ b/samples/gausian_median_blur_bilateral_filter.m @@ -7,7 +7,9 @@ % * % * % -% +% Sources: +% +% * % %% Theory @@ -133,7 +135,7 @@ % filter. % %% -dbtype smoothing_demo 24:28 +dbtype smoothing_demo 26:30 %% % We specify the following arguments (for more details, check the function % reference): @@ -153,7 +155,7 @@ % It is performed by the function |cv.GaussianBlur()|: % %% -dbtype smoothing_demo 31:35 +dbtype smoothing_demo 33:37 %% % Here we use the following arguments: % @@ -174,7 +176,7 @@ % This filter is provided by the |cv.medianBlur()| function: % %% -dbtype smoothing_demo 38:42 +dbtype smoothing_demo 40:44 %% % We use these arguments: % @@ -190,7 +192,7 @@ % Provided by OpenCV function |cv.bilateralFilter()|. % %% -dbtype smoothing_demo 45:50 +dbtype smoothing_demo 47:52 %% % We use the following arguments: % diff --git a/samples/gaussian_mix_demo.m b/samples/gaussian_mix_demo.m index 6be3c7453..5b858f223 100644 --- a/samples/gaussian_mix_demo.m +++ b/samples/gaussian_mix_demo.m @@ -1,9 +1,11 @@ -%% Gaussian mixture model demo +%% Gaussian Mixture Model (GMM) % % Demonstrates EM clustering, and also compares againt K-means clustering. 
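For the K-means half of that comparison the call is essentially a one-liner; a sketch on two synthetic clusters (cv.kmeans returns 0-based integer labels):

pts = single([randn(100, 2); randn(100, 2) + 5]);   % two synthetic clusters
[labels, centers] = cv.kmeans(pts, 2, 'Attempts',5);
scatter(pts(:,1), pts(:,2), 10, double(labels), 'filled')
hold on, plot(centers(:,1), centers(:,2), 'r+', 'MarkerSize',12), hold off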
% -% , -% +% Sources: +% +% * +% * % function gaussian_mix_demo() @@ -60,7 +62,7 @@ function gaussian_mix_demo() function [pts, labels, mus, sigmas] = make_gaussian_mixture(K, sz) %MAKE_GAUSSIAN_MIXTURE Random points from Gaussian mixture distribution % - % [pts, labels, mus, sigmas] = make_gaussian_mixture(K, sz) + % [pts, labels, mus, sigmas] = make_gaussian_mixture(K, sz) % % ## Input % * __K__ number of components @@ -96,7 +98,7 @@ function gaussian_mix_demo() function X = my_mvnrnd(mu, sigma, num) %MY_MVNRND Random points from Gaussian distribution % - % X = my_mvnrnd(mu, sigma, num) + % X = my_mvnrnd(mu, sigma, num) % % ## Input % * __mu__ 1x2 mean vector @@ -121,7 +123,7 @@ function gaussian_mix_demo() function img = draw_gaussian(img, mu, sigma, clr) %DRAW_GAUSSIAN Draw a bivariate Gaussian % - % img = draw_gaussian(img, mu, sigma, clr) + % img = draw_gaussian(img, mu, sigma, clr) % % ## Input % * __img__ input image on which to draw @@ -138,7 +140,7 @@ function gaussian_mix_demo() % * its covariance matrix eigenvector corresponding to largest eigenvalue % is the direction of the ellipse % * its (scaled) eigenvalues are the major/minor axis lengths of the - % ellipse + % ellipse % [w,u,~] = cv.SVD.Compute(sigma); diff --git a/samples/generalContours_demo1.m b/samples/generalContours_demo1.m index 3f27f5ab5..9c13bc3dc 100644 --- a/samples/generalContours_demo1.m +++ b/samples/generalContours_demo1.m @@ -6,8 +6,10 @@ % * use the OpenCV function % * use the OpenCV function % -% , -% +% Sources: +% +% * +% * % %% Input diff --git a/samples/generalContours_demo2.m b/samples/generalContours_demo2.m index 80eb1956a..e26aa060e 100644 --- a/samples/generalContours_demo2.m +++ b/samples/generalContours_demo2.m @@ -7,8 +7,10 @@ % * use the OpenCV function % * use the OpenCV function % -% , -% +% Sources: +% +% * +% * % %% Input diff --git a/samples/generalized_hough_demo.m b/samples/generalized_hough_demo.m index 01726bd2b..e7db64e0a 100644 --- a/samples/generalized_hough_demo.m +++ b/samples/generalized_hough_demo.m @@ -1,8 +1,10 @@ -%% Generalized Hough transform demo +%% Generalized Hough transform % This program demonstrates arbitrary object finding with the Generalized % Hough transform. % -% +% Sources: +% +% * % %% Input images diff --git a/samples/generic_corner_detector_demo.m b/samples/generic_corner_detector_demo.m index 85142c536..5633e8ef5 100644 --- a/samples/generic_corner_detector_demo.m +++ b/samples/generic_corner_detector_demo.m @@ -15,12 +15,14 @@ % * To implement our own version of the Harris detector as well as the % Shi-Tomasi detector, by using the two functions above. % -% , -% , -% , -% , -% , -% +% Sources: +% +% * +% * +% * +% * +% * +% * % %% Theory @@ -52,7 +54,7 @@ % over $I$ (with displacements $u$ in the x direction and $v$ in the y % direction) and will calculate the variation of intensity. % -% $$E(u,v) = \sum _{x,y} w(x,y)[ I(x+u,y+v) - I(x,y)]^{2}$$ +% $$E(u,v) = \sum_{x,y} w(x,y)[ I(x+u,y+v) - I(x,y)]^{2}$$ % % where: % @@ -64,18 +66,18 @@ % with a large variation in intensity. Hence, we have to maximize the equation % above, specifically the term: % -% $$\sum _{x,y}[ I(x+u,y+v) - I(x,y)]^{2}$$ +% $$\sum_{x,y}[ I(x+u,y+v) - I(x,y)]^{2}$$ % % Using _Taylor expansion_: % -% $$E(u,v) \approx \sum _{x,y}[ I(x,y) + u I_{x} + vI_{y} - I(x,y)]^{2}$$ +% $$E(u,v) \approx \sum_{x,y}[ I(x,y) + u I_{x} + vI_{y} - I(x,y)]^{2}$$ % % Here, $I_x$ and $I_y$ are image derivatives in x and y directions % respectively (can be easily found out using |cv.Sobel|). 
% % Expanding the equation and cancelling properly: % -% $$E(u,v) \approx \sum _{x,y} u^{2}I_{x}^{2} + 2uvI_{x}I_{y} + v^{2}I_{y}^{2}$$ +% $$E(u,v) \approx \sum_{x,y} u^{2}I_{x}^{2} + 2uvI_{x}I_{y} + v^{2}I_{y}^{2}$$ % % Which can be expressed in a matrix form as: % @@ -131,7 +133,7 @@ % % It can be represented in a nice picture as follows: % -% <> +% <> % % So the result of Harris Corner Detection is a grayscale image with these % scores. Thresholding for a suitable give you the corners in the image. @@ -151,7 +153,7 @@ % we plot it in $\lambda_1 - \lambda_2$ space as we did in Harris Corner % Detector, we get an image as below: % -% <> +% <> % % From the figure, you can see that only when $\lambda_1$ and $\lambda_2$ are % above a minimum value, $\lambda_{min}$, it is conidered as a corner diff --git a/samples/grabcut_demo_gui.m b/samples/grabcut_demo_gui.m index 581e0a064..a2f733b75 100644 --- a/samples/grabcut_demo_gui.m +++ b/samples/grabcut_demo_gui.m @@ -4,9 +4,11 @@ % This program demonstrates GrabCut segmentation: select an object in a % region and then grabcut will attempt to segment it out. % -% , -% , -% +% Sources: +% +% * +% * +% * % %% Theory @@ -35,7 +37,7 @@ % rectangle. Then some final touchups with white strokes (denoting foreground) % and black strokes (denoting background) is made. And we get a nice result. % -% <> +% <> % % So what happens in background ? % @@ -46,7 +48,7 @@ % considered as hard-labelling which means they won't change in the process. % * Computer does an initial labelling depeding on the data we gave. It labels % the foreground and background pixels (or it hard-labels) -% * Now a Gaussian Mixture Model(GMM) is used to model the foreground and +% * Now a Gaussian Mixture Model (GMM) is used to model the foreground and % background. % * Depending on the data we gave, GMM learns and create new pixel % distribution. That is, the unknown pixels are labelled either probable @@ -72,7 +74,7 @@ % It is illustrated in below image % (Image Courtesy: ) % -% <> +% <> % %% Code diff --git a/samples/hdr_imaging_demo.m b/samples/hdr_imaging_demo.m index 7d98976d3..cd542b47c 100644 --- a/samples/hdr_imaging_demo.m +++ b/samples/hdr_imaging_demo.m @@ -1,8 +1,10 @@ %% High Dynamic Range Imaging % -% , -% , -% +% Sources: +% +% * +% * +% * % %% Introduction diff --git a/samples/hi_res_browse_demo.m b/samples/hi_res_browse_demo.m index 48c1048c8..2dd160ff5 100644 --- a/samples/hi_res_browse_demo.m +++ b/samples/hi_res_browse_demo.m @@ -1,4 +1,4 @@ -%% Hi-resolution image navigation +%% Hi-Resolution Image Navigation % % Sample shows how to implement a simple hi-resolution image navigation. % Similar to |imtool|. @@ -6,7 +6,9 @@ % Use the mouse to move the preview window, and the scroll wheel to change % the preview size. % -% +% Sources: +% +% * % function varargout = hi_res_browse_demo(im) diff --git a/samples/hist_demo_gui.m b/samples/hist_demo_gui.m index 57184416a..64e9a499e 100644 --- a/samples/hist_demo_gui.m +++ b/samples/hist_demo_gui.m @@ -1,7 +1,9 @@ -%% Histogram demo +%% Intensity Image Histogram % This program demonstrates the use of |cv.calcHist| for histogram creation. % -% +% Sources: +% +% * % function varargout = hist_demo_gui(im) diff --git a/samples/histogram_2d_demo.m b/samples/histogram_2d_demo.m index adf4b5eb5..9cdeb7fa2 100644 --- a/samples/histogram_2d_demo.m +++ b/samples/histogram_2d_demo.m @@ -1,4 +1,4 @@ -%% 2D Histograms +%% 2D Histogram % % We previously showed how to calculate and plot one-dimensional histograms. 
% It is called one-dimensional because we are taking only one feature into our @@ -10,8 +10,10 @@ % We explain how to create such a color histogram, which will be useful in % understanding further topics like Histogram Back-Projection % -% , -% +% Sources: +% +% * +% * % %% Theory diff --git a/samples/histogram_calculation_demo.m b/samples/histogram_calculation_demo.m index 84979c207..17560b3b8 100644 --- a/samples/histogram_calculation_demo.m +++ b/samples/histogram_calculation_demo.m @@ -6,8 +6,10 @@ % * Calculate histograms of arrays of images by using the OpenCV function % |cv.calcHist| % -% , -% +% Sources: +% +% * +% * % %% Theory @@ -20,7 +22,7 @@ % Let's see an example. Imagine that a matrix contains information of an image % (i.e. intensity in the range $0-255$): % -% <> +% <> % % What happens if we want to _count_ this data in an organized way? Since we % know that the _range_ of information value for this case is 256 values, we @@ -34,7 +36,7 @@ % $bin_{i}$. Applying this to the example above we get the image below % (axis x represents the bins and axis y the number of pixels in each of them). % -% <> +% <> % % This was just a simple example of how an histogram works and why it is % useful. An histogram can keep count not only of color intensities, but of diff --git a/samples/histogram_comparison_demo.m b/samples/histogram_comparison_demo.m index 50c327784..e723197fe 100644 --- a/samples/histogram_comparison_demo.m +++ b/samples/histogram_comparison_demo.m @@ -6,8 +6,10 @@ % express how well two histograms match with each other % * Use different metrics to compare histograms % -% , -% +% Sources: +% +% * +% * % %% Theory @@ -23,7 +25,9 @@ % $$d(H_1,H_2) = \frac{\sum_I (H_1(I) - \bar{H_1}) (H_2(I) - \bar{H_2})} % {\sqrt{\sum_I(H_1(I) - \bar{H_1})^2 % \sum_I(H_2(I) - \bar{H_2})^2}}$$ +% % where +% % $$\bar{H_k} = \frac{1}{N} \sum _J H_k(J)$$ % % and $N$ is the total number of histogram bins. @@ -60,9 +64,9 @@ %% % Load source images (base image and the two other images to compare) im = { - 'http://docs.opencv.org/3.2.0/Histogram_Comparison_Source_0.jpg' - 'http://docs.opencv.org/3.2.0/Histogram_Comparison_Source_1.jpg' - 'http://docs.opencv.org/3.2.0/Histogram_Comparison_Source_2.jpg' + 'https://docs.opencv.org/3.3.1/Histogram_Comparison_Source_0.jpg' + 'https://docs.opencv.org/3.3.1/Histogram_Comparison_Source_1.jpg' + 'https://docs.opencv.org/3.3.1/Histogram_Comparison_Source_2.jpg' }; src = cell(3,1); for i=1:3 @@ -135,7 +139,7 @@ disp(t); else %HACK: use cell array instead of table - t = [{''}, names; algs(:), arrayfun(@num2str,D,'Uniform',false)]; + t = [{''}, names; algs(:), arrayfun(@num2str,D,'UniformOutput',false)]; t = t'; fprintf('%13s %9s %9s %9s %9s\n',t{:}); end diff --git a/samples/histogram_equalization_demo.m b/samples/histogram_equalization_demo.m index b5cc10d8f..758e570ad 100644 --- a/samples/histogram_equalization_demo.m +++ b/samples/histogram_equalization_demo.m @@ -6,8 +6,10 @@ % * How to equalize histograms of images by using the OpenCV function % |cv.equalizeHist| % -% , -% +% Sources: +% +% * +% * % %% Theory @@ -16,7 +18,7 @@ % distribution of an image. It quantifies the number of pixels for each % intensity value considered. % -% <> +% <> % % Histogram Equalization is a method that improves the contrast in an image, % in order to stretch out the intensity range. To make it clearer, from the @@ -27,7 +29,7 @@ % we get an histogram like the figure in the center. The resulting image is % shown in the picture at right. 
% -% <> +% <> % % Equalization implies _mapping_ one distribution (the given histogram) to % another distribution (a wider and more uniform distribution of intensity @@ -43,7 +45,7 @@ % that the maximum value is 255 (or the maximum value for the intensity of the % image). From the example above, the cumulative function is: % -% <> +% <> % % Finally, we use a simple remapping procedure to obtain the intensity values % of the equalized image: diff --git a/samples/hitmiss_demo.m b/samples/hitmiss_demo.m index 3cf72ec72..f39a9876c 100644 --- a/samples/hitmiss_demo.m +++ b/samples/hitmiss_demo.m @@ -9,8 +9,10 @@ % % We will use the OpenCV function |cv.morphologyEx|. % -% , -% +% Sources: +% +% * +% * % %% Theory @@ -40,7 +42,7 @@ % The structuring elements $B_1$ and $B_2$ can be combined into a single % element $B$. Let's see an example: % -% <> +% <> % % *Structuring elements (kernels). Left: kernel to 'hit'. Middle: kernel to % 'miss'. Right: final combined kernel* @@ -51,11 +53,11 @@ % any kind, we don't care about them. Now, let's apply this kernel to an input % image: % -% <> +% <> % % You can see that the pattern is found in just one location within the image. % -% <> +% <> % %% Other examples % @@ -64,11 +66,11 @@ % % * Kernel and output result for finding top-right corners % -% <> +% <> % % * Kernel and output result for finding left end points % -% <> +% <> % % Now try your own patterns! % diff --git a/samples/hough_circles_demo.m b/samples/hough_circles_demo.m index 6fa849f3f..75de4bb6c 100644 --- a/samples/hough_circles_demo.m +++ b/samples/hough_circles_demo.m @@ -5,11 +5,13 @@ % how to use the OpenCV function |cv.HoughCircles| to detect circles in an % image. % -% , -% , -% , -% , -% +% Sources: +% +% * +% * +% * +% * +% * % %% Theory @@ -27,7 +29,7 @@ % and $r$ is the radius, which allows us to completely define a circle, as it % can be seen below: % -% <> +% <> % % For sake of efficiency, OpenCV implements a detection method slightly % trickier than the standard Hough Transform: _The Hough gradient method_, diff --git a/samples/hough_lines_demo.m b/samples/hough_lines_demo.m index 4ecb393fc..3acecb98c 100644 --- a/samples/hough_lines_demo.m +++ b/samples/hough_lines_demo.m @@ -5,11 +5,13 @@ % We show how to use the OpenCV functions |cv.HoughLines| and |cv.HoughLinesP| % to detect lines in an image. % -% , -% , -% , -% , -% +% Sources: +% +% * +% * +% * +% * +% * % %% Theory @@ -26,7 +28,7 @@ % * In the *Cartesian coordinate system:* Parameters: $(m,b)$. % * In the *Polar coordinate system:* Parameters: $(r,\theta)$ % -% <> +% <> % % For Hough Transforms, we will express lines in the _Polar system_. Hence, a % line equation can be written as: @@ -39,7 +41,7 @@ % In general for each point $(x_{0}, y_{0})$, we can define the family of % lines that goes through that point as: % -% $$r_{\theta} = x_{0} \cdot \cos \theta + y_{0} \cdot \sin \theta$$ +% $$r_{\theta} = x_{0} \cdot \cos \theta + y_{0} \cdot \sin \theta$$ % % Meaning that each pair $(r_{\theta},\theta)$ represents each line that % passes by $(x_{0}, y_{0})$. @@ -48,7 +50,7 @@ % through it, we get a sinusoid. For instance, for $x_{0} = 8$ and $y_{0} = 6$ % we get the following plot (in a plane $\theta$ - $r$): % -% <> +% <> % % We consider only points such that $r > 0$ and $0< \theta < 2 \pi$. 
% @@ -58,7 +60,7 @@ % the example above and drawing the plot for two more points: % $x_{1} = 4$, $y_{1} = 9$ and $x_{2} = 12$, $y_{2} = 3$, we get: % -% <> +% <> % % The three plots intersect in one single point $(0.925, 9.6)$, these % coordinates are the parameters ($\theta, r$) or the line in which diff --git a/samples/hull_demo_gui.m b/samples/hull_demo_gui.m index d42fdec53..4e792e8b6 100644 --- a/samples/hull_demo_gui.m +++ b/samples/hull_demo_gui.m @@ -4,8 +4,10 @@ % In this sample you will learn how to use the OpenCV function % . % -% -% +% Sources: +% +% * +% * % function varargout = hull_demo_gui(im) diff --git a/samples/image_alignment_demo.m b/samples/image_alignment_demo.m index e414b3fe1..068507b34 100644 --- a/samples/image_alignment_demo.m +++ b/samples/image_alignment_demo.m @@ -6,7 +6,9 @@ % matrix is not specified, the identity transformation is used to initialize % the algorithm. % -% +% Sources: +% +% * % %% Options diff --git a/samples/image_bitwise_demo.m b/samples/image_bitwise_demo.m index dc0f79038..f5d3ec8d0 100644 --- a/samples/image_bitwise_demo.m +++ b/samples/image_bitwise_demo.m @@ -1,9 +1,11 @@ -%% Bitwise Operations +%% Bitwise Operations on Images % % In this demo, we show how to perform arithmetic operations on images like % addition, subtraction, bitwise operations, etc. % -% +% Sources: +% +% * % %% Theory diff --git a/samples/image_similarity_demo.m b/samples/image_similarity_demo.m index 4eaadfa9e..ab430df56 100644 --- a/samples/image_similarity_demo.m +++ b/samples/image_similarity_demo.m @@ -1,9 +1,11 @@ %% Similarity measurement (PSNR and SSIM) % Image similarity under lossy compression. % -% , -% , -% +% Sources: +% +% * +% * +% * % %% PSNR @@ -21,7 +23,7 @@ % % $$PSNR = 10 \cdot \log_{10} \left( \frac{MAX_I^2}{MSE} \right)$$ % -% Here the $MAX_I^2$ is the maximum valid value for a pixel. In case of the +% Here the $MAX_I$ is the maximum valid value for a pixel. In case of the % simple single byte image per pixel per channel this is 255. When two images % are the same the MSE will give zero, resulting in an invalid divide by zero % operation in the PSNR formula. In this case the PSNR is undefined and as diff --git a/samples/inpaint_demo.m b/samples/inpaint_demo.m index 80122120a..d5866d6eb 100644 --- a/samples/inpaint_demo.m +++ b/samples/inpaint_demo.m @@ -6,9 +6,11 @@ % Inpainting repairs damage to images by flood-filling the damage with % surrounding image areas. % +% Sources: +% % * % * -% * +% * % %% Theory @@ -20,9 +22,9 @@ % a technique called image inpainting is used. The basic idea is simple: % Replace those bad marks with its neighbouring pixels so that it looks like % the neigbourhood. Consider the image shown below (taken from -% ): +% ): % -% <> +% <> % % Several algorithms were designed for this purpose and OpenCV provides two of % them. Both can be accessed by the same function, |cv.inpaint|. diff --git a/samples/kalman_demo.m b/samples/kalman_demo.m index bf0d86d90..5670753a0 100644 --- a/samples/kalman_demo.m +++ b/samples/kalman_demo.m @@ -1,4 +1,4 @@ -%% Kalman filter example +%% Tracking of rotating point using Kalman filter % % Tracking of rotating point. % Rotation speed is constant. @@ -12,8 +12,10 @@ % Pressing any key will reset the tracking with a different speed. % Close the window to stop the program. 
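The predict/correct cycle at the heart of the demo looks like the following sketch; the constant-velocity model below is an illustrative stand-in for the demo's rotating-point model, and the matrix property names are assumed to mirror the OpenCV KalmanFilter API:

kf = cv.KalmanFilter(4, 2);    % state [x y vx vy], measurement [x y]
kf.transitionMatrix = [1 0 1 0; 0 1 0 1; 0 0 1 0; 0 0 0 1];
kf.measurementMatrix = [1 0 0 0; 0 1 0 0];
kf.processNoiseCov = eye(4) * 1e-4;
kf.measurementNoiseCov = eye(2) * 1e-1;
for t = 1:100
    pred = kf.predict();                % predicted state
    z = [t t] + randn(1, 2) * 0.5;      % noisy synthetic measurement
    est = kf.correct(z);                % corrected state estimate
end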
% -% , -% , +% Sources: +% +% * +% * % %% diff --git a/samples/kmeans_color_quantize_demo.m b/samples/kmeans_color_quantize_demo.m index 0998f86e7..302cb9465 100644 --- a/samples/kmeans_color_quantize_demo.m +++ b/samples/kmeans_color_quantize_demo.m @@ -3,7 +3,9 @@ % We will learn how to use |cv.kmeans| for data clustering of pixels to % perform color quantization. % -% +% Sources: +% +% * % %% Theory diff --git a/samples/kmeans_demo.m b/samples/kmeans_demo.m index fb73b49f3..972ef122d 100644 --- a/samples/kmeans_demo.m +++ b/samples/kmeans_demo.m @@ -6,8 +6,10 @@ % cluster centers and uses kmeans to move those cluster centers to their % representitive location. % -% , -% +% Sources: +% +% * +% * % %% Theory @@ -20,7 +22,7 @@ % satisfy people of all sizes. So the company make a data of people's height % and weight, and plot them on to a graph, as below: % -% <> +% <> % % Company can't create t-shirts with all the sizes. Instead, they divide % people to Small, Medium and Large, and manufacture only these 3 models which @@ -29,7 +31,7 @@ % will satisfy all the people. And if it doesn't, company can divide people to % more groups, may be five, and so on. Check image below : % -% <> +% <> % % This algorithm is an iterative process. We will explain it step-by-step with % the help of images. @@ -37,7 +39,7 @@ % Consider a set of data as below (you can consider it as t-shirt problem). % We need to cluster this data into two groups. % -% <> +% <> % % *Step 1:* Algorithm randomly chooses two centroids, $C1$ and $C2$ % (sometimes, any two data are taken as the centroids). @@ -50,7 +52,7 @@ % In our case, we will color all |0| labelled with red, and |1| labelled with % blue. So we get following image after above operations. % -% <> +% <> % % *Step 3:* Next we calculate the average of all blue points and red points % separately and that will be our new centroids. That is $C1$ and $C2$ shift @@ -60,7 +62,7 @@ % And again, perform step 2 with new centroids and label data to |0| and |1|. % So we get result as below : % -% <> +% <> % % Now step 2 and 3 are iterated until both centroids are converged to fixed % points. (Or it may be stopped depending on the criteria we provide, like @@ -75,7 +77,7 @@ % % Final result almost looks like below : % -% <> +% <> % % So this is just an intuitive understanding of K-Means Clustering. For more % details and mathematical explanation, please read any standard machine diff --git a/samples/knn_demo.m b/samples/knn_demo.m index 94810ea58..988ce1205 100644 --- a/samples/knn_demo.m +++ b/samples/knn_demo.m @@ -4,8 +4,10 @@ % algorithm, then demonstrate how to use kNN classifier for 2D point % classification. % -% , -% +% Sources: +% +% * +% * % %% Theory @@ -14,7 +16,7 @@ % supervised learning. The idea is to search for closest match of the test % data in feature space. We will look into it with below image. % -% <> +% <> % % In the image, there are two families, Blue Squares and Red Triangles. We % call each family as *Class*. Their houses are shown in their town map which diff --git a/samples/knn_ocr_digits_demo.m b/samples/knn_ocr_digits_demo.m index a60c4a505..913d8ce26 100644 --- a/samples/knn_ocr_digits_demo.m +++ b/samples/knn_ocr_digits_demo.m @@ -1,8 +1,10 @@ -%% OCR of Hand-written Data using kNN +%% OCR of hand-written digits using kNN % % We will use kNN to build a basic OCR application. 
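The train/predict cycle behind the OCR demo is brief; a sketch on synthetic 2D features (in the actual demo each row is instead a flattened digit image):

train = single([randn(50, 2); randn(50, 2) + 3]);   % two synthetic classes
resp = [zeros(50, 1); ones(50, 1)];                 % class labels
knn = cv.KNearest();
knn.train(train, resp);
test = single(randn(10, 2) + 1.5);
pred = knn.findNearest(test, 5);                    % predicted class per row, K=5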
% -% +% Sources: +% +% * % %% Hand-written Digits Dataset diff --git a/samples/knn_ocr_letters_demo.m b/samples/knn_ocr_letters_demo.m index 76ddbc694..44c7a3474 100644 --- a/samples/knn_ocr_letters_demo.m +++ b/samples/knn_ocr_letters_demo.m @@ -2,7 +2,9 @@ % % We will use kNN to build a basic OCR application. % -% +% Sources: +% +% * % %% English Alphabets Dataset diff --git a/samples/lap_pyr_demo_gui.m b/samples/lap_pyr_demo_gui.m index d2f2d2e93..6575b297b 100644 --- a/samples/lap_pyr_demo_gui.m +++ b/samples/lap_pyr_demo_gui.m @@ -1,4 +1,4 @@ -%% Laplacian Pyramid construction and merging +%% Laplacian Pyramid Construction and Merging % % References: % @@ -7,7 +7,9 @@ % IEEE Trans. on Communications, vol. 31, no. 4, pages 532-540, 1983. % % -% +% Sources: +% +% * % function varargout = lap_pyr_demo_gui(varargin) @@ -39,6 +41,9 @@ function levels = build_lap_pyr(img, nlevel, dtype) %BUILD_LAP_PYR Build Laplacian pyramid % + % levels = build_lap_pyr(img, nlevel) + % levels = build_lap_pyr(img, nlevel, dtype) + % % ## Input % * __img__ input image % * __nlevel__ number of pyramid levels @@ -65,6 +70,8 @@ function img = merge_lap_pyr(levels) %MERGE_LAP_PYR Reconstruct image from Laplacian pyramid % + % img = merge_lap_pyr(levels) + % % ## Input % * __levels__ Laplacian pyramid, cell array % @@ -98,7 +105,7 @@ function onChange(~,~,h) %BUILDGUI Creates the UI % setup video capture - if nargin > 0 + if nargin > 1 cap = cv.VideoCapture(varargin{:}); else cap = cv.VideoCapture(0); diff --git a/samples/laplace_demo_gui.m b/samples/laplace_demo_gui.m index 9f9b6d66c..1fdb750e1 100644 --- a/samples/laplace_demo_gui.m +++ b/samples/laplace_demo_gui.m @@ -1,9 +1,11 @@ -%% Laplacian Edge Detection demo +%% Laplacian Edge Detection % This program demonstrates Laplace point/edge detection using OpenCV % function |cv.Laplacian|. % It captures from the camera by default. % -% +% Sources: +% +% * % function varargout = laplace_demo_gui() diff --git a/samples/laplace_operator_demo.m b/samples/laplace_operator_demo.m index 8fd5483db..5fc624678 100644 --- a/samples/laplace_operator_demo.m +++ b/samples/laplace_operator_demo.m @@ -3,8 +3,10 @@ % In this demo, we show how to use the OpenCV function |cv.Laplacian| to % implement a discrete analog of the _Laplacian operator_. % -% , -% +% Sources: +% +% * +% * % %% Theory @@ -15,11 +17,11 @@ % we observed that an edge is characterized by a maximum, as it can be seen in % the figure: % -% <> +% <> % % And what happens if we take the second derivative? % -% <> +% <> % % You can observe that the second derivative is zero! So, we can also use this % criterion to attempt to detect edges in an image. However, note that zeros diff --git a/samples/linear_blending_demo_gui.m b/samples/linear_blending_demo_gui.m index d460eee1f..265fe3b72 100644 --- a/samples/linear_blending_demo_gui.m +++ b/samples/linear_blending_demo_gui.m @@ -1,4 +1,4 @@ -%% Blending two images +%% Image Blending % % In this demo, we add two images using |cv.addWeighted| which defines a % linear blend operator: @@ -8,11 +8,13 @@ % By varying $\alpha$ from $0 \rightarrow 1$ this operator can be used to % perform a temporal *cross-dissolve* between two images or videos. 
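The cross-dissolve loop is a direct application of that formula; a sketch using the repository's test images (both inputs must share size and type, hence the resize):

img1 = cv.imread(fullfile(mexopencv.root(), 'test', 'rgb.jpg'));
img2 = cv.imread(fullfile(mexopencv.root(), 'test', 'HappyFish.jpg'));
img2 = cv.resize(img2, [size(img1,2) size(img1,1)]);
for a = 0:0.1:1
    dst = cv.addWeighted(img1, 1-a, img2, a, 0.0);  % g = (1-a)*f0 + a*f1
    imshow(dst), drawnow
end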
% -% , -% , -% , -% , -% +% Sources: +% +% * +% * +% * +% * +% * % function varargout = linear_blending_demo_gui(im1, im2) diff --git a/samples/linear_transform_demo_gui.m b/samples/linear_transform_demo_gui.m index 67e947180..c2cbbd8e4 100644 --- a/samples/linear_transform_demo_gui.m +++ b/samples/linear_transform_demo_gui.m @@ -1,12 +1,14 @@ -%% Brightness and contrast adjustments Demo +%% Brightness and contrast adjustments % % In this demo we show how to perform the operation % $g(i,j) = \alpha \cdot f(i,j) + \beta$. % -% , -% , -% , -% +% Sources: +% +% * +% * +% * +% * % %% Theory @@ -61,7 +63,7 @@ % saturated (i.e. a pixel value higher/lesser than 255/0 will be clamp to % 255/0). % -% <> +% <> % % _In light gray, histogram of the original image, in dark gray when % |brightness = 80| in Gimp_ @@ -76,7 +78,7 @@ % the color levels will be compressed and the result will be an image with % less contrast. % -% <> +% <> % % _In light gray, histogram of the original image, in dark gray when % |contrast < 0| in Gimp_ @@ -104,7 +106,7 @@ % As this relation is non linear, the effect will not be the same for all the % pixels and will depend to their original value. % -% <> +% <> % % When $\gamma < 1$, the original dark regions will be brighter and the % histogram will be shifted to the right whereas it will be the opposite with @@ -118,7 +120,7 @@ % % The following image has been corrected with: $\alpha = 1.3$ and $\beta = 40$. % -% <> +% <> % % The overall brightness has been improved but you can notice that the clouds % are now greatly saturated due to the numerical saturation of the @@ -128,13 +130,13 @@ % % The following image has been corrected with: $\gamma = 0.4$. % -% <> +% <> % % The gamma correction should tend to add less saturation effect as the % mapping is non linear and there is no numerical saturation possible as in % the previous method. % -% <> +% <> % % * Left: histogram after alpha, beta correction % * Center: histogram of the original image diff --git a/samples/lk_demo_gui.m b/samples/lk_demo_gui.m index a4c56082a..d5ddd944c 100644 --- a/samples/lk_demo_gui.m +++ b/samples/lk_demo_gui.m @@ -3,7 +3,9 @@ % It uses camera by default, but you can provide a path to video as an % argument. % -% +% Sources: +% +% * % function varargout = lk_demo_gui(varargin) diff --git a/samples/lk_homography_demo.m b/samples/lk_homography_demo.m index 3dd89df24..3f651edf5 100644 --- a/samples/lk_homography_demo.m +++ b/samples/lk_homography_demo.m @@ -1,10 +1,12 @@ -%% Lucas-Kanade homography tracker +%% Lucas-Kanade Homography Tracker % % Lucas-Kanade sparse optical flow demo. Uses |cv.goodFeaturesToTrack| for % track initialization and back-tracking for match verification between % frames. Finds homography between reference and current views. % -% +% Sources: +% +% * % %% Options diff --git a/samples/lk_track_demo.m b/samples/lk_track_demo.m index 4cb54bc47..5448e1346 100644 --- a/samples/lk_track_demo.m +++ b/samples/lk_track_demo.m @@ -1,10 +1,12 @@ -%% Lucas-Kanade tracker +%% Lucas-Kanade Tracker % % Lucas-Kanade sparse optical flow demo. Uses |cv.goodFeaturesToTrack| for % track initialization and back-tracking for match verification between % frames. 
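The back-tracking check reduces to running the tracker in both directions; a sketch between two grayscale frames prevGray and nextGray (placeholder variables):

p0 = cv.goodFeaturesToTrack(prevGray, 'MaxCorners',200, ...
    'QualityLevel',0.01, 'MinDistance',7);            % cell array of [x y] points
p1 = cv.calcOpticalFlowPyrLK(prevGray, nextGray, p0); % forward track
p0r = cv.calcOpticalFlowPyrLK(nextGray, prevGray, p1);% track back again
d = cellfun(@(a,b) max(abs(a - b)), p0, p0r);   % round-trip error per point
good = d < 1;                                   % keep only consistent tracks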
% -% +% Sources: +% +% * % %% Options diff --git a/samples/logistic_regression_demo.m b/samples/logistic_regression_demo.m index 73d35fc2d..ebd762a23 100644 --- a/samples/logistic_regression_demo.m +++ b/samples/logistic_regression_demo.m @@ -1,9 +1,11 @@ -%% Logistic Regression demo +%% Logistic Regression % The file |data01.xml| contains digits 0 and 1 of 20 samples each, % collected on an Android device. Each of the collected images are of size % 28 x 28 re-arranged to 1 x 784 matrix. % -% +% Sources: +% +% * % %% Dataset diff --git a/samples/lsd_lines_demo.m b/samples/lsd_lines_demo.m index 6ac487f79..f4db9d4e7 100644 --- a/samples/lsd_lines_demo.m +++ b/samples/lsd_lines_demo.m @@ -1,16 +1,19 @@ %% Line Segment Detector demo % An example using the |cv.LineSegmentDetector| class. % -% +% Sources: +% +% * % %% Input image -img = imread(fullfile(mexopencv.root(),'test','building.jpg')); -gray = cv.cvtColor(img, 'RGB2GRAY'); +img = cv.imread(fullfile(mexopencv.root(),'test','building.jpg'), 'Grayscale',true); %% Preprocess -% Apply canny edge -%gray = cv.Canny(gray, [50 200], 'ApertureSize',5); +% Apply canny edge detector +if false + img = cv.Canny(img, [50 200], 'ApertureSize',5); +end %% LSD detectors % Create two LSD detectors with standard and no refinement. @@ -19,17 +22,17 @@ %% % Detect the lines both ways -tic, lines1 = lsd1.detect(gray); toc -tic, lines2 = lsd2.detect(gray); toc +tic, lines1 = lsd1.detect(img); toc +tic, lines2 = lsd2.detect(img); toc %% Result 1 % Show found lines with standard refinement -drawnLines1 = lsd1.drawSegments(gray, lines1); +drawnLines1 = lsd1.drawSegments(img, lines1); imshow(drawnLines1), title('Standard refinement') %% Result 2 % Show found lines with no refinement -drawnLines2 = lsd2.drawSegments(gray, lines2); +drawnLines2 = lsd2.drawSegments(img, lines2); imshow(drawnLines2), title('No refinement') %% Compare diff --git a/samples/lucas_kanade_demo.m b/samples/lucas_kanade_demo.m index 35fad1a33..aa670f3ad 100644 --- a/samples/lucas_kanade_demo.m +++ b/samples/lucas_kanade_demo.m @@ -7,7 +7,9 @@ % * use functions like |cv.calcOpticalFlowPyrLK| to track feature points % in a video. % -% +% Sources: +% +% * % %% Optical Flow @@ -17,7 +19,7 @@ % vector field where each vector is a displacement vector showing the movement % of points from first frame to second. Consider the image below: % -% <> +% <> % % It shows a ball moving in 5 consecutive frames. The arrow shows its % displacement vector. Optical flow has many applications in areas like: @@ -41,12 +43,15 @@ % Then take taylor series approximation of right-hand side, remove common % terms and divide by $dt$ to get the following equation: % -% $$f_x u + f_y v + f_t = 0 \;$$ +% $$f_x u + f_y v + f_t = 0$$ +% % where -% $$f_x = \frac{\partial f}{\partial x}$$; -% $$f_y = \frac{\partial f}{\partial y}$$; -% $$u = \frac{dx}{dt}$$; -% $$v = \frac{dy}{dt}$$ +% +% $$f_x = \frac{\partial f}{\partial x} \quad ; \quad +% f_y = \frac{\partial f}{\partial y}$$ +% +% $$u = \frac{dx}{dt} \quad ; \quad +% v = \frac{dy}{dt}$$ % % Above equation is called Optical Flow equation. In it, we can find $f_x$ and % $f_y$, they are image gradients. Similarly $f_t$ is the gradient along time. diff --git a/samples/mask_tmpl_demo.m b/samples/mask_tmpl_demo.m index 4ea0fc1b4..77368b865 100644 --- a/samples/mask_tmpl_demo.m +++ b/samples/mask_tmpl_demo.m @@ -1,4 +1,4 @@ -%% Template Matching demo +%% Template Matching % This program demonstrates template match with mask. 
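%
% The masked matching can be sketched as follows (a sketch; |img|, |tmpl|
% and |mask| are images, with |mask| the same size as the template):
%
%   res = cv.matchTemplate(img, tmpl, 'Method','SqDiff', 'Mask',mask);
%   [~,~,loc] = cv.minMaxLoc(res);  % for SqDiff the best match is the minimum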
% % In this demo, we show how to: @@ -8,10 +8,12 @@ % * Find the maximum and minimum values (as well as their positions) in a % given array. % -% , -% , -% , -% +% Sources: +% +% * +% * +% * +% * % %% Theory @@ -32,12 +34,12 @@ % % our goal is to detect the highest matching area: % -% <> +% <> % % To identify the matching area, we have to _compare_ the template image % against the source image by sliding it: % -% <> +% <> % % By *sliding*, we mean moving the patch one pixel at a time (left to right, % up to down). At each location, a metric is calculated so it represents how @@ -46,7 +48,7 @@ % For each location of |T| over |I|, you _store_ the metric in the % _result matrix_ |R|. Each location $(x,y)$ in |R| contains the match metric: % -% <> +% <> % % the image above is the result |R| of sliding the patch with a metric % |CCorrNormed|. The brightest locations indicate the highest matches. As you @@ -79,7 +81,7 @@ % Since the input images in the sample are 3-channels, the mask is also read % as color image. % -% <> +% <> % % OpenCV implements template matching in the function |cv.matchTemplate|. % There are six available methods: diff --git a/samples/minarea_demo.m b/samples/minarea_demo.m index 48d822840..d95047935 100644 --- a/samples/minarea_demo.m +++ b/samples/minarea_demo.m @@ -1,10 +1,12 @@ -%% Minimum Area Enclosing demo +%% Minimum Area Enclosing % This program demonstrates finding the minimum enclosing box, triangle or % circle of a set of points using functions: |cv.minAreaRect|, % |cv.minEnclosingTriangle|, and |cv.minEnclosingCircle|. % Random points are generated and then enclosed. % -% +% Sources: +% +% * % %% Points diff --git a/samples/moments_demo_gui.m b/samples/moments_demo_gui.m index eb9513f1e..b156f7df2 100644 --- a/samples/moments_demo_gui.m +++ b/samples/moments_demo_gui.m @@ -1,4 +1,4 @@ -%% Image Moments demo +%% Image Moments % We learn to calculate the moments of an image. % % In this sample you will learn how to use the following OpenCV functions: @@ -7,8 +7,10 @@ % * % * % -% -% +% Sources: +% +% * +% * % function varargout = moments_demo_gui(im) diff --git a/samples/morphology_demo_3.m b/samples/morphology_demo_3.m index 8cec1dbb0..e2dfae2a6 100644 --- a/samples/morphology_demo_3.m +++ b/samples/morphology_demo_3.m @@ -2,8 +2,10 @@ % Use morphology transformations for extracting horizontal and vertical lines % sample code % -% -% +% Sources: +% +% * +% * % %% Music Sheet image diff --git a/samples/morphology_demo_gui1.m b/samples/morphology_demo_gui1.m index 1e379f8dc..776fa7b47 100644 --- a/samples/morphology_demo_gui1.m +++ b/samples/morphology_demo_gui1.m @@ -1,4 +1,4 @@ -%% Erosion and Dilation Demo +%% Erosion and Dilation % % In this sample, you will learn how to apply two very common morphological % operators: Erosion and Dilation. For this purpose, you will use the @@ -7,10 +7,12 @@ % * % * % -% , -% , -% , -% +% Sources: +% +% * +% * +% * +% * % %% Morphological Operations @@ -28,7 +30,7 @@ % We will explain dilation and erosion briefly, using the following image as % an example: % -% <> +% <> % %% 1) Dilation % @@ -42,7 +44,7 @@ % % Take the above image as an example. Applying dilation we can get: % -% <> +% <> % % The background (bright) dilates around the black regions of the letter. % @@ -51,7 +53,7 @@ % letter. We have performed two dilatations with a rectangular structuring % element of size |3x3|. % -% <> +% <> % % (Left image: original image inverted, right image: resulting dilatation). 
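%
% In code, the two dilations above can be sketched as follows (a sketch;
% |img| is the binary letter image):
%
%   se = cv.getStructuringElement('Shape','Rect', 'KSize',[3 3]);
%   dilated = cv.dilate(img, 'Element',se, 'Iterations',2);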
% @@ -69,13 +71,13 @@ % the bright areas of the image (the background, apparently), get thinner, % whereas the dark zones (the "writing") gets bigger. % -% <> +% <> % % In similar manner, the corresponding image results by applying erosion % operation on the inverted original image (two erosions with a rectangular % structuring element of size |3x3|): % -% <> +% <> % % (Left image: original image inverted, right image: resulting erosion). % diff --git a/samples/morphology_demo_gui2.m b/samples/morphology_demo_gui2.m index f94aa453c..86ecdb928 100644 --- a/samples/morphology_demo_gui2.m +++ b/samples/morphology_demo_gui2.m @@ -10,10 +10,12 @@ % * Top Hat % * Black Hat % -% , -% , -% , -% +% Sources: +% +% * +% * +% * +% * % %% Theory @@ -38,13 +40,13 @@ % applying the opening transformation. We can observe that the small spaces in % the corners of the letter tend to dissapear. % -% <> +% <> % % For the sake of clarity, we have performed the opening operation (|7x7| % rectangular structuring element) on the same original image but inverted % such as the object in white is now the letter. % -% <> +% <> % % (Left image: original image inverted, right image: resulting opening). % @@ -56,12 +58,12 @@ % % Useful to remove small holes (dark regions). % -% <> +% <> % % On the inverted image, we have performed the closing operation (|7x7| % rectangular structuring element): % -% <> +% <> % % (Left image: original image inverted, right image: resulting closing). % @@ -74,7 +76,7 @@ % % It is useful for finding the outline of an object as can be seen below: % -% <> +% <> % %% 4) Top Hat % @@ -82,7 +84,7 @@ % % $$dst = tophat( src, element ) = src - open( src, element )$$ % -% <> +% <> % %% 5) Black Hat % @@ -90,7 +92,7 @@ % % $$dst = blackhat( src, element ) = close( src, element ) - src$$ % -% <> +% <> % %% References % diff --git a/samples/mosse_demo.m b/samples/mosse_demo.m new file mode 100644 index 000000000..55f6c3a46 --- /dev/null +++ b/samples/mosse_demo.m @@ -0,0 +1,183 @@ +%% Correlation-based Tracking using MOSSE Filters +% +% Draw rectangles around objects with a mouse to track them. +% The sample uses the |RectSelector| and |MOSSETracker| classes. 
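+%
+% Programmatic use of the tracker class reduces to the following sketch
+% (|gray| is a grayscale frame and |rect| a target box [x,y,w,h]):
+%
+%   tracker = MOSSETracker(gray, rect);  % initialize filter on first frame
+%   tracker.update(nextGray);            % update filter on each new frame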
+% +% Sources: +% +% * +% + +function mosse_demo(vid) + % video file, and a default target to track [x,y,w,h] + win = []; + if nargin < 1 + if true + vid = fullfile(mexopencv.root(), 'test', 'faceocc2.webm'); + win = [118 57 82 98]; + elseif true + vid = fullfile(mexopencv.root(), 'test', 'dudek.webm'); + win = [123 87 132 176]; + else + vid = fullfile(mexopencv.root(), 'test', 'david.webm'); + win = [129 80 64 78]; + end + if exist(vid, 'file') ~= 2 + [~,name,ext] = fileparts(vid); + url = 'https://cdn.rawgit.com/opencv/opencv_extra/3.3.1/testdata/cv/tracking/'; + urlwrite([url, name, '/data/', name, ext], vid); + end + elseif isempty(vid) + if ~mexopencv.isOctave() && mexopencv.require('vision') + vid = which('visionface.avi'); + win = [275 125 75 100]; + else + vid = 0; + end + end + + % open video feed, and get first frame + cap = cv.VideoCapture(vid); + pause(1); + assert(cap.isOpened(), 'Failed to open video'); + frame = cap.read(); + assert(~isempty(frame), 'Failed to read frames'); + gray = cv.cvtColor(frame, 'RGB2GRAY'); + + % prepare plot + paused = false; + viz_state = true; + if viz_state + figure('Position',[100 200 1000 600]) + subplot(1,4,1:3) + hVis = []; + end + hImg = imshow(frame); + + % initialize trackers array + trackers = {}; + if ~isempty(win), onRect(win); end + + % create ROI region selector + if ~mexopencv.isOctave() + onHelp(); + roi = RectSelector(hImg); + roi.callback = @onRect; + else + %HACK: RectSelector not Octave compatible + %HACK: function handle to nested function not supported in Octave + roi = struct('isDragging',@()false); + end + + % listen to keyboard input + if ~mexopencv.isOctave() + %HACK: function handle to nested function not supported in Octave + set(ancestor(hImg,'figure'), 'WindowKeyPressFcn',@onType); + end + + % main loop + while ishghandle(hImg) + playing = ~paused && ~roi.isDragging(); + if playing + % read new frame + frame = cap.read(); + if isempty(frame), break; end + gray = cv.cvtColor(frame, 'RGB2GRAY'); + + % track all targets + for i=1:numel(trackers) + trackers{i}.update(gray); + end + end + out = frame; + + % draw tracked objects + for i=1:numel(trackers) + out = trackers{i}.draw_object(out); + end + + % show state from last tracker + if viz_state && ~isempty(trackers) + [vis, kernel, resp] = trackers{end}.visualize_state(); + vis = cat(1, vis, kernel, resp); + if isempty(hVis) + subplot(144) + hVis = imshow(vis); + else + set(hVis, 'CData',vis); + end + end + + % display result + set(hImg, 'CData',out); + if playing + drawnow; + else + pause(0.1); % slow down a bit if paused + end + end + cap.release(); + if isobject(roi), delete(roi); end + + % --- Callback functions --- + + function onRect(rect) + %ONRECT Callback for ROI selector + % + % onRect(rect) + % + % ## Input + % * __rect__ selected rectangle [x,y,w,h], or empty + % + + if isempty(rect) || cv.Rect.area(rect) < 3, return; end + + % track new target + disp('Adding target...') + trackers{end+1} = MOSSETracker(gray, rect); + + % un-pause + paused = false; + end + + function onType(hfig, e) + %ONTYPE Event handler for key press on figure + + switch e.Key + case {'q', 'escape'} + close(hfig); + + case 'h' + onHelp(); + + case {'space', 'p'} + disp('Toggle pause...'); + paused = ~paused; + + case {'c', 'r'} + disp('Clearing trackers...'); + trackers = {}; + if viz_state + delete(ancestor(hVis,'axes')); + hVis = []; + end + end + end + + function onHelp() + %ONHELP Display usage help dialog + + h = helpdlg({ + 'Select object(s) to track using the mouse.' 
+ 'Hot keys:' + ' q - quit' + ' h - help' + ' p - pause' + ' c - clear targets' + }); + + % wait for user to accept dialog + set(h, 'WindowStyle','modal'); + waitfor(h); + end +end diff --git a/samples/mouse_and_match_demo.m b/samples/mouse_and_match_demo.m new file mode 100644 index 000000000..7984532cd --- /dev/null +++ b/samples/mouse_and_match_demo.m @@ -0,0 +1,48 @@ +%% Interactive Rectangle Selection +% +% Demonstrate using a mouse to interact with an image. +% Allows the user to select parts of an image with a mouse. +% When they let go of the mouse, it correlates (using |cv.matchTemplate|) +% that patch with the image. +% +% The sample uses the |RectSelector| class. +% +% Sources: +% +% * +% + +%% +% load and show image +fname = fullfile(mexopencv.root(), 'test', 'fruits.jpg'); +img = cv.imread(fname, 'Color',true); +gray = cv.cvtColor(img, 'RGB2GRAY'); +hImg = imshow(gray); + +%% +% prompt user to select ROI rectangle +disp('Select part of the image using the mouse'); +if true + roi = RectSelector(hImg); + roi.clip = true; + setLineProps(roi, 'Color','g', 'LineWidth',2); + rect = wait(roi); + delete(roi); +else + roi = imrect(ancestor(hImg,'axes')); + setColor(roi, 'g'); + rect = wait(roi); + delete(roi); +end +assert(~isempty(rect), 'No selection'); +rectangle('Position',rect, 'EdgeColor','g', 'LineWidth',2) + +%% +% extract patch and perform template matching against image +tmpl = cv.Rect.crop(gray, rect); +res = cv.matchTemplate(gray, tmpl, 'Method','CCoeffNormed'); +res = abs(res).^3; +res = cv.threshold(res, 0.01, 'Type','ToZero'); +res8 = cv.normalize(res, 'Alpha',0, 'Beta',255, ... 'NormType','MinMax', 'DType','uint8'); figure, imshow(res8) diff --git a/samples/mser_demo.m b/samples/mser_demo.m index c24329699..67c88e3ca 100644 --- a/samples/mser_demo.m +++ b/samples/mser_demo.m @@ -1,7 +1,9 @@ -%% MSER detector demo +%% Maximally Stable Extremal Regions (MSER) % This program demonstrates how to use MSER to detect extremal regions. % -% +% Sources: +% +% * % %% diff --git a/samples/non_local_means_demo.m b/samples/non_local_means_demo.m index f264b664c..53360e2cc 100644 --- a/samples/non_local_means_demo.m +++ b/samples/non_local_means_demo.m @@ -1,4 +1,4 @@ -%% Image Denoising +%% Non-Local Means Image Denoising % % In this demo, we will learn: % @@ -6,6 +6,10 @@ % * how to use functions like |cv.fastNlMeansDenoising|, % |cv.fastNlMeansDenoisingColored|, and |cv.fastNlMeansDenoisingMulti| % +% Sources: +% +% * +% %% Theory % @@ -38,7 +42,7 @@ % find their average? For that particular window, that is fine. See an example % image below: % -% <> +% <> % % The blue patches in the image look similar. Green patches look % similar.
So we take a pixel, take small window around it, search for similar diff --git a/samples/npr_demo.m b/samples/npr_demo.m index b2dab431e..c861d7f85 100644 --- a/samples/npr_demo.m +++ b/samples/npr_demo.m @@ -1,16 +1,18 @@ -%% Non-Photorealistic Rendering demo +%% Non-Photorealistic Rendering % This tutorial demonstrates how to use OpenCV Non-Photorealistic Rendering % Module: % % * Edge Preserve Smoothing -% - Using Normalized convolution Filter -% - Using Recursive Filter +% - Using Normalized convolution Filter +% - Using Recursive Filter % * Detail Enhancement % * Pencil sketch/Color Pencil Drawing % * Stylization % -% , -% +% Sources: +% +% * +% * % %% Input Image diff --git a/samples/opt_flow_demo.m b/samples/opt_flow_demo.m index 25f04ebda..420d3d877 100644 --- a/samples/opt_flow_demo.m +++ b/samples/opt_flow_demo.m @@ -7,10 +7,12 @@ % "Two-Frame Motion Estimation Based on Polynomial Expansion" % by Gunner Farneback in 2003. % -% -% -% -% +% Sources: +% +% * +% * +% * +% * % function opt_flow_demo() diff --git a/samples/orb_demo.m b/samples/orb_demo.m index 1804a23c1..c53630702 100644 --- a/samples/orb_demo.m +++ b/samples/orb_demo.m @@ -2,7 +2,9 @@ % % In this demo, we will see the basics of ORB. % -% +% Sources: +% +% * % %% Theory diff --git a/samples/padding_demo_gui.m b/samples/padding_demo_gui.m index d3526ba60..74e72be41 100644 --- a/samples/padding_demo_gui.m +++ b/samples/padding_demo_gui.m @@ -3,8 +3,10 @@ % In this demo, we show how to use the OpenCV function |cv.copyMakeBorder| to % set the borders (extra image padding). % -% , -% +% Sources: +% +% * +% * % %% Theory diff --git a/samples/pca_demo.m b/samples/pca_demo.m index d6b03bc40..1e0f5f054 100644 --- a/samples/pca_demo.m +++ b/samples/pca_demo.m @@ -1,4 +1,4 @@ -%% PCA demo +%% PCA for dimensionality reduction % An example using PCA for dimensionality reduction while maintaining an % amount of variance. % @@ -9,7 +9,9 @@ % The author recommends using the first 15 faces of the AT&T face data set: % % -% +% Sources: +% +% * % %% Input images diff --git a/samples/pca_intro_demo.m b/samples/pca_intro_demo.m index 26888836f..2ffc7b3a5 100644 --- a/samples/pca_intro_demo.m +++ b/samples/pca_intro_demo.m @@ -3,8 +3,10 @@ % In this demo, you will learn how to use the OpenCV class |cv.PCA| to % calculate the orientation of an object. % -% , -% +% Sources: +% +% * +% * % %% Theory @@ -12,7 +14,7 @@ % Principal Component Analysis (PCA) is a statistical procedure that extracts % the most important features of a dataset. % -% <> +% <> % % Consider that you have a set of 2D points as it is shown in the figure % above. Each dimension corresponds to a feature you are interested in. Here @@ -36,7 +38,7 @@ % consist of 2 vectors called _eigenvectors_ which are the % _principal components_ of the data set. % -% <> +% <> % % The size of each eigenvector is encoded in the corresponding eigenvalue and % indicates how much the data vary along the principal component. The @@ -213,7 +215,7 @@ function pca_intro_demo() %% References % % * -% * +% * % % And special thanks to Svetlin Penkov for the original tutorial. % diff --git a/samples/peopledetect_demo.m b/samples/peopledetect_demo.m index 7bb78bc3b..17f0680a2 100644 --- a/samples/peopledetect_demo.m +++ b/samples/peopledetect_demo.m @@ -1,12 +1,14 @@ -%% People Detector Demo +%% People Detection using HoG % This program demonstrates the use of the HoG descriptor using the % pre-trained SVM model for people detection. % During execution, close figure to quit. 
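%
% The detector setup used below can be sketched as follows (a sketch of the
% calls involved):
%
%   hog = cv.HOGDescriptor();
%   hog.SvmDetector = 'DefaultPeopleDetector';  % pre-trained people model
%   rects = hog.detectMultiScale(img);          % cell array of [x,y,w,h]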
% -% , -% , -% , -% +% Sources: +% +% * +% * +% * +% * % %% Detector diff --git a/samples/perspective_transform_gui.m b/samples/perspective_transform_gui.m index b84129ac3..766583a01 100644 --- a/samples/perspective_transform_gui.m +++ b/samples/perspective_transform_gui.m @@ -1,4 +1,4 @@ -%% Perspective Transformation demo +%% Interactive Perspective Transformation % This program demonstrates Perspective Transformation. % % In this sample you will learn how to use the following OpenCV functions: diff --git a/samples/phase_corr_demo.m b/samples/phase_corr_demo.m index 608da80cf..254be3c78 100644 --- a/samples/phase_corr_demo.m +++ b/samples/phase_corr_demo.m @@ -1,8 +1,10 @@ -%% Phase Correlation demo +%% Phase Correlation % Demonstrates estimating translational shift between two successive frames % using Phase Correlation. % -% +% Sources: +% +% * % %% Prepare video source diff --git a/samples/planar_tracker_demo.m b/samples/planar_tracker_demo.m index 813ea9c62..422e03b11 100644 --- a/samples/planar_tracker_demo.m +++ b/samples/planar_tracker_demo.m @@ -22,9 +22,11 @@ % % Example video: . % -% , -% , -% +% Sources: +% +% * +% * +% * % function varargout = planar_tracker_demo(fname) @@ -113,7 +115,7 @@ vid.release(); % show average stats - stats = stats ./ counter; + stats = stats ./ (counter / app.show_every_frame); stats(:,1:3) = round(stats(:,1:3)); for i=1:numel(f) disp(['-- ' f(i).name ' --']); @@ -219,22 +221,30 @@ sprintf('Matches: %d', stats(2)) sprintf('Inliers: %d', stats(3)) sprintf('Inliers Ratio: %.0f%%', stats(4)*100) + sprintf('FPS: %.2f', stats(5)) }; end function [kp, desc, stats] = processFirstFrame(f, app) %PROCESSFIRSTFRAME Process first frame + % init stats + stats = [0, 0, 0, 0, 0]; + % create ROI mask mask = zeros(app.sz(1:2), 'uint8'); mask = cv.fillPoly(mask, app.bb0, 'Color',255); + tm = cv.TickMeter(); + tm.start(); + % detect and compute features of first frame in ROI region [kp, desc] = f.detector.detectAndCompute(app.frame0, 'Mask',mask); assert(~isempty(kp), 'No keypoints detected in first frame'); + stats(1) = numel(kp); - % init stats - stats = [numel(kp), 0, 0, 0]; + tm.stop(); + stats(5) = 1 / tm.TimeSec; end function [out, bb1, stats] = processFrame(f, app, frame1) @@ -243,7 +253,10 @@ % initialize output out = [app.frame0, frame1]; bb1 = nan(size(app.bb0)); - stats = [0 0 0 0]; + stats = [0 0 0 0 0]; + + tm = cv.TickMeter(); + tm.start(); % detect and compute features in current frame [kp1, desc1] = f.detector.detectAndCompute(frame1); @@ -277,6 +290,9 @@ stats(4) = stats(3) / stats(2); if isempty(H), return; end + tm.stop(); + stats(5) = 1 / tm.TimeSec; + % project object bounding box using homography to locate it in new frame bb1 = cv.perspectiveTransform(app.bb0, H); diff --git a/samples/plane_ar_demo.m b/samples/plane_ar_demo.m new file mode 100644 index 000000000..253da7893 --- /dev/null +++ b/samples/plane_ar_demo.m @@ -0,0 +1,223 @@ +%% Planar augmented reality +% +% This sample shows an example of augmented reality overlay over a tracked +% planar object. The function |cv.solvePnP| is used to estimate the tracked +% object location in 3D space. +% +% Select a textured planar object to track by drawing a box with the mouse. +% It uses the |RectSelector| and |PlaneTracker| classes. +% +% Sample video: . 
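+%
+% The pose estimation at the heart of the overlay can be sketched as
+% follows (|quad3d| are the four target corners with Z=0, |quad| their
+% tracked image locations, |K| the camera matrix; see |drawOverlay| below):
+%
+%   [rvec, tvec] = cv.solvePnP(quad3d, quad, K);       % object pose
+%   pts2d = cv.projectPoints(verts3d, rvec, tvec, K);  % 3D -> image points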
+% +% Sources: +% +% * +% + +function plane_ar_demo(vid) + % video file, and a default target to track [x,y,w,h] + win = []; + if nargin < 1 + vid = fullfile(mexopencv.root(), 'test', 'blais.mp4'); + assert(exist(vid, 'file') == 2, 'Missing video file'); + if true + win = [135 165 285 175]; % face + else + win = [136 0 366 433]; % book + end + elseif isempty(vid) + vid = 0; + end + + % open video feed, and get first frame + cap = cv.VideoCapture(vid); + pause(1); + assert(cap.isOpened(), 'Failed to open video'); + frame = cap.read(); + assert(~isempty(frame), 'Failed to read frames'); + + % prepare plot + paused = false; + hImg = imshow(frame); + + % create and initialize tracker + tracker = PlaneTracker(); + if ~isempty(win) + tracker.addTarget(frame, win); + end + + % create ROI region selector + if ~mexopencv.isOctave() + onHelp(); + roi = RectSelector(hImg); + roi.callback = @onRect; + else + %HACK: RectSelector not Octave compatible + %HACK: function handle to nested function not supported in Octave + roi = struct('isDragging',@()false); + end + + % listen to keyboard input + if ~mexopencv.isOctave() + %HACK: function handle to nested function not supported in Octave + set(ancestor(hImg,'figure'), 'WindowKeyPressFcn',@onType); + end + + % main loop + while ishghandle(hImg) + playing = ~paused && ~roi.isDragging(); + if playing + % read new frame + frame = cap.read(); + if isempty(frame), break; end + end + out = frame; + + % track and draw keypoints, boundary, and pose of targets + if playing + tracked = tracker.track(frame); + for i=1:numel(tracked) + tr = tracked(i); + out = cv.circle(out, tr.pt1, 2, 'Color',[0 0 255]); + out = cv.polylines(out, tr.quad, 'Closed',true, ... + 'Color',[0 0 255], 'Thickness',2); + out = drawOverlay(out, tr); + end + end + + % display result + set(hImg, 'CData',out); + if playing + drawnow; + else + pause(0.1); % slow down a bit if paused + end + end + cap.release(); + if isobject(roi), delete(roi); end + + % --- Callback functions --- + + function onRect(rect) + %ONRECT Callback for ROI selector + % + % onRect(rect) + % + % ## Input + % * __rect__ selected rectangle [x,y,w,h], or empty + % + + if isempty(rect), return; end + + % track new target + disp('Adding target...') + tracker.addTarget(frame, rect); + + % un-pause + paused = false; + end + + function onType(hfig, e) + %ONTYPE Event handler for key press on figure + + switch e.Key + case {'q', 'escape'} + close(hfig); + + case 'h' + onHelp(); + + case {'space', 'p'} + disp('Toggle pause...'); + paused = ~paused; + + case {'c', 'r'} + disp('Clearing tracker...'); + tracker.clear(); + end + end + + function onHelp() + %ONHELP Display usage help dialog + + h = helpdlg({ + 'Select object(s) to track using the mouse.' + 'Hot keys:' + ' q - quit' + ' h - help' + ' p - pause' + ' c - clear targets' + }); + + % wait for user to accept dialog + set(h, 'WindowStyle','modal'); + waitfor(h); + end +end + +%% Helper function + +function img = drawOverlay(img, tr) + %DRAWOVERLAY Draw a 3D house on top of tracked object to show its pose + % + % img = drawOverlay(img, tr) + % + % ## Input + % * __img__ image on which to draw + % * __tr__ tracked target structure + % + % ## Output + % * __img__ output image + % + + if true + % simple model of a house (cube with a triangular prism roof) + ar_vertices = [ + 0 0 0; 0 1 0; 1 1 0; 1 0 0; ... + 0 0 1; 0 1 1; 1 1 1; 1 0 1; ... + 0 0.5 2; 1 0.5 2 + ]; + ar_edges = [ + 0 1; 1 2; 2 3; 3 0; ... + 4 5; 5 6; 6 7; 7 4; ... + 0 4; 1 5; 2 6; 3 7; ... 
+ 4 8; 5 8; 6 9; 7 9; 8 9 + ]; + else + % simple XYZ axes + ar_vertices = [0 0 0; 1 0 0; 0 1 0; 0 0 1]; + ar_edges = [0 1; 0 2; 0 3]; + end + + % camera matrix (assumes planar tracked object wrt camera plane) + [h,w,~] = size(img); + fx = 1.0; % adjust camera focal length for proper augmentation [0.5, 1.0] + K = [fx*w, 0, 0.5*(w-1); ... + 0, fx*w, 0.5*(h-1); ... + 0, 0, 1]; + + % find object pose from corresponding 3D/2D points + rect = tr.target.rect; + quad_3d = rect([1 2; 3 2; 3 4; 1 4]); + quad_3d(:,3) = 0; % Z=0 for 3D points in target image + [rvec, tvec] = cv.solvePnP(quad_3d, tr.quad, K); + + % scale and translate house vertices, to place it on top of target object + % in target image coordinates, with origin being top-left corner in + % rectangle plane: + % 1 unit in x-dir = object width + % 1 unit in y-dir = object height + % 1 unit in z-dir = 0.3 * object width (in opposite cam z-direction) + xyzScale = rect(3:4) - rect(1:2); + xyzScale(3) = -0.3 * max(xyzScale); + verts = bsxfun(@times, ar_vertices, xyzScale); + verts = bsxfun(@plus, verts, [rect(1:2) 0]); + + % project house 3d points to 2d points in new frame coordinates + verts = cv.projectPoints(verts, rvec, tvec, K); + + % connect and draw house edges in new frame + pts1 = verts(ar_edges(:,1)+1,:); + pts2 = verts(ar_edges(:,2)+1,:); + img = cv.line(img, pts1, pts2, 'Color',[255 255 0], 'Thickness',2); +end diff --git a/samples/plane_tracker_demo.m b/samples/plane_tracker_demo.m new file mode 100644 index 000000000..57ef2e93a --- /dev/null +++ b/samples/plane_tracker_demo.m @@ -0,0 +1,71 @@ +%% Multi-target planar tracking +% +% Example of using features2d framework for multiple planar targets tracking +% in a video using homography matching. This sample uses the |PlaneTracker| +% class. +% +% Sample video: . +% +% Sources: +% +% * +% + +% video file, and multiple targets to track [x,y,w,h] +vid = fullfile(mexopencv.root(), 'test', 'blais.mp4'); +assert(exist(vid, 'file') == 2, 'Missing video file'); +win = [ + 136 0 366 433; % book + 135 165 285 175 % face +]; +N = size(win,1); + +% open video feed, and get firs frame +cap = cv.VideoCapture(vid); +pause(1); +assert(cap.isOpened(), 'Failed to open video'); +frame = cap.read(); +assert(~isempty(frame), 'Failed to read frames'); + +% create and initialize tracker +tracker = PlaneTracker(); +for i=1:N + tracker.addTarget(frame, win(i,:)); +end + +% prepare plot +hImg = imshow(frame); +clr = lines(N); +for i=1:N + % keypoints and bounding box for each tracked target + hPts(i) = line(NaN, NaN, 'Color',clr(i,:), 'LineStyle','none', 'Marker','o'); + hLin(i) = line(NaN, NaN, 'Color',clr(i,:), 'LineWidth',3); +end + +% main loop +while ishghandle(hImg) + % read new frame + frame = cap.read(); + if isempty(frame), break; end + + % track and update keypoints/boundaries of matched targets + tracked = tracker.track(frame); + for i=1:numel(tracked) + tr = tracked(i); + set(hPts(tr.index), 'XData',tr.pt1(:,1), 'YData',tr.pt1(:,2)); + set(hLin(tr.index), ... 
+ 'XData',tr.quad([1:end 1],1), 'YData',tr.quad([1:end 1],2)); + end + + % update tracked objects which were not found in current frame + idx = setdiff(1:N, [tracked.index]); + if ~isempty(idx) + set(hPts(idx), 'XData',NaN, 'YData',NaN); + set(hLin(idx), 'XData',NaN, 'YData',NaN); + end + + % display result + set(hImg, 'CData',frame); + drawnow; +end +cap.release(); diff --git a/samples/pointPolygonTest_demo.m b/samples/pointPolygonTest_demo.m index 2075edc41..4f2a8b480 100644 --- a/samples/pointPolygonTest_demo.m +++ b/samples/pointPolygonTest_demo.m @@ -3,8 +3,10 @@ % In this sample you will learn how to use the OpenCV function % . % -% , -% +% Sources: +% +% * +% * % %% @@ -21,7 +23,7 @@ %% % get the contours [contours, hierarchy] = cv.findContours(src, 'Mode','Tree', 'Method','Simple'); -contours = cellfun(@(C) cat(1,C{:}), contours, 'Uniform',false); +contours = cellfun(@(C) cat(1,C{:}), contours, 'UniformOutput',false); assert(~isempty(contours)); %% diff --git a/samples/polar_transforms_demo.m b/samples/polar_transforms_demo.m index 07ae95724..4b9328eaa 100644 --- a/samples/polar_transforms_demo.m +++ b/samples/polar_transforms_demo.m @@ -3,8 +3,10 @@ % % This program illustrates Linear-Polar and Log-Polar image transforms. % -% -% +% Sources: +% +% * +% * % %% diff --git a/samples/pyramids_blending.m b/samples/pyramids_blending.m index 00c248565..561006907 100644 --- a/samples/pyramids_blending.m +++ b/samples/pyramids_blending.m @@ -6,7 +6,9 @@ % * We will use Image pyramids to create a new fruit, "Orapple" % * We will see these functions: |cv.pyrUp|, |cv.pyrDown|, |cv.buildPyramid| % -% +% Sources: +% +% * % %% Theory diff --git a/samples/pyramids_demo_gui.m b/samples/pyramids_demo_gui.m index e6bccd33f..6a930db5c 100644 --- a/samples/pyramids_demo_gui.m +++ b/samples/pyramids_demo_gui.m @@ -3,8 +3,10 @@ % In this demo, we show how to use the functions |cv.pyrUp| and |cv.pyrDown| % to downsample or upsample a given image. % -% , -% +% Sources: +% +% * +% * % %% Theory @@ -44,7 +46,7 @@ % Imagine the pyramid as a set of layers in which the higher the layer, the % smaller the size. % -% <> +% <> % % Every layer is numbered from bottom to top, so layer $(i+1)$ (denoted as % $G_{i+1}$ is smaller than layer $i$ ($G_{i}$). diff --git a/samples/pyrlk_optical_flow_demo.m b/samples/pyrlk_optical_flow_demo.m index e7106a293..be7a4dd78 100644 --- a/samples/pyrlk_optical_flow_demo.m +++ b/samples/pyrlk_optical_flow_demo.m @@ -1,7 +1,9 @@ %% Lucas-Kanade Sparse Optical Flow % -% -% +% Sources: +% +% * +% * % function pyrlk_optical_flow_demo() diff --git a/samples/remap_demo.m b/samples/remap_demo.m index 9826d2a79..b4724b038 100644 --- a/samples/remap_demo.m +++ b/samples/remap_demo.m @@ -1,10 +1,12 @@ -%% Remapping +%% Image Remapping % % In this demo, we show how to use the OpenCV function |cv.remap| to implement % simple remapping routines. % -% , -% +% Sources: +% +% * +% * % %% Theory @@ -24,19 +26,19 @@ % the mapping function that operates on $(x,y)$. % % Let's think in a quick example. Imagine that we have an image $I$ and, say -% we want to do a remap such that: +% we want to do a remap such that: % % $$h(x,y) = (I.cols - x, y )$$ % % What would happen? It is easily seen that the image would flip in the $x$ % direction. For instance, consider the input image: % -% <> +% <> % % observe how the red circle changes positions with respect to x (considering % $x$ the horizontal direction): % -% <> +% <> % % In OpenCV, the function |cv.remap| offers a simple remapping implementation. 
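%
% The horizontal-flip map above can be sketched as follows (a sketch; |img|
% is the input image, and the maps hold 0-based single-precision
% coordinates):
%
%   [h,w,~] = size(img);
%   [X,Y] = meshgrid(single(0:w-1), single(0:h-1));
%   dst = cv.remap(img, w-1-X, Y);  % h(x,y) = (cols - x, y)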
% diff --git a/samples/remap_fun_demo.m b/samples/remap_fun_demo.m index 8aad6b9b6..ece321bb7 100644 --- a/samples/remap_fun_demo.m +++ b/samples/remap_fun_demo.m @@ -11,7 +11,9 @@ % shrink to fit into the circle. % * convert the modified r-theta values back into x-y coordinates % -% +% Sources: +% +% * % %% diff --git a/samples/segment_objects_demo.m b/samples/segment_objects_demo.m index 10d8bf5f9..dd6f97589 100644 --- a/samples/segment_objects_demo.m +++ b/samples/segment_objects_demo.m @@ -6,7 +6,9 @@ % When the program starts, it begins learning the background. % You can toggle background learning on and off using the checkbox. % -% +% Sources: +% +% * % %% Set up video source: video file or camera diff --git a/samples/shape_context_demo.m b/samples/shape_context_demo.m index 21d22d65d..8b337738f 100644 --- a/samples/shape_context_demo.m +++ b/samples/shape_context_demo.m @@ -1,11 +1,13 @@ -%% Shape context demo for shape matching +%% Shape context for shape matching % This program demonstrates a method for shape comparison based on Shape % Context. % % We use 20 sample images from OpenCV: % https://github.com/opencv/opencv/tree/3.1.0/samples/data/shape_sample/*.png % -% +% Sources: +% +% * % %% diff --git a/samples/smiledetect_demo.m b/samples/smiledetect_demo.m index c832333ff..ef2f81da7 100644 --- a/samples/smiledetect_demo.m +++ b/samples/smiledetect_demo.m @@ -1,10 +1,12 @@ -%% Smile Detection demo +%% Smile Detection % This program demonstrates the smile detector. % % NOTE: Smile intensity will only be valid after a first smile has been % detected. % -% +% Sources: +% +% * % %% Options @@ -95,7 +97,7 @@ faces = cascadeF.detect(gray, detectOpts{:}); if tryflip faces2 = cascadeF.detect(cv.flip(gray, 1), detectOpts{:}); - faces2 = cellfun(@(r) [w-r(1)-r(3) r(2:4)], faces2, 'Uniform',false); + faces2 = cellfun(@(r) [w-r(1)-r(3) r(2:4)], faces2, 'UniformOutput',false); faces = [faces(:); faces2(:)]; end toc diff --git a/samples/smoothing_demo.m b/samples/smoothing_demo.m index 5b21642db..3facb3289 100644 --- a/samples/smoothing_demo.m +++ b/samples/smoothing_demo.m @@ -1,7 +1,9 @@ %% Smoothing Demo % Sample code for simple filters % -% +% Sources: +% +% * % %% diff --git a/samples/sobel_derivatives_demo.m b/samples/sobel_derivatives_demo.m index 9d30f8359..292871bb2 100644 --- a/samples/sobel_derivatives_demo.m +++ b/samples/sobel_derivatives_demo.m @@ -7,8 +7,10 @@ % * Use the OpenCV function |cv.Scharr| to calculate a more accurate % derivative for a kernel of size 3x3 % -% , -% +% Sources: +% +% * +% * % %% Theory @@ -22,7 +24,7 @@ % derivatives in an image? Let's imagine we want to detect the _edges_ present % in the image. For instance: % -% <> +% <> % % You can easily notice that in an _edge_, the pixel intensity _changes_ in a % notorious way. A good way to express _changes_ is by using _derivatives_. A @@ -31,12 +33,12 @@ % To be more graphical, let's assume we have a 1D-image. 
An edge is shown by % the "jump" in intensity in the plot below: % -% <> +% <> % % The edge "jump" can be seen more easily if we take the first derivative % (actually, here appears as a maximum) % -% <> +% <> % % So, from the explanation above, we can deduce that a method to detect edges % in an image can be performed by locating pixel locations where the gradient @@ -89,7 +91,8 @@ % -3 & 0 & +3 \cr % -10 & 0 & +10 \cr % -3 & 0 & +3 -% }}\right]$$; +% }}\right]$$ +% % $$G_{y} = \left[{\matrix{ % -3 & -10 & -3 \cr % 0 & 0 & 0 \cr diff --git a/samples/squares_detector_demo.m b/samples/squares_detector_demo.m index 9726ab91a..94511cf66 100644 --- a/samples/squares_detector_demo.m +++ b/samples/squares_detector_demo.m @@ -1,4 +1,4 @@ -%% Square Detector demo +%% Squares Detector % It loads several images sequentially and tries to find squares in each % image. % @@ -6,9 +6,11 @@ % (it's got it all folks) to find squares in a list of images |pic*.png|. % Returns sequence of squares detected on the image. % -% , -% , -% +% Sources: +% +% * +% * +% * % function squares_detector_demo() diff --git a/samples/stereo_calibration_demo.m b/samples/stereo_calibration_demo.m index 5d196aa56..c1ea1c16d 100644 --- a/samples/stereo_calibration_demo.m +++ b/samples/stereo_calibration_demo.m @@ -1,4 +1,4 @@ -%% Stereo Calibration demo +%% Stereo Calibration % Demonstration of stereo calibration, rectification, and correspondence. % % You will learn how to use the following OpenCV functions and classes: @@ -16,8 +16,10 @@ % * |cv.StereoBM|, |cv.StereoSGBM| % * |cv.reprojectImageTo3D| % -% , -% +% Sources: +% +% * +% * % % See also: stereoCameraCalibrator % diff --git a/samples/stereo_match_demo.m b/samples/stereo_match_demo.m index adfc1aaea..b197526a8 100644 --- a/samples/stereo_match_demo.m +++ b/samples/stereo_match_demo.m @@ -6,11 +6,13 @@ % Resulting .ply file can also be viewed using % . % -% , -% , -% , -% , -% +% Sources: +% +% * +% * +% * +% * +% * % %% Theory @@ -18,9 +20,9 @@ % Previously, we saw basic concepts like epipolar constraints and other % related terms. We also saw that if we have two images of same scene, we can % get depth information from that in an intuitive way. Below is an image and -% some simple mathematical formulas which proves that intuition: +% some simple mathematical formulas which prove that intuition: % -% <> +% <> % % The above diagram contains equivalent triangles. Writing their equivalent % equations will yield us following result: @@ -30,7 +32,7 @@ % $x$ and $x'$ are the distance between points in image plane corresponding to % the scene point 3D and their camera center. $B$ is the distance between two % cameras (which we know) and $f$ is the focal length of camera (already -% known). So in short, above equation says that the depth of a point in a +% known). So in short, the above equation says that the depth of a point in a % scene is inversely proportional to the difference in distance of % corresponding image points and their camera centers. So with this % information, we can derive the depth of all pixels in an image. diff --git a/samples/stitching_demo.m b/samples/stitching_demo.m index ecc4afa9f..fee3172b1 100644 --- a/samples/stitching_demo.m +++ b/samples/stitching_demo.m @@ -1,20 +1,24 @@ -%% Rotation model images stitcher +%% Simple rotation model images stitcher % A basic example on image stitching. 
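%
% At its simplest the whole pipeline is two calls, expanded on below:
%
%   stitcher = cv.Stitcher('Mode','Panorama');
%   pano = stitcher.stitch(imgs);  % imgs is a cell array of images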
% % In this demo, we show how to use the high-level stitching API provided by % |cv.Stitcher|, and we learn how to use preconfigured stitcher configurations % to stitch images using different camera models. % -% -% +% Sources: +% +% * +% * % %% Input images (two or more) -im1 = imread(fullfile(mexopencv.root(),'test','b1.jpg')); -im2 = imread(fullfile(mexopencv.root(),'test','b2.jpg')); -%imshow(cat(2, im1, im2)) -subplot(121), imshow(im1) -subplot(122), imshow(im2) +imgs = { + imread(fullfile(mexopencv.root(),'test','b1.jpg')), ... + imread(fullfile(mexopencv.root(),'test','b2.jpg')) +}; +for i=1:numel(imgs) + subplot(1,numel(imgs),i), imshow(imgs{i}) +end %% Options @@ -27,12 +31,27 @@ % stitching materials under affine transformation, such as scans. smode = 'Panorama'; +% Internally create three chunks of each image to increase stitching success +divide_images = false; + +%% +if divide_images + for i=1:numel(imgs) + sz = size(imgs{i}); + imgs{i} = { + cv.Rect.crop(imgs{i}, [0 0 sz(2)/2 sz(1)]), ... + cv.Rect.crop(imgs{i}, [sz(2)/3 0 sz(2)/2 sz(1)]), ... + cv.Rect.crop(imgs{i}, [sz(2)/2 0 sz(2)/2 sz(1)]) + }; + end + imgs = [imgs{:}]; +end + %% Stitch stitcher = cv.Stitcher('Mode',smode, 'TryUseGPU',try_use_gpu); tic -pano = stitcher.stitch({im1, im2}); +pano = stitcher.stitch(imgs); toc %% Panorama result -figure -imshow(pano) +figure, imshow(pano) diff --git a/samples/stitching_detailed_demo.m b/samples/stitching_detailed_demo.m index 56b17ea9d..c834c6711 100644 --- a/samples/stitching_detailed_demo.m +++ b/samples/stitching_detailed_demo.m @@ -1,81 +1,81 @@ %% Rotation model images stitcher % A detailed example on image stitching. % -% +% Sources: +% +% * % % See also: -% +% % %% % Description of parameters: % % * *preview* (true|false) -% Run stitching in the preview mode. Works faster than usual mode, -% but output image will have lower resolution. +% Run stitching in the preview mode. Works faster than usual mode, but output +% image will have lower resolution. % * *try_cuda* (true|false) -% Try to use CUDA. The default value is false. All default values -% are for CPU mode. +% Try to use CUDA. The default value is false. All default values are for CPU +% mode. % * *work_megapix* (float) -% Resolution for image registration step. The default is 0.6 Mpx. +% Resolution for image registration step. The default is 0.6 Mpx. % * *features_type* (SurfFeaturesFinder|OrbFeaturesFinder|AKAZEFeaturesFinder) -% Type of features used for images matching. The default is SURF. +% Type of features used for images matching. The default is SURF. % * *matcher_type* (homography|affine) -% Matcher used for pairwise image matching. The default is homography. +% Matcher used for pairwise image matching. The default is homography. % * *estimator_type* (HomographyBasedEstimator|AffineBasedEstimator) -% Type of estimator used for transformation estimation. The default is -% homography. +% Type of estimator used for transformation estimation. The default is +% homography. % * *match_conf* (float) -% Confidence for feature matching step. The default is 0.65 for SURF -% and 0.3 for ORB. +% Confidence for feature matching step. The default is 0.65 for SURF and 0.3 +% for ORB. % * *conf_thresh* (float) -% Threshold for two images are from the same panorama confidence. -% The default is 1.0. +% Threshold for two images are from the same panorama confidence. The default +% is 1.0. 
% * *ba_cost_func* (NoBundleAdjuster|BundleAdjusterReproj|BundleAdjusterRay|BundleAdjusterAffinePartial) -% Bundle adjustment cost function. The default is Ray. +% Bundle adjustment cost function. The default is Ray. % * *ba_refine_mask* (mask) -% Set refinement mask for bundle adjustment. It looks like 'x_xxx', -% where 'x' means refine respective parameter and '_' means don't -% refine one, and has the following format: -% {fx, skew, ppx, aspect, ppy}. The default mask is 'xxxxx'. If bundle -% adjustment doesn't support estimation of selected parameter then -% the respective flag is ignored. +% Set refinement mask for bundle adjustment. It looks like 'x_xxx', where 'x' +% means refine respective parameter and '_' means don't refine one, and has +% the following format: |{fx, skew, ppx, aspect, ppy}|. The default mask is +% 'xxxxx'. If bundle adjustment doesn't support estimation of selected +% parameter then the respective flag is ignored. % * *do_wave_correct* (true|false) -% Default true +% Default true % * *wave_correct* (Horiz|Vert) -% Perform wave effect correction. The default is 'Horiz'. +% Perform wave effect correction. The default is 'Horiz'. % * *save_graph* (true|false) -% Save matches graph represented in DOT language and print it. -% Labels description: Nm is number of matches, Ni is number of inliers, -% C is confidence. +% Save matches graph represented in DOT language and print it. Labels +% description: Nm is number of matches, Ni is number of inliers, C is +% confidence. % * *warp_type* (affine|plane|cylindrical|spherical|fisheye|stereographic| -% compressedPlaneA2B1|compressedPlaneA1.5B1| -% compressedPlanePortraitA2B1|compressedPlanePortraitA1.5B1| -% paniniA2B1|paniniA1.5B1|paniniPortraitA2B1| -% paniniPortraitA1.5B1|mercator|transverseMercator) -% Warp surface type. The default is 'spherical'. +% compressedPlaneA2B1|compressedPlaneA1.5B1|compressedPlanePortraitA2B1| +% compressedPlanePortraitA1.5B1|paniniA2B1|paniniA1.5B1|paniniPortraitA2B1| +% paniniPortraitA1.5B1|mercator|transverseMercator) +% Warp surface type. The default is 'spherical'. % * *seam_megapix* (float) -% Resolution for seam estimation step. The default is 0.1 Mpx. +% Resolution for seam estimation step. The default is 0.1 Mpx. % * *seam_find_type* (no|voronoi|gc_color|gc_colorgrad|dp_color|dp_colorgrad) -% Seam estimation method. The default is 'gc_color'. +% Seam estimation method. The default is 'gc_color'. % * *compose_megapix* (float) -% Resolution for compositing step. Use -1 for original resolution. -% The default is -1. +% Resolution for compositing step. Use -1 for original resolution. The +% default is -1. % * *expos_comp_type* (NoExposureCompensator|GainCompensator|BlocksGainCompensator) -% Exposure compensation method. The default is 'BlocksGainCompensator'. +% Exposure compensation method. The default is 'BlocksGainCompensator'. % * *blend_type* (NoBlender|FeatherBlender|MultiBandBlender) -% Blending method. The default is 'MultiBandBlender'. +% Blending method. The default is 'MultiBandBlender'. % * *blend_strength* (float) -% Blending strength from [0,100] range. The default is 5. +% Blending strength from [0,100] range. The default is 5. % * *output* (filename) -% The default is 'stitching_result.jpg'. +% The default is 'stitching_result.jpg'. 
% * *timelapse* (true|false) -% Default false +% Default false % * *timelapse_type* (AsIs|Crop) -% Output warped images separately as frames of a time lapse movie, -% with 'fixed_' prepended to input file names +% Output warped images separately as frames of a time lapse movie, with +% 'fixed_' prepended to input file names % * *timelapse_range* (float) -% Range width to limit number of images to match with, default 5 +% Range width to limit number of images to match with, default 5 % %% Images diff --git a/samples/super_resolution_demo.m b/samples/super_resolution_demo.m index 6853c51fc..beecc7b4e 100644 --- a/samples/super_resolution_demo.m +++ b/samples/super_resolution_demo.m @@ -1,7 +1,9 @@ %% Super Resolution demo % This sample demonstrates Super Resolution algorithms for video sequence. % -% +% Sources: +% +% * % %% diff --git a/samples/svm_hog_ocr_digits_demo.m b/samples/svm_hog_ocr_digits_demo.m index 7251421c3..e6bafc2de 100644 --- a/samples/svm_hog_ocr_digits_demo.m +++ b/samples/svm_hog_ocr_digits_demo.m @@ -1,4 +1,4 @@ -%% OCR of hand-written digits using SVM +%% OCR of hand-written digits using HoG and SVM % % In this tutorial, we will build an SVM classifer to recognize % hand-written digits (0 to 9), using Histogram of Oriented Gradients (HOG) @@ -7,9 +7,11 @@ % For additional resources, see this % . % -% , -% , -% +% Sources: +% +% * +% * +% * % function svm_hog_ocr_digits_demo() diff --git a/samples/svm_intro_demo.m b/samples/svm_intro_demo.m index 0ddcee3b4..c88f76ccf 100644 --- a/samples/svm_intro_demo.m +++ b/samples/svm_intro_demo.m @@ -1,14 +1,16 @@ -%% Support Vector Machines +%% Support Vector Machines (SVM) % % In this sample, you will learn how to use the OpenCV function |cv.SVM.train| % to build a classifier based on SVMs and |cv.SVM.predict| to test its % performance. % -% , -% , -% , -% , -% +% Sources: +% +% * +% * +% * +% * +% * % %% Introduction to Support Vector Machines @@ -22,7 +24,7 @@ % following simple problem; For a linearly separable set of 2D-points which % belong to one of two classes, find a separating straight line. % -% <> +% <> % % Note: In this example we deal with lines and points in the Cartesian plane % instead of hyperplanes and vectors in a high dimensional space. This is a @@ -44,13 +46,13 @@ % Therefore, the optimal separating hyperplane _maximizes_ the margin of the % training data. % -% <> +% <> % %% % To undertand how the optimal hyperplane is computed, let's introduce the % notation used to define formally a hyperplane: % -% $$f(x) = \beta_{0} + \beta^{T} x,$$ +% $$f(x) = \beta_{0} + \beta^{T} x$$ % % where $\beta$ is known as the _weight vector_ and $\beta_{0}$ as the _bias_. % @@ -92,9 +94,9 @@ % constraints model the requirement for the hyperplane to classify correctly % all the training examples $x_{i}$. Formally, % -% $$\min_{\beta, \beta_{0}} L(\beta) = \frac{1}{2}||\beta||^{2}$$ -% subject to -% $$y_{i}(\beta^{T} x_{i} + \beta_{0}) \geq 1 \forall i$$ +% $$\min_{\beta, \beta_{0}} L(\beta) = \frac{1}{2}||\beta||^{2} \\ +% \quad \textrm{subject to} \quad +% y_{i}(\beta^{T} x_{i} + \beta_{0}) \geq 1 \; \forall i$$ % % where $y_{i}$ represents each of the labels of the training examples. 
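%
% In mexopencv, training such a maximum-margin linear classifier can be
% sketched as follows (a sketch; |samples| is an NxD single matrix and
% |labels| an Nx1 vector of class labels):
%
%   svm = cv.SVM();
%   svm.Type = 'C_SVC';
%   svm.KernelType = 'Linear';
%   svm.train(samples, labels);
%   pred = svm.predict(testSamples);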
% @@ -131,9 +133,9 @@ % We start here from the formulation of the optimization problem of finding % the hyperplane which maximizes the *margin*: % -% $$\min_{\beta, \beta_{0}} L(\beta) = \frac{1}{2}||\beta||^{2}$$ -% subject to -% $$y_{i}(\beta^{T} x_{i} + \beta_{0}) \geq 1 \forall i$$ +% $$\min_{\beta, \beta_{0}} L(\beta) = \frac{1}{2}||\beta||^{2} \\ +% \quad \textrm{subject to} \quad +% y_{i}(\beta^{T} x_{i} + \beta_{0}) \geq 1 \; \forall i$$ % % There are multiple ways in which this model can be modified so it takes into % account the misclassification errors. For example, one could think of @@ -158,7 +160,7 @@ % hyperplane and the distances to their correct regions of the samples that % are misclassified. % -% <> +% <> % % Note: Only the distances of the samples that are misclassified are shown in % the picture. The distances of the rest of the samples are zero since they @@ -171,11 +173,11 @@ % % Finally, the new formulation for the optimization problem is: % -% $$\min_{\beta, \beta_{0}} L(\beta) = ||\beta||^{2} + C \sum_{i} {\xi_{i}}$$ -% subject to -% $$y_{i}(\beta^{T} x_{i} + \beta_{0}) \geq 1 - \xi_{i}$$ -% and -% $$\xi_{i} \geq 0 \forall i$$ +% $$\min_{\beta, \beta_{0}} L(\beta) = ||\beta||^{2} + C \sum_{i} {\xi_{i}} \\ +% \quad \textrm{subject to} \quad +% y_{i}(\beta^{T} x_{i} + \beta_{0}) \geq 1 - \xi_{i} \\ +% \quad \textrm{and} \quad +% \xi_{i} \geq 0 \; \forall i$$ % % How should the parameter $C$ be chosen? It is obvious that the answer to % this question depends on how the training data is distributed. Although @@ -206,7 +208,8 @@ % $q=(q_1,q_2)$. Let $\phi$ be a mapping function which maps a two-dimensional % point to three-dimensional space as follows: % -% $$\phi(p) = (p_{1}^2, p_{2}^2, \sqrt{2} p_1 p_2)$$, +% $$\phi(p) = (p_{1}^2, p_{2}^2, \sqrt{2} p_1 p_2)$$ +% % $$\phi(q) = (q_{1}^2, q_{2}^2, \sqrt{2} q_1 q_2)$$ % % Let us define a kernel function $K(p,q)$ which does a dot product between diff --git a/samples/synth_video_demo.m b/samples/synth_video_demo.m new file mode 100644 index 000000000..1798bde08 --- /dev/null +++ b/samples/synth_video_demo.m @@ -0,0 +1,57 @@ +%% Synthetic video demo +% Demonstrates using |VideoSynth| classes as stub for |cv.VideoCapture|. +% Capture object is created using the helper function |createVideoCapture|. 
+% +% Sources: +% +% * +% + +%% +% create synth video capture object +if true + %cap = createVideoCapture(0, 'aruco'); + cap = createVideoCapture([], 'chess'); +else + %cap = createVideoCapture('synth|class=scene|bg=../../test/lena.jpg|fg=../../test/mask.png|noise=0.1|size=400x400|speed=1'); + %cap = createVideoCapture('synth|class=scene|bg=../../test/fruits.jpg|noise=0.1|size=512x480|speed=1|deformation=true'); + cap = createVideoCapture('synth|class=chess|framecount=500|bg=../../test/lena.jpg|size=320x320|noise=0.1|gridsize=9x6'); +end +assert(cap.isOpened()); + +%% +% set a limit on iterations when we're publishing the demo +if false && isa(cap, 'VideoSynthBase') + cap.set('FrameCount', 500); +end + +%% +% prepare plot +img = cap.read(); +assert(~isempty(img)); +hImg = imshow(img, 'InitialMagnification',100, 'Border','tight'); +hTxt = text(10, 15, 'FPS = 0', 'FontSize',8, 'Color','y'); + +%% +% main loop +counter = 0; +tID = tic(); +while ishghandle(hImg) + % grab new frame + img = cap.read(); + if isempty(img); break; end + + % frames-per-second + counter = counter + 1; + fps = counter / toc(tID); + + % display + set(hTxt, 'String',sprintf('FPS = %.2f', fps)) + set(hImg, 'CData',img) + if ~mexopencv.isOctave() + drawnow limitrate + else + drawnow + end +end +cap.release(); diff --git a/samples/texture_flow_demo.m b/samples/texture_flow_demo.m index 985831a7b..b4a47254a 100644 --- a/samples/texture_flow_demo.m +++ b/samples/texture_flow_demo.m @@ -3,7 +3,9 @@ % Sample shows how |cv.cornerEigenValsAndVecs| function can be used to % estimate image texture flow direction. % -% +% Sources: +% +% * % %% @@ -47,7 +49,7 @@ % another way to visualize flow figure, imshow(img) if ~mexopencv.isOctave() - alpha(0.5) + alpha(0.5) end hold on, quiver(X, Y, VX, VY, 'Color','k'), hold off title('flow') diff --git a/samples/threshold_demo_gui.m b/samples/threshold_demo_gui.m index 5e6450176..d6ea33ad8 100644 --- a/samples/threshold_demo_gui.m +++ b/samples/threshold_demo_gui.m @@ -5,8 +5,10 @@ % In this sample you will learn how to perform basic thresholding operations % using OpenCV function . % -% -% +% Sources: +% +% * +% * % function varargout = threshold_demo_gui(im) diff --git a/samples/threshold_inrange_demo_gui.m b/samples/threshold_inrange_demo_gui.m index 0195df1a4..973b085bb 100644 --- a/samples/threshold_inrange_demo_gui.m +++ b/samples/threshold_inrange_demo_gui.m @@ -1,4 +1,4 @@ -%% Thresholding Operations using inRange +%% inRange Thresholding Operations % % In this demo, we show how to: % @@ -9,8 +9,10 @@ % other colorspaces like HSV or LAB, where luma and chroma are represented % separately. % -% , -% +% Sources: +% +% * +% * % function varargout = threshold_inrange_demo_gui(im) diff --git a/samples/thresholding_demo.m b/samples/thresholding_demo.m index 07641044f..ea7a95659 100644 --- a/samples/thresholding_demo.m +++ b/samples/thresholding_demo.m @@ -4,7 +4,9 @@ % thresholding, Otsu's thresholding, and how to use corresponding OpenCV % functions: |cv.threshold|, |cv.adaptiveThreshold|, etc. % -% +% Sources: +% +% * % %% Simple Thresholding diff --git a/samples/train_svmsgd_demo_gui.m b/samples/train_svmsgd_demo_gui.m index 8f51dcb5f..559685d53 100644 --- a/samples/train_svmsgd_demo_gui.m +++ b/samples/train_svmsgd_demo_gui.m @@ -1,8 +1,10 @@ -%% SVMSGD Interactive Demo +%% SVMSGD Interactive Classification % Train with SVMSGD algorithm. % The classifier can handle a linearly separable 2-class dataset.
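%
% A minimal non-interactive sketch of the same training step (assuming
% |samples| and |responses| as assembled by the GUI below):
%
%   model = cv.SVMSGD();
%   model.setOptimalParameters();  % reasonable default hyperparameters
%   model.train(samples, responses);
%   weights = model.getWeights();  % decision boundary: weights*x' + shift = 0
%   shift = model.getShift();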
% -% +% Sources: +% +% * % function varargout = train_svmsgd_demo_gui() @@ -120,6 +122,8 @@ function onMouseDown(~,~) function [weights, shift] = doTrain(samples, responses) %DOTRAIN Train with SVMSGD algorithm % + % [weights, shift] = doTrain(samples, responses) + % % ## Input % * __samples__, __responses__ train set % diff --git a/samples/turing_patterns_demo.m b/samples/turing_patterns_demo.m index 496b2698d..0873f066d 100644 --- a/samples/turing_patterns_demo.m +++ b/samples/turing_patterns_demo.m @@ -1,9 +1,11 @@ -%% Multi-Scale Turing Patterns generator +%% Multi-Scale Turing Patterns Generator % % Inspired by: % % -% +% Sources: +% +% * % function turing_patterns_demo() @@ -28,9 +30,20 @@ function turing_patterns_demo() %% % Prepare video writer if write_vid - vidfile = fullfile(tempdir(), 'turing.avi'); - vid = cv.VideoWriter(vidfile, [sz sz], ... - 'FourCC','DIB ', 'FPS',30, 'Color',false); + if true + % builtin MJPG encoder, should work across all systems + vidext = 'avi'; + fourcc = 'MJPG'; + else + % FFmpeg in OpenCV can use OpenH264 for encoding H.264, + % download binaries and set OPENH264_LIBRARY environment variable + % https://github.com/opencv/opencv/tree/3.3.1/3rdparty/ffmpeg + % https://github.com/cisco/openh264/releases + vidext = 'mp4'; + fourcc = 'H264'; + end + vidfile = fullfile(tempdir(), ['turing.' vidext]); + vid = cv.VideoWriter(vidfile, [sz sz], 'FourCC',fourcc); assert(vid.isOpened(), 'Could not open output video'); end @@ -51,7 +64,10 @@ function turing_patterns_demo() a = (a - min(a(:))) / (max(a(:)) - min(a(:))); % write video frame - if write_vid, vid.write(a); end + if write_vid + frame = repmat(uint8(a*255), [1 1 3]); % 8-bit RGB + vid.write(frame); + end % show result set(hImg, 'CData',a); diff --git a/samples/tvl1_optical_flow_demo.m b/samples/tvl1_optical_flow_demo.m index 9c70ee579..4411f92ae 100644 --- a/samples/tvl1_optical_flow_demo.m +++ b/samples/tvl1_optical_flow_demo.m @@ -1,7 +1,9 @@ %% Optical Flow Estimation using Dual TV-L1 method % -% -% +% Sources: +% +% * +% * % %% Load a pair of images @@ -26,8 +28,13 @@ %% Draw velocities vector field [X,Y,U,V] = drawVelocities(flow); -figure(2), imshowpair(frame0, frame1) -%imshowpair(flow(:,:,1), flow(:,:,2)) +figure(2) +if ~mexopencv.isOctave() && mexopencv.require('images') + imshowpair(frame0, frame1) + %imshowpair(flow(:,:,1), flow(:,:,2)) +else + imshow(cat(3, frame1, frame0, frame1)) +end hold on quiver(X(:), Y(:), U(:), V(:)); hold off diff --git a/samples/video_write_demo.m b/samples/video_write_demo.m index 035c6929f..53d9d937a 100644 --- a/samples/video_write_demo.m +++ b/samples/video_write_demo.m @@ -10,9 +10,11 @@ % As a simple demonstration we will just extract one of the BGR color channels % of an input video file or webcam feed into a new video. % -% , -% , -% +% Sources: +% +% * +% * +% * % %% @@ -35,7 +37,7 @@ % Codec). The full list of codecs you may use on a system depends on just what % one you have installed. % -% <> +% <> % % As you can see things can get really complicated with videos. However, % OpenCV is mainly a computer vision library, not a video stream, codec and @@ -75,7 +77,7 @@ % value "minus one" then a window will pop up at runtime that contains all % the codec installed on your system and ask you to select the one to use: % -% <> +% <> % % Afterwards, you can use the |cv.VideoWriter.isOpened| function to find out % if the open operation succeeded or not. 
The video file automatically closes diff --git a/samples/videostab_demo.m b/samples/videostab_demo.m index aec66e187..75d7ebb6a 100644 --- a/samples/videostab_demo.m +++ b/samples/videostab_demo.m @@ -1,134 +1,132 @@ %% Video Stabilizer % -% +% Sources: +% +% * % %% % Description of parameters: % % * *model*=(Translation|TranslationAndScale|Rigid|Similarity|Affine|Homography) -% Set motion model. The default is 'Affine'. +% Set motion model. The default is 'Affine'. % * *lin_prog_motion_est*=(true|false) -% Turn on/off LP based motion estimation. Requires CLP library. -% The default is false. +% Turn on/off LP based motion estimation. Requires CLP library. +% The default is false. % * *subset*=(|auto) -% Number of random samples per one motion hypothesis. -% The default is 'auto'. +% Number of random samples per one motion hypothesis. The default is 'auto'. % * *thresh*=(|auto) -% Maximum error to classify match as inlier. The default is 'auto'. +% Maximum error to classify match as inlier. The default is 'auto'. % * *outlier_ratio*= -% Motion estimation outlier ratio hypothesis. The default is 0.5. +% Motion estimation outlier ratio hypothesis. The default is 0.5. % * *min_inlier_ratio*= -% Minimum inlier ratio to decide if estimated motion is OK. -% The default is 0.1. +% Minimum inlier ratio to decide if estimated motion is OK. +% The default is 0.1. % * *nkps*= -% Number of keypoints to find in each frame. The default is 1000. +% Number of keypoints to find in each frame. The default is 1000. % * *extra_kps*= -% Default is 0. +% Default is 0. % * *local_outlier_rejection*=(true|false) -% Perform local outlier rejection. The default is false. +% Perform local outlier rejection. The default is false. % % * *save_motions*= -% Save estimated motions into file. The default is ''. +% Save estimated motions into file. The default is ''. % * *load_motions*= -% Load motions from file. The default is ''. +% Load motions from file. The default is ''. % % * *radius*= -% Set sliding window radius. The default is 15. +% Set sliding window radius. The default is 15. % * *stdev*=(|auto) -% Set smoothing weights standard deviation. The default is 'auto' -% (i.e. sqrt(radius)). +% Set smoothing weights standard deviation. The default is 'auto' +% (i.e. sqrt(radius)). % * *lin_prog_stab*=(true|false) -% Turn on/off linear programming based stabilization method. -% Requires CLP library. Default false +% Turn on/off linear programming based stabilization method. +% Requires CLP library. Default false % * *lps_trim_ratio*=(|auto) -% Trimming ratio used in linear programming based method. +% Trimming ratio used in linear programming based method. % * *lps_w1*= -% 1st derivative weight. The default is 1. +% 1st derivative weight. The default is 1. % * *lps_w2*= -% 2nd derivative weight. The default is 10. +% 2nd derivative weight. The default is 10. % * *lps_w3*= -% 3rd derivative weight. The default is 100. +% 3rd derivative weight. The default is 100. % * *lps_w4*= -% Non-translation motion components weight. The default is 100. +% Non-translation motion components weight. The default is 100. % % * *deblur*=(true|false) -% Do deblurring. +% Do deblurring. % * *deblur_sens*= -% Set deblurring sensitivity (from 0 to +inf). The default is 0.1. +% Set deblurring sensitivity (from 0 to +inf). The default is 0.1. % % * *trim_ratio*= -% Set trimming ratio (from 0 to 0.5). The default is 0.1. +% Set trimming ratio (from 0 to 0.5). The default is 0.1. % * *est_trim*=(true|false) -% Estimate trim ratio automatically. 
The default is true.
+% Estimate trim ratio automatically. The default is true.
% * *incl_constr*=(true|false)
-% Ensure the inclusion constraint is always satisfied.
-% The default is false.
+% Ensure the inclusion constraint is always satisfied. The default is false.
%
% * *border_mode*=(Replicate|Reflect|Constant)
-% Set border extrapolation mode. The default is 'Replicate'.
+% Set border extrapolation mode. The default is 'Replicate'.
%
% * *mosaic*=(true|false)
-% Do consistent mosaicing. The default is false.
+% Do consistent mosaicing. The default is false.
% * *mosaic_stdev*=
-% Consistent mosaicing stdev threshold. The default is 10.0.
+% Consistent mosaicing stdev threshold. The default is 10.0.
%
% * *motion_inpaint*=(true|false)
-% Do motion inpainting (requires CUDA support). The default is false.
+% Do motion inpainting (requires CUDA support). The default is false.
% * *mi_dist_thresh*=
-% Estimated flow distance threshold for motion inpainting.
-% The default is 5.0.
+% Estimated flow distance threshold for motion inpainting.
+% The default is 5.0.
%
% * *color_inpaint*=(no|average|ns|telea)
-% Do color inpainting. The defailt is 'no'.
+% Do color inpainting. The default is 'no'.
% * *ci_radius*=
-% Set color inpainting radius (for 'ns' and 'telea' options only).
-% The default is 2.0
+% Set color inpainting radius (for 'ns' and 'telea' options only).
+% The default is 2.0.
%
% * *wobble_suppress*=(true|false)
-% Perform wobble suppression. The default is false.
+% Perform wobble suppression. The default is false.
% * *ws_lin_prog_motion_est*=(true|false)
-% Turn on/off LP based motion estimation. Requires CLP library.
-% The default is false.
+% Turn on/off LP based motion estimation. Requires CLP library.
+% The default is false.
% * *ws_period*=
-% Set wobble suppression period. The default is 30.
+% Set wobble suppression period. The default is 30.
% * *ws_model*=(Translation|TranslationAndScale|Rigid|Similarity|Affine|Homography)
-% Set wobble suppression motion model (must have more DOF than motion
-% estimation model). The default is 'Homography'.
+% Set wobble suppression motion model (must have more DOF than motion
+% estimation model). The default is 'Homography'.
% * *ws_subset*=(|auto)
-% Number of random samples per one motion hypothesis.
-% The default is 'auto'.
+% Number of random samples per one motion hypothesis. The default is 'auto'.
% * *ws_thresh*=(|auto)
-% Maximum error to classify match as inlier. The default is 'auto'.
+% Maximum error to classify match as inlier. The default is 'auto'.
% * *ws_outlier_ratio*=
-% Motion estimation outlier ratio hypothesis. The default is 0.5.
+% Motion estimation outlier ratio hypothesis. The default is 0.5.
% * *ws_min_inlier_ratio*=
-% Minimum inlier ratio to decide if estimated motion is OK.
-% The default is 0.1.
+% Minimum inlier ratio to decide if estimated motion is OK.
+% The default is 0.1.
% * *ws_nkps*=
-% Number of keypoints to find in each frame. The default is 1000.
+% Number of keypoints to find in each frame. The default is 1000.
% * *ws_extra_kps*=
-% Default is 0.
+% Default is 0.
% * *ws_local_outlier_rejection*=(true|false)
-% Perform local outlier rejection. The default is false.
+% Perform local outlier rejection. The default is false.
%
% * *save_motions2*=
-% Save motions estimated for wobble suppression. The default is ''.
+% Save motions estimated for wobble suppression. The default is ''.
% * *load_motions2*=
-% Load motions for wobble suppression from file. The default is ''.
+% Load motions for wobble suppression from file. The default is ''.
%
% * *gpu*=(true|false)
-% Use CUDA optimization whenever possible. The default is false.
+% Use CUDA optimization whenever possible. The default is false.
%
% * *output*=
-% Set output file path explicitely. The default is 'stabilized.avi'.
+% Set output file path explicitly. The default is 'stabilized.avi'.
% * *fps*=(|auto)
-% Set output video FPS explicitely.
-% By default the source FPS is used (auto).
+% Set output video FPS explicitly. By default the source FPS is used (auto).
% * *quiet*
-% Don't show output video frames.
+% Don't show output video frames.
% * *logger*=(LogToMATLAB|NullLog)
-% Log message to specified sink. Default is 'NullLog'.
+% Log messages to the specified sink. Default is 'NullLog'.
%
% Note: some configurations lead to two passes, some to a single pass.
%
diff --git a/samples/warp_affine_demo.m b/samples/warp_affine_demo.m
index 9f2aa4e57..1874a3a34 100644
--- a/samples/warp_affine_demo.m
+++ b/samples/warp_affine_demo.m
@@ -1,4 +1,4 @@
-%% Affine Transformations
+%% Image Affine Transformation
%
% In this demo, we show how to:
%
@@ -7,8 +7,10 @@
% * Use the OpenCV function |cv.getRotationMatrix2D| to obtain a 2x3 rotation
%   matrix
%
-% ,
-%
+% Sources:
+%
+% *
+% *
%

%% Theory
@@ -33,8 +35,7 @@
%     a_{00} & a_{01} \cr
%     a_{10} & a_{11}
% }}\right]_{2 \times 2}
-% $$
-% $$
+% \qquad
% B = \left[{\matrix{
%     b_{00} \cr
%     b_{10}
@@ -57,7 +58,9 @@
% same with:
%
% $$T = A \cdot \left[{\matrix{x \cr y}}\right] + B$$
+%
% or
+%
% $$T = M \cdot [x, y, 1]^{T}$$
%
% $$T = \left[{\matrix{
@@ -81,7 +84,7 @@
% images, we can analyze the simplest case in which it relates three points in
% both images. Look at the figure below:
%
-% <>
+% <>
%
% the points 1, 2 and 3 (forming a triangle in image 1) are mapped into
% image 2, still forming a triangle, but now they have changed noticeably. If
diff --git a/samples/warp_perspective_demo_gui.m b/samples/warp_perspective_demo_gui.m
new file mode 100644
index 000000000..8aea92f4f
--- /dev/null
+++ b/samples/warp_perspective_demo_gui.m
@@ -0,0 +1,188 @@
+%% Image Warping
+% A demo program that shows how a perspective transformation is applied to
+% an image. Based on sample code from
+% .
+%
+% Sources:
+%
+% *
+
+function varargout = warp_perspective_demo_gui(im)
+    % load source image and set initial ROI corners (4 points clockwise)
+    roi = [];
+    if nargin < 1
+        if true
+            im = fullfile(mexopencv.root(), 'test', 'books_right.jpg');
+            roi = [360 109; 532 138; 460 417; 317 338];
+        else
+            im = fullfile(mexopencv.root(), 'test', 'box_in_scene.png');
+            roi = [243 188; 328 151; 388 294; 319 351];
+        end
+        img = cv.imread(im, 'Color',true);
+    elseif ischar(im)
+        img = cv.imread(im, 'Color',true);
+    else
+        img = im;
+    end
+    if isempty(roi)
+        roi = bsxfun(@rdivide, [size(img,2) size(img,1)], ...
+            [1.7 4.2; 1.15 3.32; 1.33 1.1; 1.93 1.36]);
+    end
+
+    % create the UI and hook event handlers
+    h = buildGUI(img, roi);
+    if nargout > 0, varargout{1} = h; end
+    opts = {'Interruptible','off', 'BusyAction','cancel'};
+    set(h.fig, 'CloseRequestFcn',@(~,~) delete(h.fig), ...
+        'WindowKeyPressFcn',@onType, opts{:});
+    set(h.fig(1), 'WindowButtonDownFcn',@onMouseDown, opts{:});
+
+
+    %% Callback Functions
+
+    function redraw()
+        %REDRAW Apply transformation using current ROI and display results
+
+        % reapply transformation
+        out1 = drawROI(img, roi);
+        [out2, sz] = warpROI(img, roi);
+
+        % show results and adjust second plot to fit image
+        set(h.img(1), 'CData',out1);
+        set(h.img(2), 'CData',out2, 'XData',[1 sz(1)], 'YData',[1 sz(2)]);
+        set(h.ax(2), 'XLim',[0 sz(1)]+0.5, 'YLim',[0 sz(2)]+0.5);
+        pos = get(h.fig(2), 'Position');
+        set(h.fig(2), 'Position',[pos(1:2) sz]);
+        drawnow;
+    end
+
+    function onType(~,e)
+        %ONTYPE Event handler for key press on figure
+
+        switch e.Key
+            case 'h'
+                helpdlg({
+                    'Use your mouse to select a point and move it'
+                    'to see transformation changes.'
+                    ''
+                    'Hot keys:'
+                    'h - this help dialog'
+                    'q - quit the program'
+                    'r - change order of points to rotate transformation'
+                    'i - change order of points to invert transformation'
+                });
+            case {'q', 'escape'}
+                close(h.fig);
+            case 'r'
+                roi = circshift(roi, -1);
+                redraw();
+            case 'i'
+                roi = roi([2 1 4 3],:);
+                redraw();
+        end
+    end
+
+    function onMouseDown(~,~)
+        %ONMOUSEDOWN Event handler for mouse down on figure
+
+        % hit-test for closest ROI corner
+        pt = getCurrentPoint(h.ax(1));
+        d = sum(abs(bsxfun(@minus, roi, pt)), 2);
+        [mn, pt_idx] = min(d);
+        if mn < 20
+            % attach event handlers, and change mouse pointer
+            set(h.fig(1), 'Pointer','cross', ...
+                'WindowButtonMotionFcn',{@onMouseMove, pt_idx}, ...
+                'WindowButtonUpFcn',@onMouseUp);
+        end
+    end
+
+    function onMouseMove(~,~,idx)
+        %ONMOUSEMOVE Event handler for mouse move on figure
+
+        % move specified ROI corner
+        pt = getCurrentPoint(h.ax(1));
+        roi(idx,:) = pt;
+        redraw();
+    end
+
+    function onMouseUp(~,~)
+        %ONMOUSEUP Event handler for mouse up on figure
+
+        % detach event handlers, and restore mouse pointer
+        set(h.fig(1), 'Pointer','arrow', ...
+            'WindowButtonMotionFcn','', ...
+            'WindowButtonUpFcn','');
+    end
+end

+%% Helper Functions
+
+function out = drawROI(img, pts)
+    %DRAWROI Show ROI corners with labels
+
+    labels = {'TL', 'TR', 'BR', 'BL'};
+    out = cv.polylines(img, pts, 'Closed',true, 'Color',[0 0 255], 'Thickness',2);
+    out = cv.circle(out, pts, 5, 'Color',[0 255 0], 'Thickness',3);
+    out = cv.putText(out, labels, pts, ...
+        'FontScale',0.8, 'Color',[255 0 0], 'Thickness',2);
+end
+
+function [out, dsz] = warpROI(img, srcPts)
+    %WARPROI Compute warped image from ROI
+
+    % map ROI corners to corresponding destination rectangle
+    len = diff(srcPts([1:end 1],:), 1, 1);   % [TR-TL; BR-TR; BL-BR; TL-BL]
+    len = cellfun(@norm, num2cell(len, 2));  % lengths of sides
+    w = max(len(1), len(3));
+    h = max(len(2), len(4));
+    dstPts = [0 0; w 0; w h; 0 h];
+
+    % compute homography between points and apply perspective transformation
+    dsz = round([w h]);
+    H = cv.findHomography(srcPts, dstPts);
+    out = cv.warpPerspective(img, H, 'DSize',dsz);
+end
+
+function p = getCurrentPoint(ax)
+    %GETCURRENTPOINT Retrieve current mouse location
+
+    p = get(ax, 'CurrentPoint');
+    p = p(1,1:2) - 1;
+end
+
+function h = buildGUI(img, roi)
+    %BUILDGUI Creates the UI
+
+    % apply initial perspective transformation
+    out1 = drawROI(img, roi);
+    out2 = warpROI(img, roi);
+    sz1 = size(out1);
+    sz2 = size(out2);
+
+    % properties
+    fprops = {'Menubar','none', 'Resize','on'};
+    aprops = {'Units','normalized', 'Position',[0 0 1 1]};
+    h = struct();
+
+    % show input image + ROI corners
+    h.fig(1) = figure('Name','Image', ...
+        'Position',[100 200 sz1(2) sz1(1)], fprops{:});
+    h.ax(1) = axes('Parent',h.fig(1), aprops{:});
+    if mexopencv.isOctave()
+        h.img(1) = imshow(out1);
+    else
+        h.img(1) = imshow(out1, 'Parent',h.ax(1));
+    end
+
+    % show warped image
+    h.fig(2) = figure('Name','Warped', ...
+        'Position',[200+sz1(2) 200 sz2(2) sz2(1)], fprops{:});
+    h.ax(2) = axes('Parent',h.fig(2), aprops{:});
+    if mexopencv.isOctave()
+        h.img(2) = imshow(out2);
+    else
+        h.img(2) = imshow(out2, 'Parent',h.ax(2));
+    end
+end
diff --git a/samples/watershed_demo_gui.m b/samples/watershed_demo_gui.m
index 30acba205..dec5323d6 100644
--- a/samples/watershed_demo_gui.m
+++ b/samples/watershed_demo_gui.m
@@ -1,11 +1,13 @@
-%% Watershed demo
+%% Watershed Demo
% An example using the watershed algorithm.
%
% This program demonstrates the famous watershed segmentation algorithm
% in OpenCV.
%
-% ,
-%
+% Sources:
+%
+% *
+% *
%

function varargout = watershed_demo_gui(im)
diff --git a/samples/watershed_segmentation_demo.m b/samples/watershed_segmentation_demo.m
index 172c77fa5..4193f7100 100644
--- a/samples/watershed_segmentation_demo.m
+++ b/samples/watershed_segmentation_demo.m
@@ -12,9 +12,11 @@
% * Use the OpenCV function |cv.watershed| in order to isolate objects in the
%   image from the background
%
-% ,
-% ,
-%
+% Sources:
+%
+% *
+% *
+% *
%

%%
@@ -84,7 +86,7 @@
% find total markers
D = uint8(D * 255);
contours = cv.findContours(D, 'Mode','External', 'Method','Simple');
-contours = cellfun(@(C) cat(1,C{:}), contours, 'Uniform',false);
+contours = cellfun(@(C) cat(1,C{:}), contours, 'UniformOutput',false);
N = numel(contours);

%%
diff --git a/samples/weiner_deconvolution_demo_gui.m b/samples/weiner_deconvolution_demo_gui.m
index 9b5e9bbf3..40dd515dc 100644
--- a/samples/weiner_deconvolution_demo_gui.m
+++ b/samples/weiner_deconvolution_demo_gui.m
@@ -1,7 +1,7 @@
-%% Wiener deconvolution
+%% Wiener Deconvolution for Image Deblurring
%
% Sample shows how DFT can be used to perform
-%
+%
% of an image with user-defined point spread function (PSF).
%
% Use controls to adjust PSF parameters, and switch between linear/circular PSF.
@@ -9,7 +9,9 @@
% See also: |deconvwnr|,
%
%
-%
+% Sources:
+%
+% *
%

function varargout = weiner_deconvolution_demo_gui(im)
@@ -55,9 +57,12 @@
function img = blur_edge(img, d)
    %BLUR_EDGE Blur image edges to reduce ringing effect in deblurred image
    %
+    % img = blur_edge(img)
+    % img = blur_edge(img, d)
+    %
    % ## Input
    % * __img__ input image
-    % * __d__ Gaussian size
+    % * __d__ Gaussian size, default 31
    %
    % ## Output
    % * __img__ output image
@@ -81,10 +86,13 @@
function kern = motion_kernel(ang, d, sz)
    %MOTION_KERNEL Create linear motion filter
    %
+    % kern = motion_kernel(ang, d)
+    % kern = motion_kernel(ang, d, sz)
+    %
    % ## Input
    % * __ang__ linear motion angle
    % * __d__ linear motion length
-    % * __sz__ kernel size
+    % * __sz__ kernel size, default 65
    %
    % ## Output
    % * __kern__ kernel
@@ -103,9 +111,12 @@
function kern = defocus_kernel(d, sz)
    %DEFOCUS_KERNEL Create circular defocus kernel
    %
+    % kern = defocus_kernel(d)
+    % kern = defocus_kernel(d, sz)
+    %
    % ## Input
    % * __d__ circular motion diameter
-    % * __sz__ kernel size
+    % * __sz__ kernel size, default 65
    %
    % ## Output
    % * __kern__ kernel
diff --git a/src/+cv/dft.cpp b/src/+cv/dft.cpp
index 2be04ee9e..c1a9e7daa 100644
--- a/src/+cv/dft.cpp
+++ b/src/+cv/dft.cpp
@@ -51,6 +51,6 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
    // Process
    Mat src(rhs[0].toMat(rhs[0].isSingle() ?
CV_32F : CV_64F)), dst; - dft(src, dst, flags); + dft(src, dst, flags, nonzeroRows); plhs[0] = MxArray(dst); } diff --git a/src/+cv/eigen.cpp b/src/+cv/eigen.cpp index 3e03eb354..ec4bc2142 100644 --- a/src/+cv/eigen.cpp +++ b/src/+cv/eigen.cpp @@ -26,11 +26,11 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) // Process Mat src(rhs[0].toMat(rhs[0].isSingle() ? CV_32F : CV_64F)); - Mat eigenvalues, eigenvectors; - bool b = eigen(src, eigenvalues, (nlhs>1 ? eigenvectors : noArray())); - plhs[0] = MxArray(eigenvalues); + Mat evals, evects; + bool b = eigen(src, evals, (nlhs>1 ? evects : noArray())); + plhs[0] = MxArray(evals); if (nlhs > 1) - plhs[1] = MxArray(eigenvectors); + plhs[1] = MxArray(evects); if (nlhs > 2) plhs[2] = MxArray(b); } diff --git a/src/+cv/eigenNonSymmetric.cpp b/src/+cv/eigenNonSymmetric.cpp new file mode 100644 index 000000000..79c3c4860 --- /dev/null +++ b/src/+cv/eigenNonSymmetric.cpp @@ -0,0 +1,34 @@ +/** + * @file eigenNonSymmetric.cpp + * @brief mex interface for cv::eigenNonSymmetric + * @ingroup core + * @author Amro + * @date 2017 + */ +#include "mexopencv.hpp" +using namespace std; +using namespace cv; + +/** + * Main entry called from Matlab + * @param nlhs number of left-hand-side arguments + * @param plhs pointers to mxArrays in the left-hand-side + * @param nrhs number of right-hand-side arguments + * @param prhs pointers to mxArrays in the right-hand-side + */ +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) +{ + // Check the number of arguments + nargchk(nrhs==1 && nlhs<=2); + + // Argument vector + vector rhs(prhs, prhs+nrhs); + + // Process + Mat src(rhs[0].toMat(rhs[0].isSingle() ? CV_32F : CV_64F)); + Mat evals, evects; + eigenNonSymmetric(src, evals, (nlhs>1 ? evects : noArray())); + plhs[0] = MxArray(evals); + if (nlhs > 1) + plhs[1] = MxArray(evects); +} diff --git a/src/+cv/findCirclesGrid.cpp b/src/+cv/findCirclesGrid.cpp index 5051f69ec..90906647b 100644 --- a/src/+cv/findCirclesGrid.cpp +++ b/src/+cv/findCirclesGrid.cpp @@ -17,16 +17,16 @@ const ConstMap GridTypesMap = ("Symmetric", cv::CirclesGridFinderParameters::SYMMETRIC_GRID) ("Asymmetric", cv::CirclesGridFinderParameters::ASYMMETRIC_GRID); -/** Convert MxArray to cv::CirclesGridFinderParameters +/** Convert MxArray to cv::CirclesGridFinderParameters2 * @param arr MxArray object. 
In one of the following forms: * - a scalar struct * - a cell-array of the form: {GridType, ...} starting with the grid * type ("Symmetric" or "Asymmetric") followed by pairs of key-value options - * @return instance of CirclesGridFinderParameters object + * @return instance of CirclesGridFinderParameters2 object */ -CirclesGridFinderParameters MxArrayToFinderParameters(const MxArray &arr) +CirclesGridFinderParameters2 MxArrayToFinderParameters(const MxArray &arr) { - CirclesGridFinderParameters params; + CirclesGridFinderParameters2 params; if (arr.isStruct()) { params.gridType = GridTypesMap[arr.at("gridType").toString()]; if (arr.isField("densityNeighborhoodSize")) @@ -55,6 +55,10 @@ CirclesGridFinderParameters MxArrayToFinderParameters(const MxArray &arr) params.convexHullFactor = arr.at("convexHullFactor").toFloat(); if (arr.isField("minRNGEdgeSwitchDist")) params.minRNGEdgeSwitchDist = arr.at("minRNGEdgeSwitchDist").toFloat(); + if (arr.isField("squareSize")) + params.squareSize = arr.at("squareSize").toFloat(); + if (arr.isField("maxRectifiedDistance")) + params.maxRectifiedDistance = arr.at("maxRectifiedDistance").toFloat(); } else { vector args(arr.toVector()); @@ -88,9 +92,13 @@ CirclesGridFinderParameters MxArrayToFinderParameters(const MxArray &arr) params.convexHullFactor = args[i+1].toFloat(); else if (key == "MinRNGEdgeSwitchDist") params.minRNGEdgeSwitchDist = args[i+1].toFloat(); + else if (key == "SquareSize") + params.squareSize = args[i+1].toFloat(); + else if (key == "MaxRectifiedDistance") + params.maxRectifiedDistance = args[i+1].toFloat(); else mexErrMsgIdAndTxt("mexopencv:error", - "Unrecognized CirclesGridFinderParameters option %s", key.c_str()); + "Unrecognized CirclesGridFinderParameters2 option %s", key.c_str()); } } return params; @@ -116,7 +124,7 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) bool symmetricGrid = true; bool clustering = false; Ptr blobDetector; - CirclesGridFinderParameters params; + CirclesGridFinderParameters2 params; for (int i=2; i centers; - bool patternFound = findCirclesGrid(image, patternSize, centers, flags, + bool patternFound = findCirclesGrid2(image, patternSize, centers, flags, blobDetector, params); plhs[0] = MxArray(centers); if (nlhs > 1) diff --git a/src/+cv/fitEllipse.cpp b/src/+cv/fitEllipse.cpp index 4f588dde1..12516ef38 100644 --- a/src/+cv/fitEllipse.cpp +++ b/src/+cv/fitEllipse.cpp @@ -1,6 +1,6 @@ /** * @file fitEllipse.cpp - * @brief mex interface for cv::fitEllipse + * @brief mex interface for cv::fitEllipse, cv::fitEllipseDirect, cv::fitEllipseAMS * @ingroup imgproc * @author Kota Yamaguchi * @date 2011 @@ -9,6 +9,17 @@ using namespace std; using namespace cv; +namespace { +/// Fit ellipse methods +enum {FIT_ELLIPSE_LINEAR, FIT_ELLIPSE_DIRECT, FIT_ELLIPSE_AMS}; + +/// Fit ellipse algorithm for option processing +const ConstMap FitEllipseAlgMap = ConstMap + ("Linear", FIT_ELLIPSE_LINEAR) + ("Direct", FIT_ELLIPSE_DIRECT) + ("AMS", FIT_ELLIPSE_AMS); +} + /** * Main entry called from Matlab * @param nlhs number of left-hand-side arguments @@ -19,20 +30,51 @@ using namespace cv; void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { // Check the number of arguments - nargchk(nrhs==1 && nlhs<=1); + nargchk(nrhs>=1 && (nrhs%2)==1 && nlhs<=1); // Argument vector vector rhs(prhs, prhs+nrhs); + // Option processing + int method = FIT_ELLIPSE_LINEAR; + for (int i=1; i points(rhs[0].toVector()); - r = fitEllipse(points); + switch (method) { + case FIT_ELLIPSE_LINEAR: + r 
= fitEllipse(points); + break; + case FIT_ELLIPSE_DIRECT: + r = fitEllipseDirect(points); + break; + case FIT_ELLIPSE_AMS: + r = fitEllipseAMS(points); + break; + } } else mexErrMsgIdAndTxt("mexopencv:error", "Invalid points argument"); diff --git a/src/+cv/goodFeaturesToTrack.cpp b/src/+cv/goodFeaturesToTrack.cpp index c96ef01de..ea4492f31 100644 --- a/src/+cv/goodFeaturesToTrack.cpp +++ b/src/+cv/goodFeaturesToTrack.cpp @@ -30,6 +30,7 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) double minDistance = 2.0; Mat mask; int blockSize = 3; + int gradientSize = 3; bool useHarrisDetector = false; double k = 0.04; for (int i=1; i corners; goodFeaturesToTrack(image, corners, maxCorners, qualityLevel, minDistance, - mask, blockSize, useHarrisDetector, k); + mask, blockSize, gradientSize, useHarrisDetector, k); plhs[0] = MxArray(corners); } diff --git a/src/+cv/private/DetectionBasedTracker_.cpp b/src/+cv/private/DetectionBasedTracker_.cpp index 434c62908..852cedefe 100644 --- a/src/+cv/private/DetectionBasedTracker_.cpp +++ b/src/+cv/private/DetectionBasedTracker_.cpp @@ -11,12 +11,11 @@ using namespace std; using namespace cv; //HACK: detection_based_tracker.cpp requires C++11 threads or Unix pthreads -// to compile. On Windows, it means we need >= VS2012 (VS2010 doesnt work). +// to compile. On Windows, it means we need >= VS2013 (VS2010 doesnt work). //HACK: I'm excluding MinGW since not all builds have std::thread support, // plus the detection code in opencv doesn't handle MinGW correctly. -#if (defined(__linux__) || defined(__APPLE__) || \ - (defined(__cplusplus) && __cplusplus > 199711L) || \ - (defined(_MSC_VER) && _MSC_VER >= 1700)) && !defined(__MINGW32__) +#if defined(__linux__) || defined(__APPLE__) || \ + (defined(CV_CXX11) && !defined(__MINGW32__)) namespace { // Persistent objects diff --git a/src/+cv/private/KeyPointsFilter_.cpp b/src/+cv/private/KeyPointsFilter_.cpp index 424ab9143..c2d5b8496 100644 --- a/src/+cv/private/KeyPointsFilter_.cpp +++ b/src/+cv/private/KeyPointsFilter_.cpp @@ -32,6 +32,12 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) KeyPointsFilter::removeDuplicated(keypoints); plhs[0] = MxArray(keypoints); } + else if (method == "removeDuplicatedSorted") { + nargchk(nrhs==2 && nlhs<=1); + vector keypoints(rhs[1].toVector()); + KeyPointsFilter::removeDuplicatedSorted(keypoints); + plhs[0] = MxArray(keypoints); + } else if (method == "retainBest") { nargchk(nrhs==3 && nlhs<=1); vector keypoints(rhs[1].toVector()); diff --git a/src/+cv/private/Net_.cpp b/src/+cv/private/Net_.cpp index 22d7d7d98..282ab7445 100644 --- a/src/+cv/private/Net_.cpp +++ b/src/+cv/private/Net_.cpp @@ -25,8 +25,33 @@ const ConstMap BackendsMap = ConstMap /// Computation target devices for option processing const ConstMap TargetsMap = ConstMap - ("CPU", cv::dnn::DNN_TARGET_CPU) - ("OpenCL", cv::dnn::DNN_TARGET_OPENCL); + ("CPU", cv::dnn::DNN_TARGET_CPU) + ("OpenCL", cv::dnn::DNN_TARGET_OPENCL); + +/// Computation target devices for option processing +const ConstMap TargetsInvMap = ConstMap + (cv::dnn::DNN_TARGET_CPU, "CPU") + (cv::dnn::DNN_TARGET_OPENCL, "OpenCL"); + +/** + * Create 4-dimensional blob from MATLAB array + * @param arr input MxArray object (numeric array). + * @return blob 4-dimensional cv::MatND. 
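+ *
+ * Illustrative sketch (an addition for clarity, not part of the original
+ * patch): a 2-D MATLAB matrix of size [2 10] comes back as a 4-D blob of
+ * size 2x10x1x1, since MATLAB drops trailing singleton dimensions while
+ * some dnn methods expect ndims==4.
+ * @code
+ * MatND blob = MxArrayToBlob(arr);  // arr wraps e.g. zeros(2,10)
+ * CV_Assert(blob.dims == 4);        // trailing singletons were appended
+ * @endcode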
+ * @see MxArray::toMatND + */ +MatND MxArrayToBlob(const MxArray& arr) +{ + MatND blob(arr.toMatND(CV_32F)); + if (blob.dims < 4) { + //HACK: add trailing singleton dimensions (up to 4D) + // (needed because in MATLAB, size(zeros(2,10,1,1)) is [2 10], + // but some dnn methods expect blobs to have ndims==4) + int sz[4] = {1, 1, 1, 1}; + std::copy(blob.size.p, blob.size.p + blob.dims, sz); + blob = blob.reshape(0, 4, sz); + } + return blob; +} /** Convert MxArray to cv::dnn::Net::LayerId * @param arr MxArray object. In one of the following forms: @@ -95,19 +120,37 @@ LayerParams MxArrayToLayerParams(const MxArray& arr) for (int i = 0; i < dict.nfields(); ++i) { string key(dict.fieldname(i)); const MxArray val(dict.at(key)); - if (val.isChar()) - params.set(key, val.toString()); - else if (val.isFloat()) - params.set(key, val.toDouble()); - else - params.set(key, val.toInt()); + if (val.isChar()) { + if (val.numel() == 1) + params.set(key, val.toString()); + else { + vector v(val.toVector()); + params.set(key, DictValue::arrayString(v.begin(), v.size())); + } + } + else if (val.isFloat()) { + if (val.numel() == 1) + params.set(key, val.toDouble()); + else { + vector v(val.toVector()); + params.set(key, DictValue::arrayReal(v.begin(), v.size())); + } + } + else { + if (val.numel() == 1) + params.set(key, val.toInt()); + else { + vector v(val.toVector()); + params.set(key, DictValue::arrayInt(v.begin(), v.size())); + } + } } } if (arr.isField("blobs")) { vector blobs(arr.at("blobs").toVector()); params.blobs.reserve(blobs.size()); for (vector::const_iterator it = blobs.begin(); it != blobs.end(); ++it) - params.blobs.push_back(it->toMatND(CV_32F)); + params.blobs.push_back(MxArrayToBlob(*it)); } if (arr.isField("name")) params.name = arr.at("name").toString(); if (arr.isField("type")) params.type = arr.at("type").toString(); @@ -125,6 +168,7 @@ MxArray toStruct(const Ptr &layer) s.set("blobs", layer->blobs); s.set("name", layer->name); s.set("type", layer->type); + s.set("preferableTarget", TargetsInvMap[layer->preferableTarget]); return s; } @@ -140,9 +184,69 @@ MxArray toStruct(const vector > &layers) s.set("blobs", layers[i]->blobs, i); s.set("name", layers[i]->name, i); s.set("type", layers[i]->type, i); + s.set("preferableTarget", TargetsInvMap[layers[i]->preferableTarget], i); } return s; } + +/** MxArray constructor from 64-bit integer. + * @param i int value. + * @return MxArray object, a scalar int64 array. + */ +MxArray toMxArray(int64_t i) +{ + MxArray arr(mxCreateNumericMatrix(1, 1, mxINT64_CLASS, mxREAL)); + if (arr.isNull()) + mexErrMsgIdAndTxt("mexopencv:error", "Allocation error"); + arr.set(0, i); + return arr; +} + +/** Create an instance of Net using options in arguments + * @param type type of network to import, one of: + * - "Caffe" + * - "Tensorflow" + * - "Torch" + * - "Darknet" + * @param first iterator at the beginning of the vector range + * @param last iterator at the end of the vector range + * @return smart pointer to created Net + */ +Ptr readNetFrom(const string &type, + vector::const_iterator first, + vector::const_iterator last) +{ + ptrdiff_t len = std::distance(first, last); + Net net; + if (type == "Caffe") { + nargchk(len==1 || len==2); + string prototxt(first->toString()); ++first; + string caffeModel(len==2 ? first->toString() : string()); + net = readNetFromCaffe(prototxt, caffeModel); + } + else if (type == "Tensorflow") { + nargchk(len==1 || len==2); + string model(first->toString()); ++first; + string config(len==2 ? 
first->toString() : string()); + net = readNetFromTensorflow(model, config); + } + else if (type == "Torch") { + nargchk(len==1 || len==2); + string filename(first->toString()); ++first; + bool isBinary = (len==2 ? first->toBool() : true); + net = readNetFromTorch(filename, isBinary); + } + else if (type == "Darknet") { + nargchk(len==1 || len==2); + string cfgFile(first->toString()); ++first; + string darknetModel(len==2 ? first->toString() : string()); + net = readNetFromDarknet(cfgFile, darknetModel); + } + else + mexErrMsgIdAndTxt("mexopencv:error", + "Unrecognized network type %s", type.c_str()); + return makePtr(net); +} } /** @@ -155,7 +259,7 @@ MxArray toStruct(const vector > &layers) void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { // Check the number of arguments - nargchk(nrhs>=2 && nlhs<=1); + nargchk(nrhs>=2 && nlhs<=2); // Argument vector vector rhs(prhs, prhs+nrhs); @@ -164,8 +268,10 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) // Constructor is called. Create a new object from argument if (method == "new") { - nargchk(nrhs==2 && nlhs<=1); - obj_[++last_id] = makePtr(); + nargchk(nrhs>=2 && nlhs<=1); + obj_[++last_id] = (nrhs > 2) ? + readNetFrom(rhs[2].toString(), rhs.begin() + 3, rhs.end()) : + makePtr(); plhs[0] = MxArray(last_id); mexLock(); return; @@ -193,6 +299,7 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) Size size; Scalar mean; bool swapRB = true; + bool crop = true; for (int i=3; i::const_iterator it = arr.begin(); it != arr.end(); ++it) images.push_back(it->toMat(CV_32F)); } - blob = blobFromImages(images, scalefactor, size, mean, swapRB); + blob = blobFromImages(images, scalefactor, size, mean, swapRB, crop); } else { Mat image(rhs[2].toMat(CV_32F)); - blob = blobFromImage(image, scalefactor, size, mean, swapRB); + blob = blobFromImage(image, scalefactor, size, mean, swapRB, crop); } plhs[0] = MxArray(blob); return; } + else if (method == "shrinkCaffeModel") { + nargchk(nrhs==4 && nlhs==0); + string src(rhs[2].toString()), + dst(rhs[3].toString()); + shrinkCaffeModel(src, dst); + return; + } // Big operation switch Ptr obj = obj_[id]; @@ -236,36 +352,6 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) obj_.erase(id); mexUnlock(); } - else if (method == "import") { - nargchk(nrhs>=3 && nlhs==0); - Ptr importer; - string type(rhs[2].toString()); - if (type == "Caffe") { - nargchk(nrhs==4 || nrhs==5); - string prototxt(rhs[3].toString()), caffeModel; - if (nrhs == 5) - caffeModel = rhs[4].toString(); - importer = createCaffeImporter(prototxt, caffeModel); - } - else if (type == "Tensorflow") { - nargchk(nrhs==4); - string model(rhs[3].toString()); - importer = createTensorflowImporter(model); - } - else if (type == "Torch") { - nargchk(nrhs==4 || nrhs==5); - string filename(rhs[3].toString()); - bool isBinary = (nrhs == 5) ? 
rhs[4].toBool() : true; - importer = createTorchImporter(filename, isBinary); - } - else - mexErrMsgIdAndTxt("mexopencv:error", - "Unrecognized importer type %s", type.c_str()); - if (importer.empty()) - mexErrMsgIdAndTxt("mexopencv:error", "Failed to create Importer"); - importer->populateNet(*obj.get()); - importer.release(); - } else if (method == "empty") { nargchk(nrhs==2 && nlhs<=1); plhs[0] = MxArray(obj->empty()); @@ -275,22 +361,22 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) string name(rhs[2].toString()), type(rhs[3].toString()); LayerParams params(MxArrayToLayerParams(rhs[4])); - int id = obj->addLayer(name, type, params); - plhs[0] = MxArray(id); + int lid = obj->addLayer(name, type, params); + plhs[0] = MxArray(lid); } else if (method == "addLayerToPrev") { nargchk(nrhs==5 && nlhs<=1); string name(rhs[2].toString()), type(rhs[3].toString()); LayerParams params(MxArrayToLayerParams(rhs[4])); - int id = obj->addLayerToPrev(name, type, params); - plhs[0] = MxArray(id); + int lid = obj->addLayerToPrev(name, type, params); + plhs[0] = MxArray(lid); } else if (method == "getLayerId") { nargchk(nrhs==3 && nlhs<=1); string layer(rhs[2].toString()); - int id = obj->getLayerId(layer); - plhs[0] = MxArray(id); //TODO: return int32 scalar value + int lid = obj->getLayerId(layer); + plhs[0] = MxArray(lid); } else if (method == "getLayerNames") { nargchk(nrhs==2 && nlhs<=1); @@ -390,7 +476,7 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) } else if (method == "setInput") { nargchk((nrhs==3 || nrhs==4) && nlhs==0); - MatND blob(rhs[2].toMatND(CV_32F)); + MatND blob(MxArrayToBlob(rhs[2])); if (nrhs > 3) obj->setInput(blob, rhs[3].toString()); else @@ -400,7 +486,7 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) nargchk(nrhs==5 && nlhs==0); Net::LayerId layer(MxArrayToLayerId(rhs[2])); int numParam = rhs[3].toInt(); - MatND blob(rhs[4].toMatND(CV_32F)); + MatND blob(MxArrayToBlob(rhs[4])); obj->setParam(layer, numParam, blob); } else if (method == "getParam") { @@ -431,6 +517,14 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) nargchk(nrhs==3 && nlhs==0); obj->enableFusion(rhs[2].toBool()); } + else if (method == "getPerfProfile") { + nargchk(nrhs==2 && nlhs<=2); + vector timings; + int64 total = obj->getPerfProfile(timings); + plhs[0] = MxArray(timings); + if (nlhs > 1) + plhs[1] = toMxArray(total); + } //TODO: //else if (method == "getLayerShapes") {} //else if (method == "getLayersShapes") {} diff --git a/src/+cv/private/TickMeter_.cpp b/src/+cv/private/TickMeter_.cpp index bd91c8c13..f75dcafe4 100644 --- a/src/+cv/private/TickMeter_.cpp +++ b/src/+cv/private/TickMeter_.cpp @@ -1,6 +1,6 @@ /** * @file TickMeter_.cpp - * @brief mex interface for cv::TickMeter + * @brief mex interface for cv::TickMeter and related functions * @ingroup core * @author Amro * @date 2017 @@ -55,6 +55,22 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) mexLock(); return; } + // static method calls + else if (method == "getTickCount") { + nargchk(nrhs==2 && nlhs<=1); + plhs[0] = toMxArray(getTickCount()); + return; + } + else if (method == "getTickFrequency") { + nargchk(nrhs==2 && nlhs<=1); + plhs[0] = MxArray(getTickFrequency()); + return; + } + else if (method == "getCPUTickCount") { + nargchk(nrhs==2 && nlhs<=1); + plhs[0] = toMxArray(getCPUTickCount()); + return; + } // Big operation switch Ptr obj = obj_[id]; diff --git a/src/+cv/private/Utils_.cpp 
b/src/+cv/private/Utils_.cpp index 84df49d1c..7c5c44a69 100644 --- a/src/+cv/private/Utils_.cpp +++ b/src/+cv/private/Utils_.cpp @@ -9,6 +9,336 @@ using namespace std; using namespace cv; +// HAVE_IPP, HAVE_OPENVX, HAVE_OPENCL, HAVE_CUDA, HAVE_TEGRA_OPTIMIZATION +#include "opencv2/cvconfig.h" +#include "opencv2/core/ocl.hpp" +#include "opencv2/core/cuda.hpp" + +namespace { +/** Convert size type to MxArray. + * @param i value. + * @return MxArray object, a scalar uint64 array. + */ +MxArray toMxArray(size_t i) +{ + MxArray arr(mxCreateNumericMatrix(1, 1, mxUINT64_CLASS, mxREAL)); + if (arr.isNull()) + mexErrMsgIdAndTxt("mexopencv:error", "Allocation error"); + arr.set(0, static_cast(i)); + return arr; +} + +/** Convert vector of size type to MxArray. + * @param v vector. + * @return MxArray object, a vector uint64 array. + */ +MxArray toMxArray(const std::vector &v) +{ + MxArray arr(mxCreateNumericMatrix(1, v.size(), mxUINT64_CLASS, mxREAL)); + if (arr.isNull()) + mexErrMsgIdAndTxt("mexopencv:error", "Allocation error"); + for (size_t i = 0; i < v.size(); ++i) + arr.set(i, static_cast(v[i])); + return arr; +} + +/// OpenCL device type +const ConstMap OCLTypeMap = ConstMap + (cv::ocl::Device::TYPE_DEFAULT, "Default") + (cv::ocl::Device::TYPE_CPU, "CPU") + (cv::ocl::Device::TYPE_GPU, "GPU") + (cv::ocl::Device::TYPE_ACCELERATOR, "Accelerator") + (cv::ocl::Device::TYPE_DGPU, "DGPU") + (cv::ocl::Device::TYPE_IGPU, "IGPU"); + +/// OpenCL type of global memory cache +const ConstMap OCLCacheMap = ConstMap + (cv::ocl::Device::NO_CACHE, "NoCache") + (cv::ocl::Device::READ_ONLY_CACHE, "ReadOnlyCache") + (cv::ocl::Device::READ_WRITE_CACHE, "ReadWriteCache"); + +/// OpenCL type of local memory +const ConstMap OCLMemMap = ConstMap + (cv::ocl::Device::NO_LOCAL_MEM, "NoLocalMem") + (cv::ocl::Device::LOCAL_IS_LOCAL, "LocalIsLocal") + (cv::ocl::Device::LOCAL_IS_GLOBAL, "LocalIsGlobal"); + +/// OpenCL vendor name +const ConstMap OCLVendorMap = ConstMap + (cv::ocl::Device::UNKNOWN_VENDOR, "Unknown") + (cv::ocl::Device::VENDOR_AMD, "AMD") + (cv::ocl::Device::VENDOR_INTEL, "Intel") + (cv::ocl::Device::VENDOR_NVIDIA, "Nvidia"); + +/// CUDA device compute modes +const ConstMap CUDAComputeModeMap = ConstMap + (cv::cuda::DeviceInfo::ComputeModeDefault, "Default") + (cv::cuda::DeviceInfo::ComputeModeExclusive, "Exclusive") + (cv::cuda::DeviceInfo::ComputeModeProhibited, "Prohibited") + (cv::cuda::DeviceInfo::ComputeModeExclusiveProcess, "ExclusiveProcess"); + +/** Convert OpenCL FP config bit-field to MxArray. + * @param flags int value. + * @return MxArray object, a scalar struct. + */ +MxArray toFPConfigStruct(int flags) +{ + const char *fields[8] = {"Denorm", "InfNaN", "RoundToNearest", + "RoundToZero", "RoundToInf", "FMA", "SoftFloat", + "CorrectlyRoundedDivideSqrt"}; + MxArray s = MxArray::Struct(fields, 8); + s.set(fields[0], (flags & cv::ocl::Device::FP_DENORM) != 0); + s.set(fields[1], (flags & cv::ocl::Device::FP_INF_NAN) != 0); + s.set(fields[2], (flags & cv::ocl::Device::FP_ROUND_TO_NEAREST) != 0); + s.set(fields[3], (flags & cv::ocl::Device::FP_ROUND_TO_ZERO) != 0); + s.set(fields[4], (flags & cv::ocl::Device::FP_ROUND_TO_INF) != 0); + s.set(fields[5], (flags & cv::ocl::Device::FP_FMA) != 0); + s.set(fields[6], (flags & cv::ocl::Device::FP_SOFT_FLOAT) != 0); + s.set(fields[7], (flags & cv::ocl::Device::FP_CORRECTLY_ROUNDED_DIVIDE_SQRT) != 0); + return s; +} + +/** Convert OpenCL execution capabilities bit-field to MxArray. + * @param flags int value. + * @return MxArray object, a scalar struct. 
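+ *
+ * Illustrative sketch (not part of the original patch): a flags value with
+ * only EXEC_KERNEL set yields a struct whose "Kernel" field is true and
+ * whose "NativeKernel" field is false.
+ * @code
+ * MxArray s = toExecCapStruct(cv::ocl::Device::EXEC_KERNEL);
+ * @endcode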
+ */ +MxArray toExecCapStruct(int flags) +{ + const char *fields[2] = {"Kernel", "NativeKernel"}; + MxArray s = MxArray::Struct(fields, 2); + s.set(fields[0], (flags & cv::ocl::Device::EXEC_KERNEL) != 0); + s.set(fields[1], (flags & cv::ocl::Device::EXEC_NATIVE_KERNEL) != 0); + return s; +} + +/** Convert OpenCL platform info vector to struct array + * @param vpi vector of platform info + * @return struct-array MxArray object + */ +MxArray toStruct(const vector &vpi) +{ + const char *fieldsP[4] = {"name", "vendor", "version", "device"}; + const char *fieldsD[68] = {"name", "extensions", "version", "vendorName", + "OpenCL_C_Version", "OpenCLVersion", "deviceVersionMajor", + "deviceVersionMinor", "driverVersion", "type", "addressBits", + "available", "compilerAvailable", "linkerAvailable", "doubleFPConfig", + "singleFPConfig", "halfFPConfig", "endianLittle", + "errorCorrectionSupport", "executionCapabilities", + "globalMemCacheSize", "globalMemCacheType", "globalMemCacheLineSize", + "globalMemSize", "localMemSize", "localMemType", "hostUnifiedMemory", + "imageSupport", "imageFromBufferSupport", "imagePitchAlignment", + "imageBaseAddressAlignment", "image2DMaxWidth", "image2DMaxHeight", + "image3DMaxWidth", "image3DMaxHeight", "image3DMaxDepth", + "imageMaxBufferSize", "imageMaxArraySize", "vendorID", + "maxClockFrequency", "maxComputeUnits", "maxConstantArgs", + "maxConstantBufferSize", "maxMemAllocSize", "maxParameterSize", + "maxReadImageArgs", "maxWriteImageArgs", "maxSamplers", + "maxWorkGroupSize", "maxWorkItemDims", "maxWorkItemSizes", + "memBaseAddrAlign", "nativeVectorWidthChar", "nativeVectorWidthShort", + "nativeVectorWidthInt", "nativeVectorWidthLong", + "nativeVectorWidthFloat", "nativeVectorWidthDouble", + "nativeVectorWidthHalf", "preferredVectorWidthChar", + "preferredVectorWidthShort", "preferredVectorWidthInt", + "preferredVectorWidthLong", "preferredVectorWidthFloat", + "preferredVectorWidthDouble", "preferredVectorWidthHalf", + "printfBufferSize", "profilingTimerResolution"}; + MxArray sp = MxArray::Struct(fieldsP, 4, 1, vpi.size()); + for (size_t i = 0; i < vpi.size(); ++i) { + const cv::ocl::PlatformInfo &pi = vpi[i]; + MxArray sd = MxArray::Struct(fieldsD, 68, 1, pi.deviceNumber()); + for (int j = 0; j < pi.deviceNumber(); ++j) { + cv::ocl::Device di; + pi.getDevice(di, j); + sd.set(fieldsD[0], di.name(), j); + sd.set(fieldsD[1], di.extensions(), j); + sd.set(fieldsD[2], di.version(), j); + sd.set(fieldsD[3], di.vendorName(), j); + sd.set(fieldsD[4], di.OpenCL_C_Version(), j); + sd.set(fieldsD[5], di.OpenCLVersion(), j); + sd.set(fieldsD[6], di.deviceVersionMajor(), j); + sd.set(fieldsD[7], di.deviceVersionMinor(), j); + sd.set(fieldsD[8], di.driverVersion(), j); + sd.set(fieldsD[9], OCLTypeMap[di.type()], j); + sd.set(fieldsD[10], di.addressBits(), j); + sd.set(fieldsD[11], di.available(), j); + sd.set(fieldsD[12], di.compilerAvailable(), j); + sd.set(fieldsD[13], di.linkerAvailable(), j); + sd.set(fieldsD[14], toFPConfigStruct(di.doubleFPConfig()), j); + sd.set(fieldsD[15], toFPConfigStruct(di.singleFPConfig()), j); + sd.set(fieldsD[16], toFPConfigStruct(di.halfFPConfig()), j); + sd.set(fieldsD[17], di.endianLittle(), j); + sd.set(fieldsD[18], di.errorCorrectionSupport(), j); + sd.set(fieldsD[19], toExecCapStruct(di.executionCapabilities()), j); + sd.set(fieldsD[20], toMxArray(di.globalMemCacheSize()), j); + sd.set(fieldsD[21], OCLCacheMap[di.globalMemCacheType()], j); + sd.set(fieldsD[22], di.globalMemCacheLineSize(), j); + sd.set(fieldsD[23], 
toMxArray(di.globalMemSize()), j); + sd.set(fieldsD[24], toMxArray(di.localMemSize()), j); + sd.set(fieldsD[25], OCLMemMap[di.localMemType()], j); + sd.set(fieldsD[26], di.hostUnifiedMemory(), j); + sd.set(fieldsD[27], di.imageSupport(), j); + sd.set(fieldsD[28], di.imageFromBufferSupport(), j); + sd.set(fieldsD[29], static_cast(di.imagePitchAlignment()), j); + sd.set(fieldsD[30], static_cast(di.imageBaseAddressAlignment()), j); + sd.set(fieldsD[31], toMxArray(di.image2DMaxWidth()), j); + sd.set(fieldsD[32], toMxArray(di.image2DMaxHeight()), j); + sd.set(fieldsD[33], toMxArray(di.image3DMaxWidth()), j); + sd.set(fieldsD[34], toMxArray(di.image3DMaxHeight()), j); + sd.set(fieldsD[35], toMxArray(di.image3DMaxDepth()), j); + sd.set(fieldsD[36], toMxArray(di.imageMaxBufferSize()), j); + sd.set(fieldsD[37], toMxArray(di.imageMaxArraySize()), j); + sd.set(fieldsD[38], OCLVendorMap[di.vendorID()], j); + sd.set(fieldsD[39], di.maxClockFrequency(), j); + sd.set(fieldsD[40], di.maxComputeUnits(), j); + sd.set(fieldsD[41], di.maxConstantArgs(), j); + sd.set(fieldsD[42], toMxArray(di.maxConstantBufferSize()), j); + sd.set(fieldsD[43], toMxArray(di.maxMemAllocSize()), j); + sd.set(fieldsD[44], toMxArray(di.maxParameterSize()), j); + sd.set(fieldsD[45], di.maxReadImageArgs(), j); + sd.set(fieldsD[46], di.maxWriteImageArgs(), j); + sd.set(fieldsD[47], di.maxSamplers(), j); + sd.set(fieldsD[48], toMxArray(di.maxWorkGroupSize()), j); + sd.set(fieldsD[49], di.maxWorkItemDims(), j); + { + vector mwis(32); // MAX_DIMS + di.maxWorkItemSizes(&mwis[0]); + mwis.resize(di.maxWorkItemDims()); + sd.set(fieldsD[50], toMxArray(mwis), j); + } + sd.set(fieldsD[51], di.memBaseAddrAlign(), j); + sd.set(fieldsD[52], di.nativeVectorWidthChar(), j); + sd.set(fieldsD[53], di.nativeVectorWidthShort(), j); + sd.set(fieldsD[54], di.nativeVectorWidthInt(), j); + sd.set(fieldsD[55], di.nativeVectorWidthLong(), j); + sd.set(fieldsD[56], di.nativeVectorWidthFloat(), j); + sd.set(fieldsD[57], di.nativeVectorWidthDouble(), j); + sd.set(fieldsD[58], di.nativeVectorWidthHalf(), j); + sd.set(fieldsD[59], di.preferredVectorWidthChar(), j); + sd.set(fieldsD[60], di.preferredVectorWidthShort(), j); + sd.set(fieldsD[61], di.preferredVectorWidthInt(), j); + sd.set(fieldsD[62], di.preferredVectorWidthLong(), j); + sd.set(fieldsD[63], di.preferredVectorWidthFloat(), j); + sd.set(fieldsD[64], di.preferredVectorWidthDouble(), j); + sd.set(fieldsD[65], di.preferredVectorWidthHalf(), j); + sd.set(fieldsD[66], toMxArray(di.printfBufferSize()), j); + sd.set(fieldsD[67], toMxArray(di.profilingTimerResolution()), j); + } + sp.set(fieldsP[0], pi.name(), i); + sp.set(fieldsP[1], pi.vendor(), i); + sp.set(fieldsP[2], pi.version(), i); + sp.set(fieldsP[3], sd, i); + } + return sp; +} + +/** Convert CUDA device info to struct array + * @param di device info object + * @return scalar struct MxArray object + */ +MxArray toStruct(const cv::cuda::DeviceInfo &di) +{ + const char *fields[57] = {"deviceID", "name", "totalGlobalMem", + "sharedMemPerBlock", "regsPerBlock", "warpSize", "memPitch", + "maxThreadsPerBlock", "maxThreadsDim", "maxGridSize", "clockRate", + "totalConstMem", "majorVersion", "minorVersion", "textureAlignment", + "texturePitchAlignment", "multiProcessorCount", + "kernelExecTimeoutEnabled", "integrated", "canMapHostMemory", + "computeMode", "maxTexture1D", "maxTexture1DMipmap", + "maxTexture1DLinear", "maxTexture2D", "maxTexture2DMipmap", + "maxTexture2DLinear", "maxTexture2DGather", "maxTexture3D", + "maxTextureCubemap", "maxTexture1DLayered", 
"maxTexture2DLayered", + "maxTextureCubemapLayered", "maxSurface1D", "maxSurface2D", + "maxSurface3D", "maxSurface1DLayered", "maxSurface2DLayered", + "maxSurfaceCubemap", "maxSurfaceCubemapLayered", "surfaceAlignment", + "concurrentKernels", "ECCEnabled", "pciBusID", "pciDeviceID", + "pciDomainID", "tccDriver", "asyncEngineCount", "unifiedAddressing", + "memoryClockRate", "memoryBusWidth", "l2CacheSize", + "maxThreadsPerMultiProcessor", "freeMemory", "totalMemory", + "supports", "isCompatible"}; + MxArray s = MxArray::Struct(fields, 57); + s.set(fields[0], di.deviceID()); + s.set(fields[1], di.name()); + s.set(fields[2], toMxArray(di.totalGlobalMem())); + s.set(fields[3], toMxArray(di.sharedMemPerBlock())); + s.set(fields[4], di.regsPerBlock()); + s.set(fields[5], di.warpSize()); + s.set(fields[6], toMxArray(di.memPitch())); + s.set(fields[7], di.maxThreadsPerBlock()); + s.set(fields[8], di.maxThreadsDim()); + s.set(fields[9], di.maxGridSize()); + s.set(fields[10], di.clockRate()); + s.set(fields[11], toMxArray(di.totalConstMem())); + s.set(fields[12], di.majorVersion()); + s.set(fields[13], di.minorVersion()); + s.set(fields[14], toMxArray(di.textureAlignment())); + s.set(fields[15], toMxArray(di.texturePitchAlignment())); + s.set(fields[16], di.multiProcessorCount()); + s.set(fields[17], di.kernelExecTimeoutEnabled()); + s.set(fields[18], di.integrated()); + s.set(fields[19], di.canMapHostMemory()); + s.set(fields[20], CUDAComputeModeMap[di.computeMode()]); + s.set(fields[21], di.maxTexture1D()); + s.set(fields[22], di.maxTexture1DMipmap()); + s.set(fields[23], di.maxTexture1DLinear()); + s.set(fields[24], di.maxTexture2D()); + s.set(fields[25], di.maxTexture2DMipmap()); + s.set(fields[26], di.maxTexture2DLinear()); + s.set(fields[27], di.maxTexture2DGather()); + s.set(fields[28], di.maxTexture3D()); + s.set(fields[29], di.maxTextureCubemap()); + s.set(fields[30], di.maxTexture1DLayered()); + s.set(fields[31], di.maxTexture2DLayered()); + s.set(fields[32], di.maxTextureCubemapLayered()); + s.set(fields[33], di.maxSurface1D()); + s.set(fields[34], di.maxSurface2D()); + s.set(fields[35], di.maxSurface3D()); + s.set(fields[36], di.maxSurface1DLayered()); + s.set(fields[37], di.maxSurface2DLayered()); + s.set(fields[38], di.maxSurfaceCubemap()); + s.set(fields[39], di.maxSurfaceCubemapLayered()); + s.set(fields[40], toMxArray(di.surfaceAlignment())); + s.set(fields[41], di.concurrentKernels()); + s.set(fields[42], di.ECCEnabled()); + s.set(fields[43], di.pciBusID()); + s.set(fields[44], di.pciDeviceID()); + s.set(fields[45], di.pciDomainID()); + s.set(fields[46], di.tccDriver()); + s.set(fields[47], di.asyncEngineCount()); + s.set(fields[48], di.unifiedAddressing()); + s.set(fields[49], di.memoryClockRate()); + s.set(fields[50], di.memoryBusWidth()); + s.set(fields[51], di.l2CacheSize()); + s.set(fields[52], di.maxThreadsPerMultiProcessor()); + s.set(fields[53], toMxArray(di.freeMemory())); + s.set(fields[54], toMxArray(di.totalMemory())); + { + const char *fieldsFS[15] = {"Compute10", "Compute11", "Compute12", + "Compute13", "Compute20", "Compute21", "Compute30", "Compute32", + "Compute35", "Compute50", "GlobalAtomics", "SharedAtomics", + "NativeDouble", "WarpShuffleFunctions", "DynamicParallelism"}; + MxArray sf = MxArray::Struct(fieldsFS, 15); + sf.set(fieldsFS[0], di.supports(cv::cuda::FEATURE_SET_COMPUTE_10)); + sf.set(fieldsFS[1], di.supports(cv::cuda::FEATURE_SET_COMPUTE_11)); + sf.set(fieldsFS[2], di.supports(cv::cuda::FEATURE_SET_COMPUTE_12)); + sf.set(fieldsFS[3], 
di.supports(cv::cuda::FEATURE_SET_COMPUTE_13)); + sf.set(fieldsFS[4], di.supports(cv::cuda::FEATURE_SET_COMPUTE_20)); + sf.set(fieldsFS[5], di.supports(cv::cuda::FEATURE_SET_COMPUTE_21)); + sf.set(fieldsFS[6], di.supports(cv::cuda::FEATURE_SET_COMPUTE_30)); + sf.set(fieldsFS[7], di.supports(cv::cuda::FEATURE_SET_COMPUTE_32)); + sf.set(fieldsFS[8], di.supports(cv::cuda::FEATURE_SET_COMPUTE_35)); + sf.set(fieldsFS[9], di.supports(cv::cuda::FEATURE_SET_COMPUTE_50)); + sf.set(fieldsFS[10], di.supports(cv::cuda::GLOBAL_ATOMICS)); + sf.set(fieldsFS[11], di.supports(cv::cuda::SHARED_ATOMICS)); + sf.set(fieldsFS[12], di.supports(cv::cuda::NATIVE_DOUBLE)); + sf.set(fieldsFS[13], di.supports(cv::cuda::WARP_SHUFFLE_FUNCTIONS)); + sf.set(fieldsFS[14], di.supports(cv::cuda::DYNAMIC_PARALLELISM)); + s.set(fields[55], sf); + } + s.set(fields[56], di.isCompatible()); + return s; +} +} + /** * Main entry called from Matlab * @param nlhs number of left-hand-side arguments @@ -28,29 +358,35 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) // Operation switch if (method == "checkHardwareSupport") { nargchk(nrhs==1 && nlhs<=1); - MxArray s = MxArray::Struct(); - s.set("MMX", checkHardwareSupport(CV_CPU_MMX)); - s.set("SSE", checkHardwareSupport(CV_CPU_SSE)); - s.set("SSE2", checkHardwareSupport(CV_CPU_SSE2)); - s.set("SSE3", checkHardwareSupport(CV_CPU_SSE3)); - s.set("SSSE3", checkHardwareSupport(CV_CPU_SSSE3)); - s.set("SSE4_1", checkHardwareSupport(CV_CPU_SSE4_1)); - s.set("SSE4_2", checkHardwareSupport(CV_CPU_SSE4_2)); - s.set("POPCNT", checkHardwareSupport(CV_CPU_POPCNT)); - s.set("FP16", checkHardwareSupport(CV_CPU_FP16)); - s.set("AVX", checkHardwareSupport(CV_CPU_AVX)); - s.set("AVX2", checkHardwareSupport(CV_CPU_AVX2)); - s.set("FMA3", checkHardwareSupport(CV_CPU_FMA3)); - s.set("AVX_512F", checkHardwareSupport(CV_CPU_AVX_512F)); - s.set("AVX_512BW", checkHardwareSupport(CV_CPU_AVX_512BW)); - s.set("AVX_512CD", checkHardwareSupport(CV_CPU_AVX_512CD)); - s.set("AVX_512DQ", checkHardwareSupport(CV_CPU_AVX_512DQ)); - s.set("AVX_512ER", checkHardwareSupport(CV_CPU_AVX_512ER)); - s.set("AVX_512IFMA512", checkHardwareSupport(CV_CPU_AVX_512IFMA512)); - s.set("AVX_512PF", checkHardwareSupport(CV_CPU_AVX_512PF)); - s.set("AVX_512VBMI", checkHardwareSupport(CV_CPU_AVX_512VBMI)); - s.set("AVX_512VL", checkHardwareSupport(CV_CPU_AVX_512VL)); - s.set("NEON", checkHardwareSupport(CV_CPU_NEON)); + const char *fields[23] = {"MMX", "SSE", "SSE2", "SSE3", "SSSE3", + "SSE4_1", "SSE4_2", "POPCNT", "FP16", "AVX", "AVX2", "FMA3", + "AVX_512F", "AVX_512BW", "AVX_512CD", "AVX_512DQ", "AVX_512ER", + "AVX_512IFMA512", "AVX_512PF", "AVX_512VBMI", "AVX_512VL", "NEON", + "VSX"}; + MxArray s = MxArray::Struct(fields, 23); + s.set(fields[0], checkHardwareSupport(CV_CPU_MMX)); + s.set(fields[1], checkHardwareSupport(CV_CPU_SSE)); + s.set(fields[2], checkHardwareSupport(CV_CPU_SSE2)); + s.set(fields[3], checkHardwareSupport(CV_CPU_SSE3)); + s.set(fields[4], checkHardwareSupport(CV_CPU_SSSE3)); + s.set(fields[5], checkHardwareSupport(CV_CPU_SSE4_1)); + s.set(fields[6], checkHardwareSupport(CV_CPU_SSE4_2)); + s.set(fields[7], checkHardwareSupport(CV_CPU_POPCNT)); + s.set(fields[8], checkHardwareSupport(CV_CPU_FP16)); + s.set(fields[9], checkHardwareSupport(CV_CPU_AVX)); + s.set(fields[10], checkHardwareSupport(CV_CPU_AVX2)); + s.set(fields[11], checkHardwareSupport(CV_CPU_FMA3)); + s.set(fields[12], checkHardwareSupport(CV_CPU_AVX_512F)); + s.set(fields[13], checkHardwareSupport(CV_CPU_AVX_512BW)); + 
s.set(fields[14], checkHardwareSupport(CV_CPU_AVX_512CD)); + s.set(fields[15], checkHardwareSupport(CV_CPU_AVX_512DQ)); + s.set(fields[16], checkHardwareSupport(CV_CPU_AVX_512ER)); + s.set(fields[17], checkHardwareSupport(CV_CPU_AVX_512IFMA512)); + s.set(fields[18], checkHardwareSupport(CV_CPU_AVX_512PF)); + s.set(fields[19], checkHardwareSupport(CV_CPU_AVX_512VBMI)); + s.set(fields[20], checkHardwareSupport(CV_CPU_AVX_512VL)); + s.set(fields[21], checkHardwareSupport(CV_CPU_NEON)); + s.set(fields[22], checkHardwareSupport(CV_CPU_VSX)); plhs[0] = s; } else if (method == "getBuildInformation") { @@ -81,6 +417,129 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) nargchk(nrhs==2 && nlhs==0); setUseOptimized(rhs[1].toBool()); } + else if (method == "getIppVersion") { + nargchk(nrhs==1 && nlhs<=1); + plhs[0] = MxArray(ipp::getIppVersion()); + } + else if (method == "useIPP") { + nargchk(nrhs==1 && nlhs<=1); + plhs[0] = MxArray(ipp::useIPP()); + } + else if (method == "setUseIPP") { + nargchk(nrhs==2 && nlhs==0); + ipp::setUseIPP(rhs[1].toBool()); + } + else if (method == "useIPP_NE") { + nargchk(nrhs==1 && nlhs<=1); + plhs[0] = MxArray(ipp::useIPP_NE()); + } + else if (method == "setUseIPP_NE") { + nargchk(nrhs==2 && nlhs==0); + ipp::setUseIPP_NE(rhs[1].toBool()); + } + else if (method == "haveOpenVX") { + nargchk(nrhs==1 && nlhs<=1); + plhs[0] = MxArray(haveOpenVX()); + } + else if (method == "useOpenVX") { + nargchk(nrhs==1 && nlhs<=1); + plhs[0] = MxArray(useOpenVX()); + } + else if (method == "setUseOpenVX") { + nargchk(nrhs==2 && nlhs==0); + setUseOpenVX(rhs[1].toBool()); + } + else if (method == "haveOpenCL") { + nargchk(nrhs==1 && nlhs<=1); + plhs[0] = MxArray(ocl::haveOpenCL()); + } + else if (method == "haveAmdBlas") { + nargchk(nrhs==1 && nlhs<=1); + plhs[0] = MxArray(ocl::haveAmdBlas()); + } + else if (method == "haveAmdFft") { + nargchk(nrhs==1 && nlhs<=1); + plhs[0] = MxArray(ocl::haveAmdFft()); + } + else if (method == "haveSVM") { + nargchk(nrhs==1 && nlhs<=1); + plhs[0] = MxArray(ocl::haveSVM()); + } + else if (method == "useOpenCL") { + nargchk(nrhs==1 && nlhs<=1); + plhs[0] = MxArray(ocl::useOpenCL()); + } + else if (method == "setUseOpenCL") { + nargchk(nrhs==2 && nlhs==0); + ocl::setUseOpenCL(rhs[1].toBool()); + } + else if (method == "getPlatfomsInfo") { + nargchk(nrhs==1 && nlhs<=1); + vector vpi; + ocl::getPlatfomsInfo(vpi); + plhs[0] = toStruct(vpi); + } + else if (method == "getCudaEnabledDeviceCount") { + nargchk(nrhs==1 && nlhs<=1); + plhs[0] = MxArray(cuda::getCudaEnabledDeviceCount()); + } + else if (method == "getDevice") { + nargchk(nrhs==1 && nlhs<=1); + plhs[0] = MxArray(cuda::getDevice()); + } + else if (method == "setDevice") { + nargchk(nrhs==2 && nlhs==0); + cuda::setDevice(rhs[1].toInt()); + } + else if (method == "resetDevice") { + nargchk(nrhs==1 && nlhs==0); + cuda::resetDevice(); + } + else if (method == "deviceSupports") { + nargchk(nrhs==1 && nlhs<=1); + const char *fields[10] = {"Compute10", "Compute11", "Compute12", + "Compute13", "Compute20", "Compute21", "Compute30", "Compute32", + "Compute35", "Compute50"}; + MxArray s = MxArray::Struct(fields, 10); + s.set(fields[0], cuda::deviceSupports(cv::cuda::FEATURE_SET_COMPUTE_10)); + s.set(fields[1], cuda::deviceSupports(cv::cuda::FEATURE_SET_COMPUTE_11)); + s.set(fields[2], cuda::deviceSupports(cv::cuda::FEATURE_SET_COMPUTE_12)); + s.set(fields[3], cuda::deviceSupports(cv::cuda::FEATURE_SET_COMPUTE_13)); + s.set(fields[4], 
cuda::deviceSupports(cv::cuda::FEATURE_SET_COMPUTE_20)); + s.set(fields[5], cuda::deviceSupports(cv::cuda::FEATURE_SET_COMPUTE_21)); + s.set(fields[6], cuda::deviceSupports(cv::cuda::FEATURE_SET_COMPUTE_30)); + s.set(fields[7], cuda::deviceSupports(cv::cuda::FEATURE_SET_COMPUTE_32)); + s.set(fields[8], cuda::deviceSupports(cv::cuda::FEATURE_SET_COMPUTE_35)); + s.set(fields[9], cuda::deviceSupports(cv::cuda::FEATURE_SET_COMPUTE_50)); + plhs[0] = s; + } + else if (method == "printCudaDeviceInfo") { + nargchk(nrhs==2 && nlhs==0); + cuda::printCudaDeviceInfo(rhs[1].toInt()); + } + else if (method == "printShortCudaDeviceInfo") { + nargchk(nrhs==2 && nlhs==0); + cuda::printShortCudaDeviceInfo(rhs[1].toInt()); + } + else if (method == "deviceInfo") { + nargchk(nrhs==2 && nlhs<=1); + cuda::DeviceInfo di(rhs[1].toInt()); + plhs[0] = toStruct(di); + } + else if (method == "useTegra") { + nargchk(nrhs==1 && nlhs<=1); +#ifdef HAVE_TEGRA_OPTIMIZATION + plhs[0] = MxArray(tegra::useTegra()); +#else + plhs[0] = MxArray(false); +#endif + } + else if (method == "setUseTegra") { + nargchk(nrhs==2 && nlhs==0); +#ifdef HAVE_TEGRA_OPTIMIZATION + tegra::setUseTegra(rhs[1].toBool()); +#endif + } else mexErrMsgIdAndTxt("mexopencv:error", "Unrecognized operation %s", method.c_str()); diff --git a/src/+cv/threshold.cpp b/src/+cv/threshold.cpp index 3a5953756..b47d58da5 100644 --- a/src/+cv/threshold.cpp +++ b/src/+cv/threshold.cpp @@ -53,7 +53,7 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) thresh = rhs[1].toDouble(); // Process - Mat src(rhs[0].toMat()), // 8u, 16s, 32f, 64f + Mat src(rhs[0].toMat()), // 8u, 16s, 16u, 32f, 64f dst; thresh = threshold(src, dst, thresh, maxval, type); plhs[0] = MxArray(dst); diff --git a/src/+cv/undistortPoints.cpp b/src/+cv/undistortPoints.cpp index c714497b6..d7efad966 100644 --- a/src/+cv/undistortPoints.cpp +++ b/src/+cv/undistortPoints.cpp @@ -26,12 +26,15 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) // Option processing Mat R, P; + TermCriteria criteria(TermCriteria::MAX_ITER, 5, 0.01); for (int i=3; i<nrhs; i+=2) { [...] Ptr<BRISK> createBRISK( vector<MxArray>::const_iterator first, vector<MxArray>::const_iterator last) { nargchk(((last-first) % 2) == 0); - // second variant for custom patterns + // second/third variants for a custom pattern if ((last-first) >= 2 && !first->isChar()) { vector<float> radiusList(first->toVector<float>()); ++first; vector<int> numberList(first->toVector<int>()); ++first; + int thresh = 20; + int octaves = 3; float dMax = 5.85f, dMin = 8.2f; vector<int> indexChange; for (; first != last; first += 2) { string key((*first).toString()); const MxArray& val = *(first + 1); - if (key == "DMax") + if (key == "Threshold") + thresh = val.toInt(); + else if (key == "Octaves") + octaves = val.toInt(); + else if (key == "DMax") dMax = val.toFloat(); else if (key == "DMin") dMin = val.toFloat(); @@ -44,7 +50,8 @@ Ptr<BRISK> createBRISK( mexErrMsgIdAndTxt("mexopencv:error", "Unrecognized option %s", key.c_str()); } - return BRISK::create(radiusList, numberList, dMax, dMin, indexChange); + return BRISK::create(thresh, octaves, + radiusList, numberList, dMax, dMin, indexChange); } // first variant else { @@ -188,6 +195,7 @@ Ptr<GFTTDetector> createGFTTDetector( double qualityLevel = 0.01; double minDistance = 1; int blockSize = 3; + int gradientSize = 3; bool useHarrisDetector = false; double k = 0.04; for (; first != last; first += 2) { @@ -201,6 +209,8 @@ Ptr<GFTTDetector> createGFTTDetector( minDistance = val.toDouble(); else if (key == "BlockSize") blockSize = val.toInt(); + else if (key == "GradientSize") + gradientSize = 
val.toInt(); else if (key == "HarrisDetector") useHarrisDetector = val.toBool(); else if (key == "K") @@ -210,7 +220,7 @@ Ptr<GFTTDetector> createGFTTDetector( "Unrecognized option %s", key.c_str()); } return GFTTDetector::create(maxCorners, qualityLevel, minDistance, - blockSize, useHarrisDetector, k); + blockSize, gradientSize, useHarrisDetector, k); } Ptr<SimpleBlobDetector> createSimpleBlobDetector( diff --git a/test/UnitTest.m b/test/UnitTest.m index f69baebbd..ec1a9f382 100644 --- a/test/UnitTest.m +++ b/test/UnitTest.m @@ -1,60 +1,59 @@ function varargout = UnitTest(varargin) %UNITTEST Helper function for mexopencv unit testing % - % [results, passed] = UnitTest() - % [...] = UnitTest('OptionName',optionValue, ...) + % [results, passed] = UnitTest() + % [...] = UnitTest('OptionName',optionValue, ...) % % ## Options % * __MainModules__ enable main modules tests. default true % * __ContribModules__ enable contrib modules tests. default false % * __MatchPattern__ regex pattern to filter test classes. Only matched - % tests are kept. default empty (no filtering) + % tests are kept. default empty (no filtering) % * __Verbosity__ Verbosity level. default 1: - % * __0__ quiet mode. - % * __1__ dot-matrix output (one character per test, - % either ".", "S", or "F"). Good for minimal output. - % * __2__ verbose output, one line per test (test name and status), - % along with error message and stack trace if any. + % * __0__ quiet mode. + % * __1__ dot-matrix output (one character per test, either ".", "S", or + % "F"). Good for minimal output. + % * __2__ verbose output, one line per test (test name and status), + % along with error message and stack trace if any. % * __HotLinks__ Allow HTML hyperlinks in error messages. default 'default' % * __FilterStack__ remove test framework from exceptions stack traces. - % default false + % default false % * __DryRun__ don't actually run the tests, just print them. default false % * __Progress__ display a graphical progress bar. default false % * __LogFile__ name of log file (output logged using DIARY). default is a - % timestamped file named `UnitTest_*.log` in current directory. Set - % to empty string to disable logging. + % timestamped file named `UnitTest_*.log` in current directory. Set to + % empty string to disable logging. % * __XUnitFile__ export results to an XML file (in xUnit Format), this - % can be consumed by several CI systems. default is `tests.xml`. Set - % to empty string to disable report. + % can be consumed by several CI systems. default is `tests.xml`. Set to + % empty string to disable report. % % ## Output % * __results__ output structure of results with the following fields: - % * __Duration__ total time elapsed running all tests. - % * __Timestamp__ when test suite was executed (serial date number). - % * __Passed__ number of tests passed. - % * __Failed__ number of tests failed. - % * __Incomplete__ number of tests skipped. - % * __Details__ structure array (one struct for each test) with the - % following fields: - % * __Name__ test name. - % * __Duration__ time elapsed running test. - % * __Timestamp__ when test case was executed. - % * __Passed__ boolean indicating if test passed. - % * __Failed__ boolean indicating if test failed. - % * __Incomplete__ boolean indicating if test was incomplete - % (skipped). - % * __Exception__ exception thrown if failed/skipped. - % * __Logs__ test runner log. + % * __Duration__ total time elapsed running all tests. + % * __Timestamp__ when test suite was executed (serial date number). 
+ % * __Passed__ number of tests passed. + % * __Failed__ number of tests failed. + % * __Incomplete__ number of tests skipped. + % * __Details__ structure array (one struct for each test) with the + % following fields: + % * __Name__ test name. + % * __Duration__ time elapsed running test. + % * __Timestamp__ when test case was executed. + % * __Passed__ boolean indicating if test passed. + % * __Failed__ boolean indicating if test failed. + % * __Incomplete__ boolean indicating if test was incomplete (skipped). + % * __Exception__ exception thrown if failed/skipped. + % * __Logs__ test runner log. % * __passed__ boolean indicating tests status (passed or failed). % % ## Usage % - % cd test; - % results = UnitTest('Verbosity',0); - % t = struct2table(results.Details) - % sortrows(t(t.Duration>1,:), 'Duration') % inspect slow tests - % t(t.Incomplete|t.Failed,:) % inspect non-passing tests - % disp(results.Log) + % cd test; + % results = UnitTest('Verbosity',0); + % t = struct2table(results.Details) + % sortrows(t(t.Duration>1,:), 'Duration') % inspect slow tests + % t(t.Incomplete|t.Failed,:) % inspect non-passing tests + % disp(results.Log) % % See also: matlab.unittest % @@ -91,7 +90,7 @@ function opts = parse_options(varargin) %PARSE_OPTIONS Helper function to parse input arguments % - % opts = parse_options(...) + % opts = parse_options(...) % % ## Output % * __opts__ options structure. @@ -161,7 +160,7 @@ function skip = skip_class(fpath) %SKIP_CLASS Determine if test class should be skipped (Octave) % - % skip = skip_class(fpath) + % skip = skip_class(fpath) % % ## Input % * __fpath__ full absolute path to M-file to check. @@ -191,7 +190,7 @@ function tests = testsuite_fromFolder(dpath, opts) %TESTSUITE_FROMFOLDER Create test suite from all test classes in a folder % - % tests = testsuite_fromFolder(dpath, opts) + % tests = testsuite_fromFolder(dpath, opts) % % ## Input % * __dpath__ Folder containing test classes `Test*.m`. @@ -202,7 +201,7 @@ % % ## Usage % - % t = testsuite_fromFolder(fullfile(mexopencv.root(),'test','unit_tests')); + % t = testsuite_fromFolder(fullfile(mexopencv.root(),'test','unit_tests')); % % Test class files must be named with a "Test" prefix. % @@ -243,12 +242,12 @@ function tests = testsuite_fromPackage(name, opts) %TESTSUITE_FROMPACKAGE Create test suite from package % - % tests = testsuite_fromPackage(name, opts) + % tests = testsuite_fromPackage(name, opts) % % ## Input % * __name__ Package. This can be specified as: - % * name of a package as a string - % * metapackage object associated with the package + % * name of a package as a string + % * metapackage object associated with the package % * __opts__ Options structure. % % ## Output @@ -309,14 +308,14 @@ function tests = testsuite_fromClass(klass, opts) %TESTSUITE_FROMCLASS Create test suite from test class % - % tests = testsuite_fromClass(klass, opts) + % tests = testsuite_fromClass(klass, opts) % % ## Input % * __klass__ Test class. This can be specified as: - % * name of class as a string - % * path to class file as a string - % * metaclass object associated with the test class - % * instance of test class itself as object + % * name of class as a string + % * path to class file as a string + % * metaclass object associated with the test class + % * instance of test class itself as object % * __opts__ Options structure. 
% % ## Output @@ -324,10 +323,10 @@ % % ## Usage % - % t = testsuite_fromClass('TestBlur'); - % t = testsuite_fromClass(fullfile(mexopencv.root(),'test','unit_tests','TestBlur.m')); - % t = testsuite_fromClass(?TestBlur); - % t = testsuite_fromClass(TestBlur()); + % t = testsuite_fromClass('TestBlur'); + % t = testsuite_fromClass(fullfile(mexopencv.root(),'test','unit_tests','TestBlur.m')); + % t = testsuite_fromClass(?TestBlur); + % t = testsuite_fromClass(TestBlur()); % % The class must be on the path and contain test methods % (static functions whose names start with "test"). @@ -379,7 +378,7 @@ function [results, passed] = testsuite_run(tests, opts) %TESTSUITE_RUN Execute all tests in a suite % - % [results, passed] = testsuite_run(tests, opts) + % [results, passed] = testsuite_run(tests, opts) % % ## Input % * __tests__ Cell array of test names to run. @@ -428,7 +427,7 @@ function status = testcase_run(t, opts) %TESTCASE_RUN Run test % - % status = testcase_run(t, opts) + % status = testcase_run(t, opts) % % ## Input % * __t__ test name to run. @@ -436,9 +435,9 @@ % % ## Output % * __status__ Result of running test. One of: - % * __1__ pass - % * __0__ fail - % * __-1__ skip + % * __1__ pass + % * __0__ fail + % * __-1__ skip % % See also: matlab.unittest.TestCase.run % @@ -477,7 +476,7 @@ function varargout = testrunner_monitor(opts, action, t, varargin) %TESTRUNNER_MONITOR Test runner monitor % - % [...] = testrunner_monitor(opts, action, t, ...) + % [...] = testrunner_monitor(opts, action, t, ...) % % ## Input % * __opts__ Options structure. @@ -615,7 +614,7 @@ function print_version() %PRINT_VERSION Display MATLAB/Octave version % - % print_summary(results) + % print_version() % % See also: ver, version % @@ -635,7 +634,7 @@ function print_version() function print_summary(results) %PRINT_SUMMARY Display summary of tests totals % - % print_summary(results) + % print_summary(results) % % ## Input % * __results__ output structure of results. @@ -650,7 +649,7 @@ function print_summary(results) function print_faults(results, opts) %PRINT_FAULTS Display list of exceptions thrown from tests % - % print_faults(results, opts) + % print_faults(results, opts) % % ## Input % * __results__ output structure of results. @@ -683,7 +682,7 @@ function print_faults(results, opts) function export_xunit(results, opts) %EXPORT_XUNIT Save test results in xUnit XML Format % - % export_xunit(results, opts) + % export_xunit(results, opts) % % ## Input % * __results__ output structure of results. @@ -746,11 +745,11 @@ function export_xunit(results, opts) function str = exception_getReport(ME, opts) %EXCEPTION_GETREPORT Get error message for exception % - % str = exception_getReport(ME, opts) + % str = exception_getReport(ME, opts) % % ## Input % * __ME__ exception caught. Either a MATLAB MException object or an - % Octave error structure. + % Octave error structure. % * __opts__ Options structure. 
% % ## Output diff --git a/test/ellipses.jpg b/test/ellipses.jpg new file mode 100644 index 000000000..def6f7b40 Binary files /dev/null and b/test/ellipses.jpg differ diff --git a/test/unit_tests/TestDrawContours.m b/test/unit_tests/TestDrawContours.m index 9c25af6c1..97a07598a 100644 --- a/test/unit_tests/TestDrawContours.m +++ b/test/unit_tests/TestDrawContours.m @@ -34,7 +34,7 @@ function test_2 [contours, hierarchy] = cv.findContours(TestDrawContours.img); - contours = cellfun(@(c) cat(1,c{:}), contours, 'Uniform',false); + contours = cellfun(@(c) cat(1,c{:}), contours, 'UniformOutput',false); hierarchy = cat(1, hierarchy{:}); img = repmat(TestDrawContours.img, [1 1 3]); im = cv.drawContours(img, contours, ... diff --git a/test/unit_tests/TestEigenNonSymmetric.m b/test/unit_tests/TestEigenNonSymmetric.m new file mode 100644 index 000000000..68bf821cc --- /dev/null +++ b/test/unit_tests/TestEigenNonSymmetric.m @@ -0,0 +1,21 @@ +classdef TestEigenNonSymmetric + %TestEigenNonSymmetric + + methods (Static) + function test_1 + A = randn(5); + evals = cv.eigenNonSymmetric(A); + [evals,evecs] = cv.eigenNonSymmetric(A); + end + + function test_error_argnum + try + cv.eigenNonSymmetric(); + throw('UnitTest:Fail'); + catch e + assert(strcmp(e.identifier,'mexopencv:error')); + end + end + end + +end diff --git a/test/unit_tests/TestFindContours.m b/test/unit_tests/TestFindContours.m index f4acd3ef4..4202002bd 100644 --- a/test/unit_tests/TestFindContours.m +++ b/test/unit_tests/TestFindContours.m @@ -33,7 +33,7 @@ {'vector', 'numel',4, 'integer', '<',numel(contours)}), ... hierarchy); end - contours = cellfun(@(c) cat(1,c{:}), contours, 'Uniform',false); + contours = cellfun(@(c) cat(1,c{:}), contours, 'UniformOutput',false); hierarchy = cat(1, hierarchy{:}); end diff --git a/test/unit_tests/TestFitEllipse.m b/test/unit_tests/TestFitEllipse.m index 27baeb649..2a6665718 100644 --- a/test/unit_tests/TestFitEllipse.m +++ b/test/unit_tests/TestFitEllipse.m @@ -19,6 +19,19 @@ assert(all(ismember({'center','size','angle'}, fieldnames(rct)))); end + function test_methods + % noisy circle + t = linspace(0, 2*pi, 50).'; + points = bsxfun(@plus, [cos(t) sin(t)]*100, [150 150]); + points = bsxfun(@plus, points, randn(size(points))*10); + + algs = {'Linear', 'Direct', 'AMS'}; + for i=1:numel(algs) + rct = cv.fitEllipse(points, 'Method',algs{i}); + validateattributes(rct, {'struct'}, {'scalar'}); + end + end + function test_error_argnum try cv.fitEllipse(); diff --git a/test/unit_tests/TestHOGDescriptor.m b/test/unit_tests/TestHOGDescriptor.m index c51811227..c48e5f4ec 100644 --- a/test/unit_tests/TestHOGDescriptor.m +++ b/test/unit_tests/TestHOGDescriptor.m @@ -67,7 +67,7 @@ end % group similar rectangles - rects = cellfun(@(p) [p hog.WinSize], pts, 'Uniform',false); + rects = cellfun(@(p) [p hog.WinSize], pts, 'UniformOutput',false); [rects, weights] = hog.groupRectangles(rects, weights, ... 
'GroupThreshold',1, 'EPS',0.2); if ~isempty(rects) diff --git a/test/unit_tests/TestImencode.m b/test/unit_tests/TestImencode.m index f1131e557..5c5157590 100644 --- a/test/unit_tests/TestImencode.m +++ b/test/unit_tests/TestImencode.m @@ -61,7 +61,7 @@ function test_error_unrecognized_extension %TODO: crashes Octave - if mexopencv.isOctave() + if true error('mexopencv:testskip', 'todo'); end diff --git a/test/unit_tests/TestImwrite.m b/test/unit_tests/TestImwrite.m index 2f0734206..aa9d0e15a 100644 --- a/test/unit_tests/TestImwrite.m +++ b/test/unit_tests/TestImwrite.m @@ -63,7 +63,7 @@ function test_error_unrecognized_extension %TODO: crashes Octave - if mexopencv.isOctave() + if true error('mexopencv:testskip', 'todo'); end diff --git a/test/unit_tests/TestKeyPointsFilter.m b/test/unit_tests/TestKeyPointsFilter.m index 190237cde..79f2b880f 100644 --- a/test/unit_tests/TestKeyPointsFilter.m +++ b/test/unit_tests/TestKeyPointsFilter.m @@ -39,6 +39,10 @@ validateattributes(kp, {'struct'}, {'vector'}); assert(all(ismember(TestKeyPointsFilter.fields, fieldnames(kp)))); + kp = cv.KeyPointsFilter.removeDuplicatedSorted(kpts); + validateattributes(kp, {'struct'}, {'vector'}); + assert(all(ismember(TestKeyPointsFilter.fields, fieldnames(kp)))); + num = 50; kp = cv.KeyPointsFilter.retainBest(kpts, num); validateattributes(kp, {'struct'}, {'vector'}); diff --git a/test/unit_tests/TestNet.m b/test/unit_tests/TestNet.m index d31b3cb80..2abadb372 100644 --- a/test/unit_tests/TestNet.m +++ b/test/unit_tests/TestNet.m @@ -24,8 +24,8 @@ % forward pass (blob from multiple images) blob = cv.Net.blobFromImages({img1,img2}, 'Size',[224 224]); - net.setInput(blob, 'data'); - prob = net.forward('prob'); + net.setInput(blob); + prob = net.forward(); validateattributes(prob, {'numeric'}, ... {'size',[2 1000], 'real', '>=',0, '<=',1}); [p,idx] = max(prob,[],2); @@ -51,6 +51,16 @@ blobs = net.forwardAll('conv1/7x7_s2'); end + function test_shrink_caffe_fp16 + model = fullfile(mexopencv.root(), 'test', 'dnn', 'GoogLeNet', ... 
+ 'bvlc_googlenet.caffemodel'); + model_fp16 = fullfile(tempdir(), 'bvlc_googlenet_fp16.caffemodel'); + + cObj = onCleanup(@() delete(model_fp16)); + cv.Net.shrinkCaffeModel(model, model_fp16); + assert(exist(model_fp16, 'file') == 2); + end + function test_layers % Convolution layer % (64 filters each of size 7x7, 3x3 padding, 2x2 stride) @@ -71,14 +81,13 @@ % add layer net = cv.Net(); lp_id = net.addLayer(lp.name, lp.type, lp); - validateattributes(lp_id, {'numeric'}, {'scalar', 'integer'}); + validateattributes(lp_id, {'int32'}, {'scalar', 'integer'}); assert(lp_id ~= -1); - lp_id = int32(lp_id); % connect input layer to conv layer net.setInputsNames('data'); if true - net.connect(int32(0), 0, int32(lp_id), 0); + net.connect(0, 0, lp_id, 0); elseif true %net.connect('_input.0', lp.name); net.connect('_input', lp.name); @@ -125,7 +134,6 @@ id = net.getLayerId(names{1}); validateattributes(id, {'numeric'}, {'scalar', 'integer'}); - id = int32(id); assert(lp_id ~= -1); %assert(isequal(id, lp_id)) @@ -155,14 +163,13 @@ function net = load_bvlc_googlenet() %TODO: download files (or run the sample "caffe_googlenet_demo.m") - rootdir = fullfile(mexopencv.root(), 'test', 'dnn'); - modelTxt = fullfile(rootdir, 'bvlc_googlenet.prototxt'); + rootdir = fullfile(mexopencv.root(), 'test', 'dnn', 'GoogLeNet'); + modelTxt = fullfile(rootdir, 'deploy.prototxt'); modelBin = fullfile(rootdir, 'bvlc_googlenet.caffemodel'); if exist(modelTxt, 'file') ~= 2 || exist(modelBin, 'file') ~= 2 error('mexopencv:testskip', 'missing data'); end - net = cv.Net(); - net.import('Caffe', modelTxt, modelBin); + net = cv.Net('Caffe', modelTxt, modelBin); assert(~net.empty()); end diff --git a/test/unit_tests/TestTickMeter.m b/test/unit_tests/TestTickMeter.m index ebd838126..d8a6a25af 100644 --- a/test/unit_tests/TestTickMeter.m +++ b/test/unit_tests/TestTickMeter.m @@ -6,14 +6,26 @@ tm = cv.TickMeter(); tm.reset(); tm.start(); - pause(0.2); + pause(0.1); tm.stop(); validateattributes(tm.TimeTicks, {'int64'}, {'scalar'}); validateattributes(tm.TimeMicro, {'double'}, {'scalar'}); validateattributes(tm.TimeMilli, {'double'}, {'scalar'}); validateattributes(tm.TimeSec, {'double'}, {'scalar'}); validateattributes(tm.Counter, {'int64'}, {'scalar'}); - %assert(abs(tm.TimeSec - 0.2) < 0.02); + %assert(abs(tm.TimeSec - 0.1) < 0.02); + end + + function test_2 + t1 = cv.TickMeter.getTickCount(); + pause(0.1); + t2 = cv.TickMeter.getTickCount(); + f = cv.TickMeter.getTickFrequency(); + tsec = double(t2 - t1) / f; + validateattributes(t1, {'int64'}, {'scalar'}); + validateattributes(t2, {'int64'}, {'scalar'}); + validateattributes(f, {'double'}, {'scalar'}); + %assert(abs(tsec - 0.1) < 0.02); end end diff --git a/test/unit_tests/TestUtils.m b/test/unit_tests/TestUtils.m index cf63e1e0f..1405e7f8c 100644 --- a/test/unit_tests/TestUtils.m +++ b/test/unit_tests/TestUtils.m @@ -2,34 +2,115 @@ %TestUtils methods (Static) - function test_1 + function test_version info = cv.Utils.getBuildInformation(); - assert(ischar(info) && ~isempty(info)); + validateattributes(info, {'char'}, {'nonempty'}); v = cv.Utils.version(); - assert(ischar(v) && ~isempty(v)); + validateattributes(v, {'char'}, {'row', 'nonempty'}); end - function test_2 - support = cv.Utils.checkHardwareSupport(); - assert(isstruct(support) && isscalar(support)); + function test_cpu + s = cv.Utils.checkHardwareSupport(); + validateattributes(s, {'struct'}, {'scalar'}); end - function test_3 + function test_num_cpu n = cv.Utils.getNumberOfCPUs(); validateattributes(n, {'numeric'}, 
{'scalar', 'integer', 'nonnegative'}); end - function test_4 + function test_num_threads n = cv.Utils.getNumThreads(); validateattributes(n, {'numeric'}, {'scalar', 'integer'}); cv.Utils.setNumThreads(n); end - function test_5 - tf = cv.Utils.useOptimized(); - validateattributes(tf, {'logical'}, {'scalar'}); - cv.Utils.setUseOptimized(tf); + function test_optimization + b = cv.Utils.useOptimized(); + validateattributes(b, {'logical'}, {'scalar'}); + cv.Utils.setUseOptimized(b); + end + + function test_ipp + str = cv.Utils.getIppVersion(); + validateattributes(str, {'char'}, {'row', 'nonempty'}); + + if ~strcmpi(str, 'disabled') + b = cv.Utils.useIPP(); + validateattributes(b, {'logical'}, {'scalar'}); + cv.Utils.setUseIPP(b); + end + end + + function test_ovx + b = cv.Utils.haveOpenVX(); + validateattributes(b, {'logical'}, {'scalar'}); + + if b + b = cv.Utils.useOpenVX(); + validateattributes(b, {'logical'}, {'scalar'}); + cv.Utils.setUseOpenVX(b); + end + end + + function test_ocl_1 + b = cv.Utils.haveOpenCL(); + validateattributes(b, {'logical'}, {'scalar'}); + + b = cv.Utils.haveAmdBlas(); + validateattributes(b, {'logical'}, {'scalar'}); + + b = cv.Utils.haveAmdFft(); + validateattributes(b, {'logical'}, {'scalar'}); + + b = cv.Utils.haveSVM(); + validateattributes(b, {'logical'}, {'scalar'}); + end + + function test_ocl_2 + if cv.Utils.haveOpenCL() + b = cv.Utils.useOpenCL(); + validateattributes(b, {'logical'}, {'scalar'}); + cv.Utils.setUseOpenCL(b); + end + end + + function test_ocl_3 + if cv.Utils.haveOpenCL() + p = cv.Utils.getPlatfomsInfo(); + validateattributes(p, {'struct'}, {'vector'}); + end + end + + function test_cuda + n = cv.Utils.getCudaEnabledDeviceCount(); + validateattributes(n, {'numeric'}, {'scalar', 'integer'}); + if n > 0 + id = cv.Utils.getDevice(); + validateattributes(id, {'numeric'}, {'scalar', 'integer'}); + cv.Utils.setDevice(id); + + s = cv.Utils.deviceSupports(); + validateattributes(s, {'struct'}, {'scalar'}); + + cv.Utils.printShortCudaDeviceInfo(id); + cv.Utils.printCudaDeviceInfo(id); + + s = cv.Utils.deviceInfo(id); + validateattributes(s, {'struct'}, {'scalar'}); + + cv.Utils.resetDevice(); + end + end + + function test_tegra + % functions only defined for Tegra SoC + if false + b = cv.Utils.useTegra(); + validateattributes(b, {'logical'}, {'scalar'}); + cv.Utils.setUseTegra(b); + end end end diff --git a/test/unit_tests/TestVideoWriter.m b/test/unit_tests/TestVideoWriter.m index e285f7fb2..6872ef323 100644 --- a/test/unit_tests/TestVideoWriter.m +++ b/test/unit_tests/TestVideoWriter.m @@ -4,7 +4,7 @@ methods (Static) function test_1 % we use "Microsoft Video 1" codec (Windows only) - if ~ispc || mexopencv.isOctave() + if ~ispc() || mexopencv.isOctave() error('mexopencv:testskip', 'codecs'); end @@ -35,24 +35,25 @@ end function test_2 - %TODO: this tests some common codecs that worked on my Windows - % machine, no guarantees elsewhere! + % fourCC + extension if true + % builtin MJPG encoder, should work across all systems + codecs = {'MJPG' '.avi'}; + else + %NOTE: these are some common codecs that worked on my Windows + % machine, no guarantees elsewhere! + codecs = { ... + 'PIM1' '.mpg' ; ... + 'MPG1' '.mpg' ; ... + 'MSVC' '.avi' ; ... + 'MJPG' '.avi' ; ... + 'XVID' '.avi' ; ... + 'WMV2' '.wmv' ; ... + 'FLV1' '.flv' ; ... + 'MP4V' '.mp4' ; ... + 'MP42' '.mkv' ; ... + }; end - - % fourCC + extension - codecs = { ... - 'PIM1' '.mpg' ; ... - 'MPG1' '.mpg' ; ... - 'MSVC' '.avi' ; ... - 'MJPG' '.avi' ; ... 
- 'XVID' '.avi' ; ... - 'WMV2' '.wmv' ; ... - 'FLV1' '.flv' ; ... - 'MP4V' '.mp4' ; ... - 'MP42' '.mkv' ; ... - }; w = 640; h = 480; for i=1:size(codecs,1) fname = [tempname() codecs{i,2}]; diff --git a/utils/MDoc.m b/utils/MDoc.m index 9c2daa644..8f3e3608e 100644 --- a/utils/MDoc.m +++ b/utils/MDoc.m @@ -1,207 +1,1333 @@ -classdef MDoc < handle +function varargout = MDoc(varargin) %MDOC mexopencv documentation utility % - % This class generates a simple Matlab HTML documentation using - % matlab's internal utility help2html. To generate documentation, - % create an instance of this class: - % - % addpath('utils'); - % addpath('opencv_contrib'); - % MDoc; - % - % Once you run this command, you can find html documentation under - % MDoc.DIR directory. - % - - properties (Constant) - % Directory to place documentation - DIR = fullfile(mexopencv.root(),'doc','matlab') - % Default CSS file - CSS = fullfile(matlabroot,'toolbox','matlab','helptools',... - 'private','helpwin.css') - end - - properties (SetAccess = private) - % Internal use - yet = {}; - % Internal use - processed = {}; - end - - methods - function this = MDoc - %MDOC execute MDoc - if ~exist(MDoc.DIR,'dir'), mkdir(MDoc.DIR); end - - % Copy CSS file - txt = fileread(MDoc.CSS); - txt = this.process_css(txt); - fid = fopen(fullfile(MDoc.DIR,'helpwin.css'),'w'); - fprintf(fid,'%s',txt); - fclose(fid); - - % Make contents index - this.process_index(); - - % Get a list of functions - list = [... - dir(fullfile(mexopencv.root(),'+cv','*.m')); ... - dir(fullfile(mexopencv.root(),'opencv_contrib','+cv','*.m')) ... - ]; - this.yet = strrep({list.name},'.m',''); - while ~isempty(this.yet) - fname = this.yet{1}; - this.process(fname); - this.yet = setdiff(this.yet, fname); - this.processed = union(this.processed,fname); - end + % MDoc() + % MDoc(action) + % MDoc(topic) + % MDoc(..., 'OptionName',optionValue, ...) + % filename = MDoc(...) + % + % ## Input + % * __action__ Special targets (default `-all`). One of: + % * `-clean`: delete all generated docs. + % * `-wiki`: generate HTML wiki files. + % * `-all`: generate full documentation for all mexopencv functions. + % * `-index`: only generate table of contents. + % * `-indexcat`: only generate table of contents (grouped by modules). + % * `-contents`: generate Contents.m file. + % * `-helptoc`: generate helpfuncbycat.xml file (TOC function reference). + % * __topic__ generate docs for specified mexopencv function. + % + % ## Output + % * __filename__ optional, generated HTML file path. + % + % ## Options + % * __Force__ Unconditionally re-create HTML output files. default true + % * __MarkdownRender__ Process markdown. default true + % * __MarkdownBackend__ Markdown parser implementation to use. The + % following backends are available to choose from: + % * 'CommonMark-Java' (https://github.com/atlassian/commonmark-java/) + % * 'MarkdownPapers' (https://github.com/lruiz/MarkdownPapers/) + % * 'pegdown' (https://github.com/sirthias/pegdown/) + % * 'MarkdownJ' (https://github.com/myabc/markdownj/) + % * 'Markdown4j' (https://github.com/jdcasey/markdown4j/) + % * 'Txtmark' (https://github.com/rjeschke/txtmark/) + % * __AutoLinking__ search for "http://" in text and convert them into + % hyperlinks. default false + % * __AutoLinkingDeep__ search for "www." in text and convert them into + % hyperlinks. default false + % * __AutoLinkingCV__ search for "cv.*" functions in text, and convert + % them into hyperlinks. default true + % * __SyntaxHighlighting__ syntax highlighted code. 
default true + % * __PrettifyLang__ language of syntax highlighting. default 'lang-matlab' + % * __TableSorting__ sortable HTML tables (used in index.html). + % default true + % * __NoExternals__ download all CSS/JS external resources locally. + % default true + % + % ## Options (batch mode) + % * __IndexGroups__ group by OpenCV module in index. default true + % * __IgnoreHandleClass__ skip generating redundant docs for methods + % inherited from "handle" base class. default true + % * __Verbose__ Display info messages. default true + % * __Progress__ Show progress bar. default true + % + % This function generates simple MATLAB HTML documentation using + % MATLAB's internal utility help2html. + % + % See also: doc, help, help2html, publish + % + + % validate input arguments + nargoutchk(0, 1); + opts = parse_options(varargin{:}); + + % initialize Java dependencies + init_jsoup(opts); + init_markdown(opts); + + if strcmp(opts.topic, '-clean') + % clean all docs + if isdir(opts.DIR) + if opts.Verbose, fprintf('Cleaning %s\n', opts.DIR); end + rmdir(opts.DIR, 's'); + end + return; + elseif ~any(strcmp(opts.topic, {'-wiki', '-contents', '-helptoc'})) + if ~isdir(opts.DIR) + % prepare output directory and CSS stylesheets + if opts.Verbose, fprintf('Creating %s\n', opts.DIR); end + mkdir(opts.DIR); + copy_stylesheets(opts); end + end - - function txt = process(this, func) - %PROCESS process an entity - txt = help2html(['cv.',func]); - txt = strrep(txt,'&amp;','&'); - txt = strrep(txt,'&lt;','<'); - txt = strrep(txt,'&gt;','>'); - filename = fullfile(MDoc.DIR,[func,'.html']); - fprintf('%s\n',filename); - - % Filter - txt = this.filter_text(txt); - txt = this.markdown(txt); - - % Write - fid = fopen(filename,'w'); - fprintf(fid,'%s',txt); - fclose(fid); - end - - function txt = filter_text(this, txt) - %FILTER_TEXT Filter anchor tags - txt = strrep(txt,sprintf('file:///%s',MDoc.CSS),'helpwin.css'); - txt = regexprep(txt,'<span class="helptopic">([^<]*)</span>','$1'); - [splt,tokens] = regexp(txt,'<a href="matlab:([^"]*)">([^<]*)</a>',... - 'split','tokens'); - tokens = cellfun(@(tok) this.make_link(tok{1},tok{2}),...
- tokens,'UniformOutput',false); - txt = [splt;[tokens,{''}]]; - txt = [txt{:}]; - end - - function txt = make_link(this, href, txt) - %MAKE_LINK Rewrite hyperlinks - A = '<a href="%s">%s</a>'; - - % Link to raw codes: do nothing - tok = regexp(href,'open (.*)', 'tokens', 'once'); - if ~isempty(tok) - txt = ''; + % generate docs + [filename, isbatch, needed] = get_output_filename(opts, opts.topic); + if needed + switch opts.topic + case '-wiki' + download_wiki(opts); + generate_wiki(opts); + case '-all' + generate_all_docs(opts); + case {'-index', '-indexcat'} + txt = generate_index(opts, opts.topic); + case '-contents' + txt = generate_contents_m(opts); + case '-helptoc' + txt = generate_helptoc_xml(opts); + otherwise + txt = generate_doc(opts, opts.topic); + end + if ~isbatch + % write to file + assert(~isempty(txt), 'No doc generated'); + filewrite(filename, txt); + end + end + + % return/show result + if nargout > 0 + % return output filename + varargout{1} = filename; + else + if ~isbatch + % open documentation in embedded browser or editor + [~,~,ext] = fileparts(filename); + if strcmp(ext, '.html') + web(filename, '-new'); + else + edit(filename); end + else + % show output directory + if ispc() + winopen(filename); + else + disp(filename); + end + end + end +end + +function opts = parse_options(varargin) + %PARSE_OPTIONS Parse function input arguments + + % helper function to validate true/false arguments + isbool = @(x) isscalar(x) && (islogical(x) || isnumeric(x)); + + % parse input arguments + p = inputParser(); + p.addOptional('topic', '-all', @ischar); + p.addParameter('Force', true, isbool); + p.addParameter('MarkdownRender', true, isbool); + p.addParameter('MarkdownBackend', 'commonmark-java', @ischar); + p.addParameter('AutoLinking', false, isbool); + p.addParameter('AutoLinkingDeep', false, isbool); + p.addParameter('AutoLinkingCV', true, isbool); + p.addParameter('SyntaxHighlighting', true, isbool); + p.addParameter('PrettifyLang', 'lang-matlab', @ischar); + p.addParameter('TableSorting', true, isbool); + p.addParameter('NoExternals', true, isbool); + p.addParameter('IndexGroups', true, isbool); + p.addParameter('IgnoreHandleClass', true, isbool); + p.addParameter('Verbose', true, isbool); + p.addParameter('Progress', true, isbool); + + % options struct + p.parse(varargin{:}); + opts = p.Results; + + % documentation output directory + opts.DIR = fullfile(mexopencv.root(), 'doc', 'matlab'); + + % validate topic/action + if strncmp(opts.topic, '-', 1) + opts.topic = validatestring(opts.topic, ... + {'-clean', '-wiki', '-all', '-index', '-indexcat', '-contents', '-helptoc'}); + elseif ~strncmp(opts.topic, 'cv.', 3) + % prepend "cv." prefix to topic name if necessary + opts.topic = ['cv.' 
opts.topic]; + end + + % list of recognized backends + backends = { + 'commonmark-java' + 'MarkdownPapers' + 'pegdown' + 'markdownj' + 'markdown4j' + 'txtmark' + }; + opts.MarkdownBackend = validatestring(opts.MarkdownBackend, backends); - % Link to index - if strcmp(href,'helpwin') - txt = sprintf(A, 'index.html', 'Index'); + opts.Ignore = {}; + if opts.IgnoreHandleClass + % methods/properties/events inherited from "handle" + opts.Ignore = union(opts.Ignore, methods('handle')); + opts.Ignore = union(opts.Ignore, properties('handle')); + opts.Ignore = union(opts.Ignore, events('handle')); + opts.Ignore = setdiff(opts.Ignore, 'delete'); % exclude dtor + end +end + +function init_jsoup(opts) + %INIT_JSOUP Download and setup the jsoup Java library + + persistent initialized + if isempty(initialized), initialized = false; end + if ~initialized + % download if it doesn't exist + dname = fullfile(mexopencv.root(), 'utils', 'jars', 'jsoup'); + if ~isdir(dname) + MavenDownload('org.jsoup', 'jsoup', 'OutDir',dname, ... + 'Verbose',opts.Verbose); + end + + % add all JAR files to classpath + if opts.Verbose, disp('Initializing jsoup'); end + JavaAddJarDir(dname); + + % mark as initialized + initialized = true; + end +end + +function init_markdown(opts) + %INIT_MARKDOWN Download and setup backend Markdown Java libraries + + persistent initialized + if isempty(initialized), initialized = {}; end + if ~ismember(opts.MarkdownBackend, initialized) + % download if it doesn't exist + dname = fullfile(mexopencv.root(), 'utils', 'jars', opts.MarkdownBackend); + if ~isdir(dname) + % group and artifact ids + switch opts.MarkdownBackend + case 'commonmark-java' + groupId = 'com.atlassian.commonmark'; + artifactId = 'commonmark'; + case 'MarkdownPapers' + groupId = 'org.tautua.markdownpapers'; + artifactId = 'markdownpapers-core'; + case 'pegdown' + groupId = 'org.pegdown'; + artifactId = 'pegdown'; + case 'markdownj' + groupId = 'org.markdownj'; + artifactId = 'markdownj-core'; + case 'markdown4j' + groupId = 'org.commonjava.googlecode.markdown4j'; + artifactId = 'markdown4j'; + case 'txtmark' + groupId = 'com.github.rjeschke'; + artifactId = 'txtmark'; + otherwise + error('Unrecognized Markdown backend') end + MavenDownload(groupId, artifactId, 'OutDir',dname, ... 
+ 'Verbose',opts.Verbose); + end + + % add all JAR files to classpath + if opts.Verbose, fprintf('Initializing %s\n', opts.MarkdownBackend); end + JavaAddJarDir(dname); + + % mark as initialized + initialized = union(initialized, opts.MarkdownBackend); + end +end + +function JavaAddJarDir(dname) + %JAVAADDJARDIR Add all jar files inside a directory to Java class path + + % list of JAR files inside directory + jars = dir(fullfile(dname, '*.jar')); + jars = {jars.name}; + + % only keep those not already on the Java class path + jcp = javaclasspath(); + idx = cellfun(@(j) all(cellfun(@isempty, strfind(jcp, j))), jars); + jars = jars(idx); - % Link to another function - tok = regexp(href,'helpwin cv\.(.*)', 'tokens', 'once'); - if isempty(tok) - tok = regexp(href,'helpwin\(''cv\.(.*)''\)', 'tokens', 'once'); + % put remaining jars on classpath + if ~isempty(jars) + jars = cellfun(@(j) fullfile(dname, j), jars, 'UniformOutput',false); + javaaddpath(jars); + end +end + +function copy_stylesheets(opts) + %COPY_STYLESHEETS Copy CSS stylesheets to output directory + + % copy default stylesheet locally + CSS = fullfile(toolboxdir('matlab'), 'helptools', 'private', 'helpwin.css'); + copyfile(CSS, opts.DIR); + + % copy second stylesheet with customization + CSS = fullfile(fileparts(mfilename('fullpath')), 'helpwin_custom.css'); + copyfile(CSS, opts.DIR); +end + +function [filename, isbatch, needed] = get_output_filename(opts, topic, fname) + %GET_OUTPUT_FILENAME Output filename according to action + + isbatch = false; + switch topic + case '-wiki' + if nargin > 2 + [~,fname,~] = fileparts(fname); + filename = [strrep(lower(fname), '-', '_') '.html']; + filename = fullfile(mexopencv.root(), 'doc', 'wiki', filename); + else + filename = fullfile(mexopencv.root(), 'doc', 'wiki'); + isbatch = true; end - if ~isempty(tok) - fname = strrep(tok{1},'/','.'); - href = [fname,'.html']; - txt = sprintf(A, href, txt); - if ~any(strcmp(this.processed,fname)) - this.yet = union(this.yet,fname); - end + case '-all' + filename = opts.DIR; + isbatch = true; + case '-index' + filename = fullfile(opts.DIR, 'index.html'); + case '-indexcat' + filename = fullfile(opts.DIR, 'index_cat.html'); + case '-contents' + filename = fullfile(mexopencv.root(), 'Contents.m'); + case '-helptoc' + filename = fullfile(mexopencv.root(), 'doc', 'helpfuncbycat.xml'); + otherwise + filename = fullfile(opts.DIR, [topic '.html']); + end + + % checks if file already exists + needed = isbatch || opts.Force || exist(filename, 'file') ~= 2; +end + +function files = download_wiki(opts) + %DOWNLOAD_WIKI Download wiki files from GitHub + + % list of Wiki files + dname = get_output_filename(opts, '-wiki'); + files = { + 'README.md' + 'Home.md' + 'Getting-Started.md' + 'Developing-a-new-MEX-function.md' + 'Gotchas.md' + 'Installation-(Windows,-MATLAB,-OpenCV-3).md' + 'Installation-(Windows,-Octave,-OpenCV-3).md' + 'Installation-(Linux,-MATLAB,-OpenCV-3).md' + 'Installation-(Linux,-Octave,-OpenCV-3).md' + 'Installation-(Windows,-MATLAB,-OpenCV-2).md' + 'Troubleshooting-(Windows).md' + 'Troubleshooting-(UNIX).md' + }; + + % download from GitHub + % (git clone https://github.com/kyamagu/mexopencv.wiki.git) + url = 'https://rawgit.com/wiki/kyamagu/mexopencv/'; + if ~isdir(dname), mkdir(dname); end + copyfile(fullfile(mexopencv.root(), 'README.markdown'), fullfile(dname, files{1})); + for i=2:numel(files) + fname = fullfile(dname, files{i}); + if exist(fname, 'file') ~= 2 % || opts.Force + if opts.Verbose, fprintf('Downloading %s...\n', files{i}); end + 
urlwrite([url files{i}], fname); + end + end +end + +function generate_wiki(opts) + %GENERATE_WIKI Generate HTML docs from Wiki files + + % process markdown files + dname = get_output_filename(opts, '-wiki'); + files = dir(fullfile(dname, '*.md')); + for i=1:numel(files) + % convert and save as HTML + fname = fullfile(dname, files(i).name); + [filename, ~, needed] = get_output_filename(opts, '-wiki', fname); + if needed + txt = wiki_md2html(opts, fname); + assert(~isempty(txt), 'Failed to convert Markdown'); + filewrite(filename, txt); + end + end +end + +function generate_all_docs(opts) + %GENERATE_ALL_DOCS Generate full docs for all functions + + % create index.html + [filename, ~, needed] = get_output_filename(opts, '-index'); + if needed + if opts.Verbose, disp('Creating index...'); end + txt = generate_index(opts, '-index'); + assert(~isempty(txt), 'Failed to generate index'); + filewrite(filename, txt); + end + + % create index_cat.html + [filename, ~, needed] = get_output_filename(opts, '-indexcat'); + if needed + if opts.Verbose, disp('Creating index...'); end + txt = generate_index(opts, '-indexcat'); + assert(~isempty(txt), 'Failed to generate index'); + filewrite(filename, txt); + end + + % get a list of functions/classes + list = enumerate_mexopencv_members(opts); + + % show progress + if opts.Progress + hWait = waitbar(0, 'Generating docs...', 'Name',mfilename(), ... + 'CreateCancelBtn','setappdata(gcbf,''cancel'',true)'); + setappdata(hWait, 'cancel',false); + wbCleanObj = onCleanup(@() delete(hWait)); + end + + % process all functions + if opts.Verbose, disp('Creating docs...'); end + for i=1:numel(list) + % next topic to process + topic = list{i}; + if opts.Progress + waitbar(i/numel(list), hWait, strrep(topic, '_', '\_')); + if getappdata(hWait, 'cancel'), break; end + end + + % generate HTML doc file + [filename, ~, needed] = get_output_filename(opts, topic); + if needed + txt = generate_doc(opts, topic); + if ~isempty(txt) + filewrite(filename, txt); + status = 'DONE'; + else + % non-existent cv function + % (doc/help/help2html cannot process hidden and private methods) + status = 'MISSING'; end + if opts.Verbose, fprintf('[%s] %s\n', status, topic); end end + end +end - function txt = markdown(this, txt) - %MARKDOWN add html tags - [splt,tok] = regexp(txt,... - '(
<div class="helptext">\s*<pre>.*</pre>\s*</div>)','split','tokens'); - if ~isempty(tok) - tok = regexp(tok{1}{1},'<pre>\s*(.*)</pre>','tokens'); - if ~isempty(tok) - % remove space inserted by matlab - tok = regexprep(tok{1}{1},'\n ','\n'); - % remove function name in the header - tok = regexprep(tok,'^[A-Z0-9_]+\s+(.*)$','$1'); - % markup - tok = MarkdownPapers(tok); - % autolink cv functions - tok = regexprep(tok,'cv\.([a-zA-Z0-9_]+)([:;,\.\(\s])',... - '<a href="$1.html">cv.$1</a>$2'); - txt = [splt;{sprintf('<div class="helpcontent">%s</div>
',tok),''}]; - txt = [txt{:}]; - end +function txt = generate_doc(opts, topic) + %GENERATE_DOC Generate HTML docs for a function, class method/member, etc. + + % convert M-help into parsed HTML document + jdoc = parse_help2html(opts, topic); + if isempty(jdoc) + txt = ''; + return; + end + + % convert document back to HTML string + txt = char(jdoc.toString()); +end + +function txt = generate_index(opts, topic) + %GENERATE_INDEX Create index.html/index_cat.html containing list of all functions + + % HTML doc of cv package function/class names and their H1-lines + opts.MarkdownRender = false; % turn off markdown processing for index + jdoc = parse_help2html(opts, 'cv'); + if isempty(jdoc) + txt = ''; + return; + end + + % replace title near the top + div = jdoc.select('div.title').empty().first(); + assert(~isempty(div), '
<div class="title"> not found'); + a = div.appendElement('a'); + a.text('mexopencv').attr('href', 'https://github.com/kyamagu/mexopencv'); + % append description after it + p = jdoc.createElement('p').addClass('h1line'); + p.appendText('Collection and development kit of MATLAB MEX functions for '); + p.appendElement('a').text('OpenCV').attr('href', 'https://opencv.org/'); + p.appendText(' library.'); + div.after(p); + + % extract listing of cv package function/class names and their H1-lines + pre = jdoc.select('div.helptext > pre').first(); + assert(~isempty(pre), '<pre> not found');
+    txt = char(pre.text());
+    t = create_files_table(opts, txt);
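+    % (illustration: each "name - H1 description" listing line becomes one
+    % row, e.g. a hypothetical line "foo - does something" yields the row
+    % {'cv.foo', 'does something'}; modules/repos columns are joined in when
+    % opts.IndexGroups is on)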
+
+    % format it as HTML table
+    switch topic
+        case '-index'
+            % one big table
+            txt = table_to_index_html(opts, t);
+        case '-indexcat'
+            % one table per module
+            txt = table_to_index_cat_html(opts, t);
+        otherwise
+            txt = '';
+    end
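+    % (for illustration, per get_output_filename: '-index' feeds
+    % doc/matlab/index.html and '-indexcat' feeds doc/matlab/index_cat.html,
+    % so e.g. MDoc('-index') rebuilds just the flat table of contents)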
+
+    % insert table
+    div = pre.parent();
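+    % (this parent is the <div class="helptext"> wrapper emitted by
+    % help2html; it is re-purposed below to hold the generated table)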
+    assert(~isempty(div), '
<div> not found'); + div.removeClass('helptext').addClass('helpcontent'); + div.html(txt); + + % style table + tabl = div.select('table'); + assert(~isempty(tabl), '<table> not found'); + tabl.addClass('table'); + if opts.TableSorting + tabl.addClass('sortable'); % enable_sortable_tables + end + + % convert document back to HTML string + txt = char(jdoc.toString()); +end + +function txt = generate_contents_m(opts) + %GENERATE_CONTENTS_M Create Contents.m containing list of all functions + + % cv package function/class names and their H1-lines + txt = help('cv'); + if isempty(txt) + return; + end + + % parse text into table + t = create_files_table(opts, txt); + + % format table as contents.m file + txt = table_to_contents_m(opts, t); +end + +function txt = generate_helptoc_xml(opts) + %GENERATE_HELPTOC_XML Create helpfuncbycat.xml containing list of all functions + + % cv package function/class names and their H1-lines + txt = help('cv'); + if isempty(txt) + return; + end + + % parse text into table + t = create_files_table(opts, txt); + + % format table as helpfuncbycat.xml file + txt = table_to_helptoc_xml(opts, t); +end + +function t = create_files_table(opts, txt) + %CREATE_FILES_TABLE Build table containing list of all mexopencv files + + % split by lines + C = textscan(txt, '%s', 'Delimiter','\n'); + C = C{1}; + C(cellfun(@isempty, C)) = []; + + % remove section headers + str = 'Contents of '; + C(strncmp(C, str, numel(str))) = []; + + % remove simcoverage entry cv.schema + str = 'schema '; + C(strncmp(C, str, numel(str))) = []; + + % split each line: "name - description" + C = regexp(C, ' - ', 'split', 'once'); + C = strtrim(cat(1, C{:})); + + % add cv. prefix to name + C(:,1) = strcat('cv.', C(:,1)); + + % create table + t = cell2table(C, 'VariableNames',{'names','descriptions'}); + + if opts.IndexGroups + % extract OpenCV module names for each mexopencv function + tt = enumerate_mexopencv_files(opts); + + % join the two tables by name + t = innerjoin(t, tt, 'Keys','names'); + + % sort table + t = sortrows(t, {'repos', 'modules', 'names'}); + end +end + +function txt = table_to_index_html(opts, t) + %TABLE_TO_INDEX_HTML Format table of mexopencv files as HTML table + + if opts.IndexGroups + linkify = @(p) strrep(strrep(p, mexopencv.root(), ... + 'https://github.com/kyamagu/mexopencv/blob/master'), '\', '/'); + txt = strcat('', ... + '', ... + '', ... + '', ... + '', ... + '', ... + ''); + txt = strjoin({ + '
', t.names, '', t.modules, '', t.repos, '', t.descriptions, 'C ', ... + 'M ', ... + 'T
' + '' + '' + '' + strjoin(txt, '\n') + '' + '
NameModuleRepoDescriptionSource Files
' + }, '\n'); + else + txt = strcat(... + '', t.names, '', ... + '', t.descriptions, ''); + txt = strjoin({ + '' + '' + '' + strjoin(txt, '\n') + '' + '
NameDescription
' + }, '\n'); + end +end + +function txt = table_to_index_cat_html(opts, t) + %TABLE_TO_INDEX_CAT_HTML Format table of mexopencv files as HTML tables + + opts.IndexGroups = false; % for the table_to_index_html calls below + + % format list of files as HTML tables, one per module + txt = {}; + [repos,~,rid] = unique(t.repos); + for r=1:max(rid) + tt = t(rid == r,:); + [modules,~,mid] = unique(tt.modules); + txt{end+1} = sprintf('

<h2 id="%s">%s</h2>

', repos{r}, repos{r}); + for m=1:max(mid) + ttt = tt(mid == m,:); + txt{end+1} = sprintf('

<h3 id="%s">%s</h3>

', modules{m}, modules{m}); + txt{end+1} = table_to_index_html(opts, ttt); + end + end + txt = strjoin(txt, '\n'); +end + +function txt = table_to_contents_m(opts, t) + %TABLE_TO_CONTENTS_M Format table of mexopencv files as contents.m file + + % formatting string for each entry + maxlen = max(cellfun(@length, t.names)); + frmt = ['%% %-' int2str(maxlen) 's - %s\n']; + + % format list of files as Contents.m text + if opts.IndexGroups + txt = {}; + [repos,~,rid] = unique(t.repos); + for r=1:max(rid) + tt = t(rid == r,:); + [modules,~,mid] = unique(tt.modules); + txt{end+1} = ['%% ' repos{r} ':']; + txt{end+1} = '%'; + for m=1:max(mid) + ttt = tt(mid == m,:); + C = [ttt.names, ttt.descriptions]'; + txt{end+1} = ['% ' modules{m} ':']; + txt{end+1} = [sprintf(frmt, C{:}) '%']; end end + txt = strjoin(txt, '\n'); + else + C = [t.names, t.descriptions]'; + txt = [sprintf(frmt, C{:}) '%']; + end + + % add header + txt = strjoin({ + '% mexopencv' + sprintf('%% Version %s (R%s) %s', cv.Utils.version(), ... + version('-release'), datestr(now(), 'dd-mmmm-yyyy')) + '%' + txt + '' + }, '\n'); +end - function txt = process_css(this, txt) - %PROCESS_CSS - txt = strrep(txt,'font-size: 12px;','font-size: 14px;'); - txt = sprintf(['%s\npre {\n'... - ' margin: 0em 2em;\n'... - ' padding: .5em 1em;\n'... - ' background-color: #E7EBF7;\n'... - '}\n'... - '\nh1, h2, h3, h4, h5, h6 {\n'... - ' color:#990000;'... - '}'],txt); - end - - function txt = process_index(this) - %PROCESS_INDEX - txt = help2html('cv'); - filename = fullfile(MDoc.DIR,'index.html'); - - % Filter - description = ['
<p>Collection and a development kit of matlab '... - 'mex functions for OpenCV library</p>'... - '<p><a href="http://github.com/kyamagu/mexopencv">'... - 'http://github.com/kyamagu/mexopencv</a></p>']; - txt = strrep(txt,'<div class="title">cv</div>',... - sprintf('<div class="title">mexopencv</div>
%s',description)); - txt = regexprep(txt,'Contents of \w+:\s*',''); - txt = this.filter_text(txt); - txt = this.build_table(txt); - - % Write - fid = fopen(filename,'w'); - fprintf(fid,'%s',txt); - fclose(fid); - end - - function txt = build_table(this, txt) - %BUILD_TABLE - [splt,tok] = regexp(txt,... - '(
<div class="helptext">\s*<pre>.*</pre>\s*</div>)','split','tokens'); - if ~isempty(tok) - tok = regexp(tok{1}{1},'<pre>\s*(.*)</pre>','tokens'); - if ~isempty(tok) - tok = sprintf('%s\n',tok{1}{1}); - t = regexp(tok,'(<a href="[^"]*">[^<]*</a>)\s*-\s*([^\n]*)\n','tokens'); - tok = cellfun(@(x) sprintf('<tr><td>%s</td><td>%s</td></tr>\n',x{1},x{2}),... - t, 'UniformOutput', false); - tok = sprintf('<table>\n%s</table>\n',[tok{:}]); - txt = [splt;{sprintf('<div class="helpcontent">%s</div>
',tok),''}]; - txt = [txt{:}]; +function txt = table_to_helptoc_xml(opts, t) + %TABLE_TO_HELPTOC_XML Format table of mexopencv files as helpfuncbycat.xml file + + %HACK: description should not be included in R2014b and up + % (only supported in older help browser "doc -classic") + withPurpose = verLessThan('matlab','8.4'); + + % format list of files as functions reference for helptoc.xml/helpfuncbycat.xml + txt = {}; + txt{end+1} = ''; + txt{end+1} = ''; + txt{end+1} = ' Functions'; + if opts.IndexGroups + [repos,~,rid] = unique(t.repos); + for r=1:max(rid) + tt = t(rid == r,:); + [modules,~,mid] = unique(tt.modules); + txt{end+1} = sprintf(... + ' %s', ... + repos{r}, repos{r}); + for m=1:max(mid) + ttt = tt(mid == m,:); + if withPurpose + C = strcat(... + ' ', ttt.names, '', ... + xml_escape(ttt.descriptions), ''); + else + C = strcat(... + ' ', ttt.names, ''); end + txt{end+1} = sprintf(... + ' %s', ... + modules{m}, modules{m}); + txt{end+1} = strjoin(C, '\n'); + txt{end+1} = ' '; end + txt{end+1} = ' '; + end + else + if withPurpose + C = strcat(... + ' ', t.names, '', ... + xml_escape(t.descriptions), ''); + else + C = strcat(... + ' ', t.names, ''); + end + txt{end+1} = strjoin(C, '\n'); + end + txt{end+1} = ' '; + txt{end+1} = ''; + txt{end+1} = ''; + txt = strjoin(txt, '\n'); +end + +function txt = wiki_md2html(opts, mdFile) + %WIKI_MD2HTML Convert Wiki Markdown file to HTML + + % read and convert Markdown to HTML + txt = fileread(mdFile); + txt = MarkdownProcess(txt, opts.MarkdownBackend); + + % full HMTL page from fragment + [~,name] = fileparts(mdFile); + C = { + '' + '' + '' + '' + ['' strrep(name, '-', ' ') ''] + '' + '' + }; + if opts.SyntaxHighlighting + C = [ + C + '' + '' + '' + txt + '' + '' + '' + '' + '' + ]; + else + C = [ + C + '' + '' + txt + '' + '' + ]; + end + C{end+1} = ''; + txt = strjoin(C, '\n'); +end + +function jdoc = parse_help2html(opts, topic) + %PARSE_HELP2HTML Parse HELP2HTML output and perform common filtering + + % convert M-help into HTML document + [txt, found] = help2html(topic); + if ~found + jdoc = []; + return; + end + + % parse HTML string + jdoc = javaMethod('parse', 'org.jsoup.Jsoup', txt); + jdoc.outputSettings.prettyPrint(false); % for proper markdown parsing + + % replace CSS stylesheet with our local version (relative URI) + jdoc.select('link[href$=helpwin.css]').attr('href', 'helpwin.css'); + + % add another sytlesheet for customization (relative URI) + inject_css_file(jdoc, 'helpwin_custom.css'); + + % change title + txt = strrep(char(jdoc.title()), 'MATLAB File Help: ', ''); + jdoc.title([txt ' - mexopencv']); + + % add generator meta tag + mt = jdoc.head().appendElement('meta'); + mt.attr('name', 'generator').attr('content', ['MATLAB ' version()]); + + % remove highlighting of function names + % (turn func into func) + jdoc.select('span.helptopic').unwrap(); + + % rewrite .. 
hyperlinks (mainly in header/footer) + rewrite_hyperlinks(opts, jdoc); + + if opts.MarkdownRender + % render Markdown + render_markdown(opts, jdoc); + + % mark the first paragraph which corresponds to H1-line + p = jdoc.select('div.helpcontent > p:first-child').first(); + if ~isempty(p) + p.addClass('h1line'); + + % add description meta tag (using H1-line description) + desc = [topic ' - ' char(p.text())]; + mt = jdoc.head().appendElement('meta'); + mt.attr('name', 'description').attr('content', desc); end end + + % set external links to open in new tabs + jdoc.select('a[href^=http]').attr('target', '_blank'); + + % enable syntax highlighting of code blocks using google-code-prettify + if opts.SyntaxHighlighting && ~strcmp(topic, 'cv') + enable_syntax_highlighting(opts, jdoc); + end + + % enable sorting tables (only used in index.html) + if opts.TableSorting && strcmp(topic, 'cv') + enable_sortable_tables(opts, jdoc); + end +end + +function rewrite_hyperlinks(opts, jdoc) + %REWRITE_HYPERLINKS Rewrite hyperlinks for offline use + + % replace "matlab:helpwin" (i.e "Default Topics") with index link + jdoc.select('a[href=matlab:helpwin]').html('Index').attr('href', 'index.html'); + + % process rest of "matlab:(helpwin|doc) cv.fcn" links + links = jdoc.select('a[href^=matlab:helpwin], a[href^=matlab:doc]'); + it = links.iterator(); % iterate over links as ArrayList + while it.hasNext() + % get href + a = it.next(); + topic = char(a.attr('href')); + + % remove "matlab:(helpwin|doc)" prefix + topic = regexprep(topic, 'matlab:(helpwin|doc)\s*', '', 'once'); + + % remove enclosing parens/quotes in case of function syntax + % "matlab:helpwin('x')" vs. command syntax "matlab:helpwin x" + topic = regexprep(topic, '^\(''', '', 'once'); + topic = regexprep(topic, '''\);?$', '', 'once'); + + % fix class methods (i.e: cv.Class/method -> cv.Class.method) + topic = strrep(topic, '/', '.'); + + % check if link to another cv function + if strncmp(topic, 'cv.', 3) + if is_handle_member(opts, topic) + % replace link to handle.* methods + url = 'https://www.mathworks.com/help/matlab/ref/handle-class.html'; + a.attr('href', url); + else + % replace link with a relative "fcn.html" link + a.attr('href', [topic '.html']); + end + else + % check if link to an official TMW function + url = is_mathworks_function(opts, topic); + if ~isempty(url) + a.attr('href', url); + end + end + end + + % remove some raw-code links ("matlab:open .." and "matlab:edit ..") + jdoc.select('a[href~=matlab:(open|edit)]').remove(); + + % turn any remaining "matlab:" links to plain text + jdoc.select('a[href^=matlab:]').unwrap(); +end + +function render_markdown(opts, jdoc) + %RENDER_MARKDOWN Convert help text from Markdown to HTML + autolinking + + % extract markdown text + pre = jdoc.select('div.helptext > pre').first(); + assert(~isempty(pre), '
<pre> not found');
+    %NOTE: this also undoes http:// autolinking done by help2html
+    txt = char(pre.text());
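+    % (note: jsoup's text() returns decoded plain text, e.g. an
+    % <a href="http://...">...</a> inserted by help2html comes back as the
+    % bare "http://..." string here)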
+
+    %HACK: for correct Markdown indentation, we remove two leading spaces:
+    % - first one inserted by help2html for some reason
+    % - second one comes from how comments are written (e.g "% some text"),
+    %   being the space between the % symbol and the first character
+    txt = regexprep(txt, '^  ', '', 'lineanchors');
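+    % (e.g. a help line "  ## Options" becomes "## Options"; 'lineanchors'
+    % makes ^ match at the start of every line, not just the string start)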
+
+    % remove redundant function name at the beginning (exclude property docs)
+    if jdoc.select('div.sectiontitle:containsOwn(Property Details)').isEmpty()
+        txt = regexprep(txt, '^\w+\s+', '', 'once');
+    end
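+    % (e.g. help text starting with "FOO  Does something ..." is reduced to
+    % "Does something ...", FOO being a hypothetical function name here)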
+
+    % autolink "http://" and "www." text
+    % (before rendering MD so that we don't later double-link <a> links)
+    if opts.AutoLinking
+        txt = auto_link_http(opts, txt);
+    end
+    if opts.AutoLinkingDeep
+        txt = auto_link_www(opts, txt);
+    end
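+    % (e.g. with AutoLinking on, a bare "http://opencv.org" in the text gets
+    % wrapped in an anchor tag; the deep pass additionally catches bare
+    % "www." prefixes, per the option docs above)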
+
+    % convert Markdown to HTML
+    txt = MarkdownProcess(txt, opts.MarkdownBackend);
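+    % (illustrative: '*Hello* __world__' renders as
+    %  '<p><em>Hello</em> <strong>world</strong></p>')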
+
+    % autolink "cv." functions text
+    % (after rendering MD so that we don't break indented code with cv. calls)
+    if opts.AutoLinkingCV
+        txt = auto_link_cv(opts, txt);
+    end
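+    % (illustrative: a plain-text mention of cv.imread becomes the relative
+    %  link <a href="cv.imread.html">cv.imread</a>)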
+
+    % replace the "helptext" part with the rendered Markdown
+    div = jdoc.select('div.helptext').first();
+    assert(~isempty(div), '<div>
not found'); + div.removeClass('helptext').addClass('helpcontent'); + div.html(txt); +end + +function klass = enable_syntax_highlighting(opts, jdoc) + %ENABLE_SYNTAX_HIGHLIGHTING Add syntax highlighting using google-code-prettify + + % add prettyprint CSS class to pre+code blocks + klass = 'prettyprint'; + jdoc.select('pre:has(code)').addClass(klass); + + % set language hint (only if one is not already present, which could be + % specified in case MD backend supports fenced code blocks "```lang") + if ~isempty(opts.PrettifyLang) + jdoc.select('pre:has(code:not([class]))').addClass(opts.PrettifyLang); + end + + % append code-prettify matlab theme CSS stylesheet + url = 'https://cdn.rawgit.com/amroamroamro/prettify-matlab/master/dist/css/matlab.min.css'; + inject_css_file(jdoc, get_resource(opts, url)); + + % append code-prettify JavaScript + url = 'https://cdn.rawgit.com/google/code-prettify/master/loader/prettify.js'; + inject_js_file(jdoc, get_resource(opts, url)); + + % append lang-matlab extension for code-prettify JavaScript + url = 'https://cdn.rawgit.com/amroamroamro/prettify-matlab/master/dist/js/full/lang-matlab.min.js'; + inject_js_file(jdoc, get_resource(opts, url)); + + % execute JavaScript when page is ready + jdoc.body().attr('onload', 'PR.prettyPrint();'); +end + +function klass = enable_sortable_tables(opts, jdoc) + %ENABLE_SORTABLE_TABLES Adds ability to sort tables by clicking on headers + + % set CSS class to tables with header + klass = 'sortable'; + jdoc.select('table:has(thead)').addClass(klass); + + % append sorttable JavaScript library + url = 'https://cdn.jsdelivr.net/sorttable/2/sorttable.min.js'; + inject_js_file(jdoc, get_resource(opts, url)); +end + +function fname = get_resource(opts, url) + %GET_RESOURCE Return resource URI, optionally downloaded locally + + if opts.NoExternals + % download resource + [~, f, ext] = fileparts(url); + fname = [f ext]; + filename = fullfile(opts.DIR, fname); + if exist(filename, 'file') ~= 2 + urlwrite(url, filename); + end + else + fname = url; + end +end + +function skip = is_handle_member(opts, topic) + %IS_HANDLE_MEMBER Whether to ignore processing a function + + % decide to skip if it matches the pattern "cv.Class.method", + % and the "method" is inherited from the HANDLE class + C = strsplit(topic, {'.', '/'}); + skip = (numel(C) >= 2 && ismember(C{end}, opts.Ignore)); +end + +function url = is_mathworks_function(~, topic) + %IS_MATHWORKS_FUNCTION Get online address for official MathWorks function + + % template for URL of online function help + tmpl = 'https://www.mathworks.com/help/%s/ref/%s.html'; + + % check if it is a builtin function + if exist(topic, 'builtin') == 5 + url = sprintf(tmpl, 'matlab', lower(topic)); + return; + end + + % locate function and test if it is a toolbox function + str = which(topic); + base = toolboxdir(''); + if ~isempty(str) && strncmp(str, base, numel(base)) + tbx = strsplit(strrep(str, base, ''), filesep()); + url = sprintf(tmpl, tbx{2}, lower(topic)); + return; + end + + % function not found + url = ''; +end + +function txt = auto_link_http(~, txt) + %AUTO_LINK_HTTP Autolinks "http://" text + + % http:// or https:// + % (neg lookbehind to avoid breaking existing [..](http://..) 
MD links/imgs) + re = '(?<!\]\()(\<https?://\S+[\w/])'; + rep = '[$1]($1)'; + txt = regexprep(txt, re, rep, 'all', 'preservecase', 'lineanchors'); +end + +function txt = auto_link_www(~, txt) + %AUTO_LINK_WWW Autolinks "www." text + + % bare "www." domains lacking a scheme, delimited by whitespace/punctuation + re = '(^|\s)(www\.[\w-]+(\.[\w-]+)+\S*?(?=[\s.,;!?]|$))'; + rep = '$1[$2](http://$2)'; + txt = regexprep(txt, re, rep, 'all', 'preservecase', 'lineanchors'); +end + +function txt = auto_link_cv(~, txt) + %AUTO_LINK_CV Autolinks cv.* functions text + + % linkify cv.func (relative URLs) + re = '\<cv\.\w+(\.\w+)*\>'; + rep = '<a href="$0.html">$0</a>'; + txt = regexprep(txt, re, rep, 'all', 'preservecase', 'lineanchors'); +end + +function names = enumerate_mexopencv_members(opts) + %ENUMERATE_MEXOPENCV_MEMBERS Enumerate all members in mexopencv + + % names of ignored methods/properties/events + if opts.IgnoreHandleClass + % those inherited from handle class (exclude dtor and static empty) + mt0 = meta.class.fromName('handle'); + m0 = setdiff({mt0.MethodList.Name}, {'delete', 'empty'}); + p0 = {mt0.PropertyList.Name}; + e0 = {mt0.EventList.Name}; + else + m0 = {}; + p0 = {}; + e0 = {}; + end + + %NOTE: we're taking all methods/props/events, including hidden and private + + mt = meta.package.fromName('cv'); + + % list of functions + f = strcat('cv.', {mt.FunctionList.Name}); + + % list of classes + c = {mt.ClassList.Name}; + + % list of class methods + m = arrayfun(@(k) strcat(k.Name, '.', setdiff({k.MethodList.Name}, m0)), ... + mt.ClassList, 'UniformOutput',false); + m = m(~cellfun(@isempty, m)); + m = cat(2, m{:}); + + % conditionally exclude .empty() method if inherited from handle class + ind = arrayfun(@(k) find(strcmp({k.MethodList.Name}, 'empty'), ... + 1, 'first'), mt.ClassList); + mask = arrayfun(@(k,i) k.MethodList(i).Static, mt.ClassList, ind); + m1 = strcat({mt.ClassList(mask).Name}, '.empty'); + m = setdiff(m, m1); + + % list of class properties + p = arrayfun(@(k) strcat(k.Name, '.', setdiff({k.PropertyList.Name}, p0)), ... + mt.ClassList, 'UniformOutput',false); + p = p(~cellfun(@isempty, p)); + p = cat(2, p{:}); + + % list of class events + e = arrayfun(@(k) strcat(k.Name, '.', setdiff({k.EventList.Name}, e0)), ...
+ mt.ClassList, 'UniformOutput',false); + e = e(~cellfun(@isempty, e)); + e = cat(2, e{:}); + + % combined list + names = unique([f(:); c(:); m(:); p(:); e(:)]); +end + +function t = enumerate_mexopencv_files(~) + %ENUMERATE_MEXOPENCV_FILES Return a table of all mexopencv files + + % get list of all C++ sources + paths = { + fullfile(mexopencv.root(), 'src', '+cv', '*.cpp') + fullfile(mexopencv.root(), 'src', '+cv', 'private', '*.cpp') + fullfile(mexopencv.root(), 'opencv_contrib', 'src', '+cv', '*.cpp') + fullfile(mexopencv.root(), 'opencv_contrib', 'src', '+cv', 'private', '*.cpp') + }; + cppfiles = cellfun(@cv.glob, paths, 'UniformOutput',false); + cppfiles = [cppfiles{:}]; + cppfiles = cppfiles(:); + + % determine which ones are from "opencv_contrib" + iscontrib = regexp(cppfiles, '\<opencv_contrib\>', 'once'); + iscontrib = ~cellfun(@isempty, iscontrib); + + repos = {'opencv'; 'opencv_contrib'}; + repos = repos(iscontrib + 1); + + % get module name of each file + modules = cellfun(@get_opencv_module, cppfiles, 'UniformOutput',false); + + % get base file name + [~,names] = cellfun(@fileparts, cppfiles, 'UniformOutput',false); + names = regexprep(names, '_$', '', 'once'); + + % corresponding M-files + paths = { + fullfile(mexopencv.root(), '+cv') + fullfile(mexopencv.root(), 'opencv_contrib', '+cv') + }; + mfiles = strcat(paths(iscontrib + 1), filesep(), names, '.m'); + + % corresponding test files + paths = { + fullfile(mexopencv.root(), 'test', 'unit_tests') + fullfile(mexopencv.root(), 'opencv_contrib', 'test', 'unit_tests') + }; + tfiles = regexprep(names, '^[a-z]', '${upper($0)}', 'once'); + tfiles = regexprep(tfiles, '_(\w)', '${upper($1)}', 'once'); + tfiles = strcat(paths(iscontrib + 1), filesep(), 'Test', tfiles, '.m'); + + % add cv. prefix to name + names = strcat('cv.', names); + + % create table + t = table(names, cppfiles, mfiles, tfiles, modules, repos); +end + +function module = get_opencv_module(cppfile) + %GET_OPENCV_MODULE Extract OpenCV module name for a mexopencv C++ source file + + module = ''; + fid = fopen(cppfile, 'rt'); + tline = fgetl(fid); + while ischar(tline) + if ~isempty(strfind(tline, '@ingroup')) + module = regexp(tline, '@ingroup (\w+)$', 'tokens', 'once'); + module = module{1}; + break; + end + tline = fgetl(fid); + end + fclose(fid); +end +function inject_css_file(jdoc, url) + %INJECT_CSS_FILE Inject CSS stylesheet + + link = jdoc.head().appendElement('link'); + link.attr('rel', 'stylesheet'); + link.attr('type', 'text/css'); + link.attr('href', url); +end + +function inject_css(jdoc, txt) + %INJECT_CSS Inject CSS code + + style = jdoc.head().appendElement('style'); + style.attr('type', 'text/css'); + append_data_node(style, txt); +end + +function inject_js_file(jdoc, url) + %INJECT_JS_FILE Inject JavaScript file + + script = jdoc.body().appendElement('script'); + script.attr('type', 'text/javascript'); + script.attr('src', url); +end + +function inject_js(jdoc, txt) + %INJECT_JS Inject JavaScript code + + script = jdoc.body().appendElement('script'); + script.attr('type', 'text/javascript'); + append_data_node(script, txt); +end + +function append_data_node(node, txt) + %APPEND_DATA_NODE Used to set a style/script tag CSS/JavaScript text without escaping + + % append a child DataNode to specified node + if ~isempty(node) + node.appendChild(...
+ javaObject('org.jsoup.nodes.DataNode', txt, node.baseUri())); + end +end + +function txt = xml_escape(txt) + %XML_ESCAPE Escape XML special characters + + txt = strrep(txt, '&', '&amp;'); + txt = strrep(txt, '<', '&lt;'); + txt = strrep(txt, '>', '&gt;'); + txt = strrep(txt, '"', '&quot;'); + txt = strrep(txt, '''', '&#39;'); +end + +function filewrite(filename, str) + %FILEWRITE Write text to file + + fid = fopen(filename, 'wt', 'n', 'UTF-8'); + fprintf(fid, '%s', str); + fclose(fid); +end + +function outHTML = MarkdownProcess(inMD, backend) + %MARKDOWNPROCESS Converts Markdown text to HTML + % + % outHTML = MarkdownProcess(inMD) + % outHTML = MarkdownProcess(inMD, backend) + % + % ## Input + % * __inMD__ Input string containing Markdown source. + % * __backend__ Name of the Java Markdown library used for the + % conversion (see the switch cases below for recognized values). + % + % ## Output + % * __outHTML__ Output HTML formatted string. + % + % This function takes a string of Markdown source and converts it + % into a formatted HTML output string. + % + % [Markdown language](https://daringfireball.net/projects/markdown/) was + % created by John Gruber and provides an easy-to-read, easy-to-write plain + % text format that takes many cues from existing conventions for marking + % up plain text in email. + % + % ## Example + % + % % benchmark the different backends + % str = repmat(sprintf('*Hello* __world__!\n\n'), 1, 100); + % %web(['text://' MarkdownProcess(str)]) + % backends = {..}; + % t = zeros(size(backends)); + % for i=1:numel(backends) + % t(i) = timeit(@() MarkdownProcess(str, backends{i})); + % end + % disp([backends num2cell(t*1000)]) + % + + % process Markdown + switch backend + case 'commonmark-java' + builder = javaMethod('builder', 'org.commonmark.parser.Parser'); + parser = builder.build(); + document = parser.parse(inMD); + builder = javaMethod('builder', ... + 'org.commonmark.renderer.html.HtmlRenderer'); + renderer = builder.build(); + outHTML = char(renderer.render(document)); + + case 'MarkdownPapers' + parser = javaObject('org.tautua.markdownpapers.Markdown'); + reader = javaObject('java.io.StringReader', inMD); + writer = javaObject('java.io.StringWriter'); + parser.transform(reader, writer); + outHTML = char(writer.toString()); + + case 'pegdown' + parser = javaObject('org.pegdown.PegDownProcessor'); + outHTML = char(parser.markdownToHtml(inMD)); + + case 'markdownj' + parser = javaObject('org.markdownj.MarkdownProcessor'); + outHTML = char(parser.markdown(inMD)); + + case 'markdown4j' + %NOTE: it treats newlines as real line breaks! + parser = javaObject('org.markdown4j.Markdown4jProcessor'); + outHTML = char(parser.process(inMD)); + + case 'txtmark' + outHTML = char(javaMethod('process', ... + 'com.github.rjeschke.txtmark.Processor', inMD)); + + case 'none' + outHTML = inMD; + + otherwise + error('Unrecognized Markdown backend: %s', backend); + end +end diff --git a/utils/MarkdownPapers.m b/utils/MarkdownPapers.m deleted file mode 100644 index 85808c2bd..000000000 --- a/utils/MarkdownPapers.m +++ /dev/null @@ -1,28 +0,0 @@ -function [ output ] = MarkdownPapers( input ) -%MarkdownPapers A java implementation of Markdown language created by John Gruber -% -% MarkdownPapers is a java implementation of Markdown language created by -% John Gruber which provides an easy-to-read, easy-to-write plain text -% format that takes many cues from existing conventions for marking up -% plain text in email. -% -% http://markdown.tautua.org/ -% -% Markdown syntax: http://daringfireball.net/projects/markdown/ -% - -% check jar path -p = fullfile(fileparts(mfilename('fullpath')),'MarkdownPapers',...
- 'markdownpapers-core-1.2.3.jar'); -if all(cellfun(@isempty,strfind(javaclasspath, p))) - javaaddpath(p); -end - -% call java function -md = org.tautua.markdownpapers.Markdown; -in = java.io.StringReader(java.lang.String(input)); -out = java.io.StringWriter(); -md.transform(in,out); -output = char(out.toString()); - -end diff --git a/utils/MarkdownPapers/LICENSE b/utils/MarkdownPapers/LICENSE deleted file mode 100644 index f49a4e16e..000000000 --- a/utils/MarkdownPapers/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/utils/MarkdownPapers/markdownpapers-core-1.2.3.jar b/utils/MarkdownPapers/markdownpapers-core-1.2.3.jar deleted file mode 100644 index 4615a7abb..000000000 Binary files a/utils/MarkdownPapers/markdownpapers-core-1.2.3.jar and /dev/null differ diff --git a/utils/MavenDownload.m b/utils/MavenDownload.m new file mode 100644 index 000000000..2a3eda2e6 --- /dev/null +++ b/utils/MavenDownload.m @@ -0,0 +1,287 @@ +function [pkg, filenames] = MavenDownload(varargin) + %MAVENDOWNLOAD Downloads JAR artifacts from Maven Central + % + % pkg = MavenDownload(groupId, artifactId) + % pkg = MavenDownload(..., 'OptionName',optionValue, ...) + % [pkg, filenames] = MavenDownload(...) + % + % ## Input + % * __groupId__ group Id (organization name), may be empty if there is no + % ambiguity. + % * __artifactId__ artifact Id (module name). + % + % ## Options + % * __Version__ (optional) package version. If not specified, the + % latest version is requested. + % * __SkipDependencies__ Decide if dependencies should not be processed. + % Default false. + % * __Download__ Decide if packages should be downloaded. Default true. + % * __Verbose__ Verbosity messages. Default true. + % * __OutDir__ Directory where to place downloaded JAR files. By default, + % the current directory is used. + % + % ## Output + % * __pkg__ struct describing the package and its dependencies + % * __filenames__ Full paths to downloaded JAR files. + % + % This is a poor man's implementation of a Java package manager. + % It searches the [Maven Central Repository](http://search.maven.org/) + % and downloads a package JAR file and its dependencies. + % + % The Maven REST API is documented [here](http://search.maven.org/#api). 
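+ % For example, resolving the latest version of an artifact issues a GET + % request of the form (illustrative): + % + %     http://search.maven.org/solrsearch/select?q=a:"jsoup"&rows=5&wt=json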
+ % + % ## Example + % + % MavenDownload('org.jsoup', 'jsoup', 'OutDir','jsoup') + % MavenDownload('org.commonjava.googlecode.markdown4j', 'markdown4j', 'OutDir','markdown4j') + % MavenDownload('org.markdownj', 'markdownj-core', 'OutDir','markdownj') + % MavenDownload('org.tautua.markdownpapers', 'markdownpapers-core', 'OutDir','markdownpapers') + % MavenDownload('org.pegdown', 'pegdown', 'OutDir','pegdown') + % MavenDownload('com.github.rjeschke', 'txtmark', 'OutDir','txtmark') + % MavenDownload('com.atlassian.commonmark', 'commonmark', 'OutDir','commonmark-java') + % MavenDownload('com.atlassian.commonmark', 'commonmark-ext-gfm-tables', 'OutDir','commonmark-java') + % + % See also: webread, websave, urlencode, jsondecode + % + + % parse inputs + nargoutchk(0,2); + opts = parse_options(varargin{:}); + + % find requested package + pkg = struct(); + pkg.groupId = opts.groupId; + pkg.artifactId = opts.artifactId; + pkg.version = opts.version; + + % search for latest version if none specified + if isempty(pkg.groupId) || isempty(pkg.version) + pkg = package_search(pkg, opts); + end + + % find dependencies + if ~opts.SkipDependencies + pkg = package_dependencies(pkg, opts); + end + + % download JAR files (including all dependencies) + if opts.Download + filenames = package_download(pkg, opts); + end +end + +function opts = parse_options(varargin) + %PARSE_OPTIONS Help function to parse function inputs + + % helper function to validate true/false arguments + isbool = @(x) isscalar(x) && (islogical(x) || isnumeric(x)); + + p = inputParser(); + p.addRequired('groupId', @ischar); + p.addRequired('artifactId', @(x) ischar(x) && ~isempty(x)); + p.addParameter('version', '', @ischar); + p.addParameter('SkipDependencies', false, isbool); + p.addParameter('Download', true, isbool); + p.addParameter('Verbose', true, isbool); + p.addParameter('OutDir', '', @ischar); + p.parse(varargin{:}); + + opts = p.Results; + opts.SkipDependencies = logical(opts.SkipDependencies); + opts.Download = logical(opts.Download); + opts.Verbose = logical(opts.Verbose); +end + +function pkg = package_search(pkg, opts) + %PACKAGE_SEARCH Search for package from Maven by artifact Id + + % search by artifact id + if opts.Verbose, disp(['Searching... ' pkg.artifactId]); end + url = 'http://search.maven.org/solrsearch/select'; + query = {'q',sprintf('a:"%s"',pkg.artifactId), 'rows',5, 'wt','json'}; + data = webread(url, query{:}, weboptions('ContentType','json')); + + % check number of results + assert(~isempty(data), 'No data received'); + res = data.response; + if res.numFound == 0 + error('mexopencv:err', ... + 'artifactId = "%s" was not found', pkg.artifactId); + elseif res.numFound > 1 && isempty(pkg.groupId) + %disp({res.docs.g}) + error('mexopencv:err', ... + 'Ambiguous artifactId = "%s", specify groupId', pkg.artifactId); + end + + % match groupId if specified + idx = 1; + if ~isempty(pkg.groupId) + g = {res.docs.g}; + if res.numFound > 1 + [~,idx] = ismember(validatestring(pkg.groupId, g), g); + else + assert(strcmp(pkg.groupId, g{1}), ... 
+ 'groupId = "%s" does not match "%s"', pkg.groupId, g{1}); + end + end + + % unique project name + pkg.groupId = res.docs(idx).g; + pkg.artifactId = res.docs(idx).a; + if isempty(pkg.version) + pkg.version = res.docs(idx).latestVersion; + end +end + +function pkg = package_dependencies(pkg, opts) + %PACKAGE_DEPENDENCIES Fill package dependencies + + %TODO: cache dependencies to avoid fetching same POM multiple times + %TODO: cyclic dependencies + + % fetch POM XML file + fpath = package_filepath(pkg, '.pom'); + if opts.Verbose, disp(['Fetching... ' fpath]); end + url = 'http://search.maven.org/remotecontent'; + query = {'filepath',fpath}; + pom = webread(url, query{:}, weboptions('ContentType','xmldom')); + + % parse it for dependencies (and transitive dependencies) + pkg.deps = parse_pom(pom); + for i=1:numel(pkg.deps) + pkg.deps(i) = package_dependencies(pkg.deps(i), opts); + end +end + +function filenames = package_download(pkg, opts) + %PACKAGE_DOWNLOAD Download JAR package and its dependencies + + % fetch JAR file + [fpath, filename] = package_filepath(pkg, '.jar'); + if ~isempty(opts.OutDir) + filename = fullfile(opts.OutDir, filename); + if ~isdir(opts.OutDir) + mkdir(opts.OutDir); + end + end + if exist(filename, 'file') ~= 2 + if opts.Verbose, disp(['Downloading... ' fpath]); end + url = 'http://search.maven.org/remotecontent'; + query = {'filepath',fpath}; + filename = websave(filename, url, query{:}); + else + if opts.Verbose, disp(['Skipped ' fpath]); end + end + filenames = filename; + + % download dependencies + if ~opts.SkipDependencies + fnames = cell(size(pkg.deps)); + for i=1:numel(pkg.deps) + fnames{i} = package_download(pkg.deps(i), opts); + end + + % combine and flatten names + filenames = [filenames fnames]; + while any(cellfun(@iscell, filenames)) + filenames = [filenames{:}]; + end + filenames = unique(filenames, 'stable'); + end +end + +function [fpath, filename] = package_filepath(pkg, packaging) + %PACKAGE_FILEPATH Construct path to fetch package from Maven Central + + % fpath = org/github/com/my-project/1.0.0/my-project-1.0.0.jar + assert(~isempty(pkg.groupId)); + assert(~isempty(pkg.artifactId)); + assert(~isempty(pkg.version)); + packaging = validatestring(packaging, ... + {'.pom', '.jar', '-sources.jar', '-javadoc.jar'}); + filename = sprintf('%s-%s%s', pkg.artifactId, pkg.version, packaging); + fpath = sprintf('%s/%s/%s/%s', ... + strrep(pkg.groupId,'.','/'), pkg.artifactId, pkg.version, filename); +end + +function deps = parse_pom(pom) + %PARSE_POM Extract dependencies from POM document DOM + % + % See also: xmlread + % + + % output structure-array of dependencies + deps = struct('groupId',{}, 'artifactId',{}, 'version',{}, 'deps',{}); + k = 1; % insertion index + + % debug + %{ + disp(pom.saveXML([])); + keyboard + %} + + % get <dependencies> tag + nodes = pom.getElementsByTagName('dependencies').item(0); + if isempty(nodes) + return; % no dependencies + end + + % check if POM contains a <parent> tag + node = pom.getElementsByTagName('parent').item(0); + if ~isempty(node) + p_groupId = getElementTextByTagName(node, 'groupId'); + p_artifactId = getElementTextByTagName(node, 'artifactId'); + p_version = getElementTextByTagName(node, 'version'); + else + p_groupId = ''; + p_artifactId = ''; + p_version = ''; + end + + % loop over child <dependency> tags + for i=1:nodes.getLength() + node = nodes.item(i-1); + + % skip anything other than <dependency> elements (text/comments/etc.
nodes) + if node.getNodeType() ~= 1 || ~strcmp(node.getNodeName(), 'dependency') + continue; + end + + % check dependency <scope> and ignore test dependencies + scope = getElementTextByTagName(node, 'scope'); + if ~isempty(scope) && strcmp(scope, 'test') + continue; + end + + % get dependency <groupId>, <artifactId>, <version> + deps(k).groupId = getElementTextByTagName(node, 'groupId'); + deps(k).artifactId = getElementTextByTagName(node, 'artifactId'); + deps(k).version = getElementTextByTagName(node, 'version'); + + %HACK: fill empty fields with parent fields + if isempty(deps(k).groupId) + deps(k).groupId = p_groupId; + end + if isempty(deps(k).artifactId) + deps(k).artifactId = p_artifactId; + end + if isempty(deps(k).version) + deps(k).version = p_version; + end + + % prepare for next entry + k = k + 1; + end +end + +function txt = getElementTextByTagName(node, tag) + %GETELEMENTTEXTBYTAGNAME Helper function to get text content of tag node + + % equivalent to: txt = node.select('tag').first().text() + nodeTAG = node.getElementsByTagName(tag).item(0); + if ~isempty(nodeTAG) + txt = char(nodeTAG.getFirstChild().getTextContent()); + else + txt = ''; + end +end diff --git a/utils/helpwin_custom.css b/utils/helpwin_custom.css new file mode 100644 index 000000000..a2bcfad93 --- /dev/null +++ b/utils/helpwin_custom.css @@ -0,0 +1,113 @@ +/* global font */ +@import url(https://fonts.googleapis.com/css?family=Open+Sans:400,700,400italic|Open+Sans+Condensed:700|Droid+Serif); +body { + font-family: 'Open Sans', Helvetica, Arial, sans-serif; + font-size: 14px; + white-space: normal; +} + +/* headers */ +h1, h2, h3, h4, h5, h6 { + color: #990000; +} +h1, h2, h3 { + border-bottom: 1px dotted black; +} + +/* all code sections */ +pre, code, tt, kbd, samp { + font-family: Consolas, 'DejaVu Sans Mono', Menlo, Courier, monospace; + font-size: 1em; + white-space: pre; + white-space: pre-wrap; + white-space: -moz-pre-wrap; + white-space: -o-pre-wrap; +} + +/* references */ +blockquote { + font-family: 'Droid Serif', 'DejaVu Serif', Georgia, serif; + line-height: 140%; + text-align: justify; + padding: 1px 10px; + background-color: #F9F9F9; + border-left: 10px solid #CCC; + border-right: 3px solid #CCC; + -moz-box-shadow: 2px 2px 15px #CCC; + -webkit-box-shadow: 2px 2px 15px #CCC; + box-shadow: 2px 2px 15px #CCC; +} + +/* block code */ +pre { + line-height: 140%; + margin: 0em 2em; + padding: 0.5em 1em; + background-color: #E7EBF7; + -moz-box-shadow: 2px 2px 15px #CCC; + -webkit-box-shadow: 2px 2px 15px #CCC; + box-shadow: 2px 2px 15px #CCC; +} + +/* inline code */ +code { + background-color: #FAF8F0; +} +pre code { + background-color: transparent; /* HACK */ + background-color: inherit; +} + +.helpcontent { + white-space: normal; +} + +/* function name in title */ +.title { + font-family: 'Open Sans Condensed', Helvetica, Arial, sans-serif; + font-size: 2.6em; + font-weight: bold; +} +/* H1 line: subtitle underneath function name */ +/* p.h1line */ +.helpcontent > p:first-child { + font-size: 1.25em; + font-style: italic; + margin: 10px 20px 20px 20px; +} + +/* "See also" section */ +.footerlinktitle { + font-size: 1.2em; + font-weight: bold; + border-top: solid #DDD thin; + margin-top: 30pt; + padding-top: 10pt; +} +.footerlink { + margin-left: 20px; +} + +/* hyperlinks inside code or in SeeAlso section */ +code a, .footerlink a { + text-decoration: none; +} +code a:hover, .footerlink a:hover { + text-decoration: underline; +} + +/* table of functions in index page */ +.table { + border-collapse: collapse; +} +.table td, .table th {
border: 1px solid #eee; + padding: 4px 10px; +} +.table thead { + background-color: #eee; + color: #666666; + font-weight: bold; + line-height: 1.3; + cursor: pointer; +} diff --git a/utils/jars/.gitkeep b/utils/jars/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/utils/mxdom2mathjax.xsl b/utils/mxdom2mathjax.xsl new file mode 100644 index 000000000..9067a4894 --- /dev/null +++ b/utils/mxdom2mathjax.xsl @@ -0,0 +1,466 @@
[466 added lines of XSL omitted: the stylesheet's XML markup was lost in text extraction. The surviving text nodes (an HTML5 doctype, the comment "This HTML was auto-generated from published MATLAB code.", a "Contents" heading, image width/height attributes, and the "##### SOURCE BEGIN/END #####" markers) identify it as a customization of MATLAB's mxdom2simplehtml.xsl that emits HTML with MathJax-rendered equations; see utils/publish_mathjax.m below.]
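A minimal usage sketch for the new stylesheet (hypothetical script name; the options struct uses MATLAB's standard publish options, wired up the same way utils/publish_mathjax.m does below):

    opts = struct('format','html', 'stylesheet',fullfile('utils','mxdom2mathjax.xsl'));
    html = publish('my_script.m', opts);
    web(html, '-browser')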
diff --git a/utils/publish_custom.css b/utils/publish_custom.css new file mode 100644 index 000000000..2628fcc08 --- /dev/null +++ b/utils/publish_custom.css @@ -0,0 +1,86 @@ +/* Stylesheet extracted and customized from: */ +/* C:\Program Files\MATLAB\R2017a\toolbox\matlab\codetools\private\mxdom2simplehtml.xsl */ + +/* css reset */ + +html,body,div,span,applet,object,iframe,h1,h2,h3,h4,h5,h6,p,blockquote,pre,a,abbr,acronym,address,big,cite,code,del,dfn,em,font,img,ins,kbd,q,s,samp,small,strike,strong,sub,sup,tt,var,b,u,i,center,dl,dt,dd,ol,ul,li,fieldset,form,label,legend,table,caption,tbody,tfoot,thead,tr,th,td{margin:0;padding:0;border:0;outline:0;font-size:100%;vertical-align:baseline;background:transparent} +body{line-height:1} +ol,ul{list-style:none} +blockquote,q{quotes:none} +blockquote:before,blockquote:after,q:before,q:after{content:'';content:none} +:focus{outline:0} +ins{text-decoration:none} +del{text-decoration:line-through} +table{border-collapse:collapse;border-spacing:0} + +/* page */ + +html { min-height:100%; margin-bottom:1px; } +html body { height:100%; font-family:Arial, Helvetica, sans-serif; font-size:10px; line-height:140%; color:#000; background:#fff none; overflow-y:scroll; } + +p { margin:0px 0px 20px; } + +.content { padding:20px; font-size:1.2em; line-height:140%; } + +/* headers */ + +h1 { margin:0px 0px 25px; font-size:1.5em; font-weight:normal; line-height:100%; color:#d55000; } +h2 { margin:0px 0px 8px; border-bottom:1px solid #d6d4d4; font-size:1.2em; font-weight:bold; line-height:140%; } +h3 { margin:0px 0px 5px; font-size:1.1em; font-weight:bold; line-height:140%; } + +/* hyperlinks */ + +a { color:#005fce; text-decoration:none; } +a:hover { color:#005fce; text-decoration:underline; } +a:visited { color:#004aa0; text-decoration:none; } + +/* lists */ + +ul { margin:0px 0px 20px 23px; list-style:square; } +ul li { margin:0px 0px 7px 0px; } +ul li ul { padding:5px 0px 0px; margin:0px 0px 7px 23px; } +ul li ol li { list-style:decimal; } +ol { margin:0px 0px 20px 0px; list-style:decimal; } +ol li { margin:0px 0px 7px 23px; list-style-type:decimal; } +ol li ol { padding:5px 0px 0px; margin:0px 0px 7px 0px; } +ol li ol li { list-style-type:lower-alpha; } +ol li ul { padding-top:7px; } +ol li ul li { list-style:square; } + +/* images */ + +img:not(.equation) { margin:0px 5px 20px; } + +/* tables */ + +table th, table td { padding:7px 5px; border:1px solid #d6d4d4; text-align:left; } +table th { vertical-align:middle; font-weight:bold; } +table td { vertical-align:top; } + +/* code */ + +pre, code { font-size:1em; } +tt { font-size:1.2em; } +pre { padding:10px; margin:0px 0px 20px; } +pre.code-matlab { border:1px solid #d3d3d3; } +pre.codeinput { border:1px solid #d3d3d3; background:#f7f7f7; } +pre.codeoutput { padding:10px 11px; color:#4c4c4c; } +pre.error { color:red; } + +@media print { pre.codeinput, pre.codeoutput, pre.code-matlab { width:100%; word-wrap:break-word; } } + +/* syntax highlighting */ + +span.keyword { color:#0000FF } +span.string { color:#A020F0 } +span.comment { color:#228B22 } +span.untermstring { color:#B20000 } +span.syscmd { color:#B28C00 } + +/* footer */ + +.footer { width:auto; padding:10px 0px; margin:25px 0px 0px; border-top:1px dotted #878787; font-size:0.8em; font-style:italic; color:#878787; } +.footer p { margin:0; } +.footer a { color:#878787; } +.footer a:hover { color:#878787; text-decoration:underline; } +.footer a:visited { color:#878787; } diff --git a/utils/publish_mathjax.m b/utils/publish_mathjax.m new file mode
100644 index 000000000..fd27041aa --- /dev/null +++ b/utils/publish_mathjax.m @@ -0,0 +1,46 @@ +function outputFilename = publish_mathjax(file, opts, varargin) + %PUBLISH_MATHJAX Publish to HTML, and use MathJax to render equations + % + % ## Input + % * __file__ M-file to publish. + % * __opts__ Structure of options. By default this function uses a custom + % XSL stylesheet to publish as HTML with MathJax rendered equations, i.e + % `struct('format','html', 'stylesheet','mxdom2mathjax.xsl')` + % + % ## Output + % * __outputFilename__ path to the generated HTML document. + % + % ## Options + % Accepts the same options as the publish function. + % + % ## Example + % + % html = publish_mathjax('my_script.m'); + % web(html, '-browser') + % + % showdemo('my_script') + % + % ## References + % Inspired by a MathWorks Support Team + % [solution](https://www.mathworks.com/matlabcentral/answers/93851). + % + % See also: publish, grabcode + % + + % path to this directory + dname = fileparts(mfilename('fullpath')); + + % HTML publish options with custom XSL + if nargin < 2, opts = struct(); end + opts.format = 'html'; + opts.stylesheet = fullfile(dname, 'mxdom2mathjax.xsl'); + + % publish + outputFilename = publish(file, opts, varargin{:}); + + % copy CSS file to output directory (the "true ||" deliberately forces a + % fresh copy even when the stylesheet already exists) + outputCSS = fullfile(fileparts(outputFilename), 'publish_custom.css'); + if true || exist(outputCSS, 'file') ~= 2 + copyfile(fullfile(dname, 'publish_custom.css'), outputCSS); + end +end
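A combined usage sketch for these utilities (assumptions: network access is available, and utils/jars, the placeholder directory added above, holds the downloaded JARs):

    % fetch the jsoup JAR (and its dependencies) used by the doc generator
    [~, jars] = MavenDownload('org.jsoup', 'jsoup', 'OutDir',fullfile('utils','jars'));
    javaaddpath(jars);  % javaaddpath accepts a cellstr of JAR paths

    % publish a demo script to HTML with MathJax-rendered equations
    html = publish_mathjax('my_demo.m');  % my_demo.m is hypothetical
    web(html, '-browser')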