The BinaryDescriptor class implements both the detection of lines and the computation of their binary descriptors. Its interface is mainly based on those of classical detectors and extractors, such as Feature2d's `FeatureDetector <http://docs.opencv.org/modules/features2d/doc/common_interfaces_of_feature_detectors.html?highlight=featuredetector#featuredetector>`_ and `DescriptorExtractor <http://docs.opencv.org/modules/features2d/doc/common_interfaces_of_descriptor_extractors.html?highlight=extractor#DescriptorExtractor : public Algorithm>`_.
Retrieved information about lines is stored in *KeyLine* objects.
If no argument is provided, the constructor sets default values (see the comments in the code snippet in the previous section). The default values are strongly recommended.
BinaryDescriptor::getNumOfOctaves
---------------------------------
Get current number of octaves
.. ocv:function:: int BinaryDescriptor::getNumOfOctaves()
BinaryDescriptor::setNumOfOctaves
---------------------------------
Set number of octaves
.. ocv:function:: void BinaryDescriptor::setNumOfOctaves( int octaves )
:param octaves: number of octaves
BinaryDescriptor::getWidthOfBand
--------------------------------
Get current width of bands
.. ocv:function:: int BinaryDescriptor::getWidthOfBand()
BinaryDescriptor::setWidthOfBand
--------------------------------
Set width of bands
.. ocv:function:: void BinaryDescriptor::setWidthOfBand( int width )
:param width: width of bands
BinaryDescriptor::getReductionRatio
-----------------------------------
Get current reduction ratio (used in Gaussian pyramids)
.. ocv:function:: int BinaryDescriptor::getReductionRatio()
BinaryDescriptor::setReductionRatio
-----------------------------------
Set reduction ratio (used in Gaussian pyramids)
.. ocv:function:: void BinaryDescriptor::setReductionRatio( int rRatio )
:param rRatio: reduction ratio
BinaryDescriptor::createBinaryDescriptor
----------------------------------------
Create a BinaryDescriptor object with default parameters (or with the ones provided) and return a smart pointer to it
:param mask: mask matrix to select which lines in KeyLines must be accepted among the ones extracted (used when *keylines* is not empty)
:param keylines: vector that contains input lines (when filled, the detection part will be skipped and input lines will be passed as input to the algorithm computing descriptors)
:param descriptors: matrix that will store final descriptors
:param useProvidedKeyLines: flag (when set to true, detection phase will be skipped and only computation of descriptors will be executed, using lines provided in *keylines*)
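A minimal usage sketch of the detection/description interface documented above, assuming the ``detect``/``compute`` pair exposed by BinaryDescriptor; the header paths, namespace and image name are illustrative and may differ between OpenCV versions:

.. code-block:: cpp

    #include <vector>
    #include <opencv2/core.hpp>
    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/line_descriptor.hpp>

    using namespace cv;

    int main()
    {
      /* load an image (path is just an example) */
      Mat image = imread( "building.jpg", IMREAD_GRAYSCALE );
      if( image.empty() )
        return -1;

      /* create a BinaryDescriptor object with default parameters */
      Ptr<BinaryDescriptor> bd = BinaryDescriptor::createBinaryDescriptor();

      /* detect lines and compute their binary descriptors */
      std::vector<KeyLine> keylines;
      Mat descriptors;
      bd->detect( image, keylines );
      bd->compute( image, keylines, descriptors );

      return 0;
    }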
BinaryDescriptor::read
----------------------
Read parameters from a FileNode object and store them
:param keylines1: keylines extracted from first image
:param img2: second image
:param keylines2: keylines extracted from second image
:param matches1to2: vector of matches
:param outImg: output matrix to draw on
:param matchColor: drawing color for matches (chosen randomly in case of default value)
:param singleLineColor: drawing color for keylines (chosen randomly in case of default value)
:param matchesMask: mask to indicate which matches must be drawn
:param flags: drawing flags
.. note:: If both *matchColor* and *singleLineColor* are set to their default values, the function draws the matched lines and the lines connecting them with the same (randomly chosen) color.

The structure of the drawing flags is shown in the following snippet:
.. code-block:: cpp

    /* struct for drawing options */
    struct CV_EXPORTS DrawLinesMatchesFlags
    {
      enum
      {
        DEFAULT = 0, // Output image matrix will be created (Mat::create),
                     // i.e. existing memory of output image may be reused.
                     // Two source images, matches, and single keylines
                     // will be drawn.
        DRAW_OVER_OUTIMG = 1, // Output image matrix will not be
                              // created (using Mat::create). Matches will be drawn
                              // on existing content of output image.
        NOT_DRAW_SINGLE_LINES = 2 // Single keylines will not be drawn.
      };
    };
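For illustration, a hedged sketch of how these flags might be passed to the line-matches drawing routine whose parameters are listed above (the function name *drawLineMatches* and the surrounding variables are assumptions of this sketch, not definitions taken from this page):

.. code-block:: cpp

    /* illustrative only: img1/img2, keylines1/keylines2 and matches1to2 are assumed
       to come from previous detection and matching steps */
    cv::Mat outImg;
    std::vector<char> matchesMask( matches1to2.size(), 1 );  // draw every match
    drawLineMatches( img1, keylines1, img2, keylines2, matches1to2, outImg,
                     cv::Scalar::all( -1 ), cv::Scalar::all( -1 ), matchesMask,
                     DrawLinesMatchesFlags::DEFAULT );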
One of the most challenging activities in computer vision is the extraction of useful information from a given image. Such information usually comes in the form of points that preserve some kind of property (for instance, they are scale-invariant) and are actually representative of the input image.
The goal of this module is to seek a new kind of representative information inside an image and to provide the functionalities for its extraction and representation. In particular, differently from previous methods for the detection of relevant elements inside an image, lines are extracted in place of points; a new class is defined ad hoc to summarize a line's properties, for reuse and plotting purposes.
A class to represent a line: KeyLine
------------------------------------
As aforementioned, it has been necessary to design a class that fully stores the information needed to completely characterize a line and to plot it on the image it was extracted from, when required.
The *KeyLine* class has been created for this goal; it is mainly inspired by Feature2d's KeyPoint class, since KeyLine shares some of *KeyPoint*'s fields, even if part of them assumes a different meaning when speaking about lines.
In particular:
* the *class_id* field is used to gather lines extracted from different octaves that refer to the same line inside the original image (such lines, and the one they represent in the original image, share the same *class_id* value)
* the *angle* field represents the line's slope with respect to the (positive) X axis
* the *pt* field represents the line's midpoint
* the *response* field is computed as the ratio between the line's length and the maximum between the image's width and height
* the *size* field is the area of the smallest rectangle containing the line
Apart from the fields inspired by the KeyPoint class, KeyLine stores information about the extremes of the line in the original image and in the octave it was extracted from, as well as the line's length and the number of pixels it covers. The code of the KeyLine class is reported in the following snippet:
.. ocv:class:: KeyLine
::

    class CV_EXPORTS_W KeyLine
    {
    public:
        /* orientation of the line */
        float angle;

        /* object ID, that can be used to cluster keylines by the line they represent */
        int class_id;

        /* octave (pyramid layer), from which the keyline has been extracted */
        int octave;

        /* coordinates of the middlepoint */
        Point pt;

        /* the response, by which the strongest keylines have been selected.
           It's represented by the ratio between line's length and maximum between
           image's width and height */
        float response;

        /* minimum area containing line */
        float size;

        /* line's extremes in original image */
        float startPointX;
        float startPointY;
        float endPointX;
        float endPointY;

        /* line's extremes in image it was extracted from */
        float sPointInOctaveX;
        float sPointInOctaveY;
        float ePointInOctaveX;
        float ePointInOctaveY;

        /* the length of line */
        float lineLength;

        /* number of pixels covered by the line */
        unsigned int numOfPixels;

        /* constructor */
        KeyLine(){}
    };
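As a hedged illustration of how the fields relate to one another (this is not library code; it merely restates the field definitions given above), the derived quantities could be filled in from a line's extremes as follows:

.. code-block:: cpp

    #include <algorithm>
    #include <cmath>
    #include <opencv2/core.hpp>
    #include <opencv2/line_descriptor.hpp>

    using namespace cv;

    /* illustrative only: fill a KeyLine's derived fields from its extremes */
    KeyLine makeKeyLine( float sx, float sy, float ex, float ey,
                         int imgWidth, int imgHeight )
    {
      KeyLine kl;
      kl.startPointX = sx; kl.startPointY = sy;
      kl.endPointX   = ex; kl.endPointY   = ey;

      float dx = ex - sx, dy = ey - sy;
      kl.pt         = Point( cvRound( ( sx + ex ) / 2.f ),
                             cvRound( ( sy + ey ) / 2.f ) );          // midpoint
      kl.angle      = std::atan2( dy, dx );                           // slope w.r.t. (positive) X axis
      kl.lineLength = std::sqrt( dx * dx + dy * dy );
      kl.response   = kl.lineLength / std::max( imgWidth, imgHeight ); // length / max(width, height)
      return kl;
    }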
Lines extraction methodology
----------------------------
The lines extraction methodology described in the following is mainly based on [LBD]_.
The extraction starts with a Gaussian pyramid generated from the original image, which is downsampled N-1 times and blurred N times to obtain N layers (one for each octave), with layer 0 corresponding to the input image. Then, from each layer (octave) in the pyramid, lines are extracted using the LSD algorithm.
Differently from the EDLine extractor used in the original article, LSD furnishes information only about the lines' extremes; thus, additional information regarding the slope and equation of each line is computed via analytic methods. The number of pixels is obtained using `LineIterator <http://docs.opencv.org/modules/core/doc/drawing_functions.html#lineiterator>`_. Later on, all extracted lines are arranged in buckets: two lines fall into the same bucket if they represent the same line in different octaves (they have the same direction and belong to the same region of the original image). The set of buckets becomes the input for descriptor computation.
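A hedged sketch of the pyramid construction and of the per-line pixel count just described (the LSD detection call is omitted; the blur kernel size, sigma and number of octaves are illustrative choices, not the module's actual defaults):

.. code-block:: cpp

    #include <vector>
    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>

    using namespace cv;

    /* illustrative only: build an N-layer Gaussian pyramid (layer 0 = input image) */
    std::vector<Mat> buildOctaves( const Mat& image, int numOctaves )
    {
      std::vector<Mat> octaves;
      Mat current = image.clone();
      for( int i = 0; i < numOctaves; i++ )
      {
        Mat blurred;
        GaussianBlur( current, blurred, Size( 5, 5 ), 1.0 );  // blur the current layer
        octaves.push_back( blurred );
        pyrDown( blurred, current );                          // downsample for the next octave
      }
      return octaves;
    }

    /* number of pixels covered by a line, obtained with cv::LineIterator */
    int pixelsOnLine( const Mat& octaveImage, Point start, Point end )
    {
      LineIterator it( octaveImage, start, end );
      return it.count;
    }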
Computation of binary descriptors
---------------------------------
To obtain a binary descriptor representing a certain line detected from a certain octave of an image, we first compute a non-binary descriptor as described in [LBD]_. Given a line, we consider a rectangular region centered at it, called *line support region (LSR)*. Such region is divided into a set of bands :math:`\{B_1, B_2, ..., B_m\}`, whose length equals that of the line.
If we indicate with :math:`\bf{d}_L` the direction of the line, the direction :math:`\bf{d}_{\perp}` orthogonal and clockwise to it can be determined; these two directions are used to construct a reference frame centered at the midpoint of the line. The gradients of the pixels :math:`\bf{g}` inside the LSR can be projected onto the newly determined frame, obtaining their local equivalent :math:`\bf{g'} = (\bf{g}^T \cdot \bf{d}_{\perp}, \bf{g}^T \cdot \bf{d}_L)^T \triangleq (\bf{g'}_{d_{\perp}}, \bf{g'}_{d_L})^T`.
Later on, a Gaussian function is applied to all of the LSR's pixels along the :math:`\bf{d}_\perp` direction; first, we assign a global weighting coefficient :math:`f_g(i) = (1/\sqrt{2\pi}\sigma_g)e^{-d^2_i/2\sigma^2_g}` to the *i*-th row in the LSR, where :math:`d_i` is the distance of the *i*-th row from the center row of the LSR, :math:`\sigma_g = 0.5(m \cdot w - 1)` and :math:`w` is the width of the bands (the same for every band). Secondly, considering a band :math:`B_j` and its neighbor bands :math:`B_{j-1}, B_{j+1}`, we assign a local weighting coefficient :math:`f_l(k) = (1/\sqrt{2\pi}\sigma_l)e^{-d'^2_k/2\sigma_l^2}`, where :math:`d'_k` is the distance of the *k*-th row from the center row of :math:`B_j` and :math:`\sigma_l = w`. Using the global and local weights we obtain, at the same time, a reduction of the role played by gradients far from the line and of boundary effects, respectively.
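As a purely illustrative restatement of the two weighting coefficients just defined (the row-distance arguments reflect an assumed indexing convention, not the library's code):

.. code-block:: cpp

    #include <cmath>

    /* illustrative only: global and local Gaussian weights for a row of the LSR,
       with sigma_g = 0.5*(m*w - 1) and sigma_l = w as defined above */
    double globalWeight( double distFromLSRCenterRow, int m, int w )
    {
      const double PI = 3.14159265358979323846;
      double sigma_g = 0.5 * ( m * w - 1.0 );
      return std::exp( -distFromLSRCenterRow * distFromLSRCenterRow
                       / ( 2.0 * sigma_g * sigma_g ) )
             / ( std::sqrt( 2.0 * PI ) * sigma_g );
    }

    double localWeight( double distFromBandCenterRow, int w )
    {
      const double PI = 3.14159265358979323846;
      double sigma_l = (double) w;
      return std::exp( -distFromBandCenterRow * distFromBandCenterRow
                       / ( 2.0 * sigma_l * sigma_l ) )
             / ( std::sqrt( 2.0 * PI ) * sigma_l );
    }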
Each band :math:`B_j` in the LSR has an associated *band descriptor (BD)*, which is computed considering the previous and next bands (the top and bottom bands are ignored when computing the descriptor of the first and last band). Once each band has been assigned its BD, the LBD descriptor of the line is simply given by
.. math::

    LBD = (BD_1^T, BD_2^T, ... , BD_m^T)^T.
To compute the descriptor of a band :math:`B_j`, each *k*-th row in it is considered and the gradients in that row are accumulated.
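Following the formulation in [LBD]_ (the equations below are reported from that reference to make the accumulation step explicit, using the symbols defined above), these accumulated sums are

.. math::

    V1^j_k = \lambda \sum_{\bf{g'}_{d_\perp} > 0} \bf{g'}_{d_\perp}, \quad
    V2^j_k = \lambda \sum_{\bf{g'}_{d_\perp} < 0} -\bf{g'}_{d_\perp}, \quad
    V3^j_k = \lambda \sum_{\bf{g'}_{d_L} > 0} \bf{g'}_{d_L}, \quad
    V4^j_k = \lambda \sum_{\bf{g'}_{d_L} < 0} -\bf{g'}_{d_L}

where :math:`\lambda = f_g(k) f_l(k)` combines the global and local weights defined above.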
Once the LBD has been obtained, it must be converted into a binary form. For this purpose, we consider 32 possible pairs of BDs inside it; each pair of BDs is compared component by component and the comparison generates an 8-bit string. Concatenating the 32 comparison strings, we get the 256-bit final binary representation of a single LBD.
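A hedged sketch of this binarization step (the 8-component band descriptors and the list of 32 index pairs are assumptions used for illustration; this is not the library's internal code):

.. code-block:: cpp

    #include <array>
    #include <bitset>
    #include <utility>
    #include <vector>

    /* illustrative only: turn a float LBD into a 256-bit binary code by comparing
       32 pairs of band descriptors, 8 components (hence 8 bits) per pair */
    std::bitset<256> binarizeLBD( const std::vector< std::array<float, 8> >& bd,
                                  const std::vector< std::pair<int, int> >& pairs )
    {
      std::bitset<256> code;
      int bit = 0;
      for( size_t p = 0; p < pairs.size(); p++ )           // 32 pairs of BDs
        for( int t = 0; t < 8; t++ )                       // 8 components -> 8 bits
          code[bit++] = bd[pairs[p].first][t] > bd[pairs[p].second][t];
      return code;
    }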
Related Pages
-------------
* `BinaryDescriptor <BinaryDescriptor.html>`_
References
----------
.. [LBD] Zhang, Lilian, and Reinhard Koch. *An efficient and robust line segment matching approach based on LBD descriptor and pairwise geometric consistency*, Journal of Visual Communication and Image Representation 24.7 (2013): 794-805.
Once descriptors have been extracted from an image (whether they represent lines or points), it becomes interesting to match a descriptor with another one extracted from a different image and representing the same line or point, seen from a different perspective or at a different scale.
In reaching this goal, the main difficulty is designing an efficient search algorithm to associate a query descriptor with one extracted from a dataset.
In the following, a matching modality based on *Multi-Index Hashing (MiHashing)* will be described.
Multi-Index Hashing
-------------------
The theory described in this section is based on [MIH]_.
Given a dataset populated with binary codes, each code is indexed *m* times into *m* different hash tables, according to the *m* substrings it has been divided into. Thus, given a query code, all the entries close to it in at least one substring are returned by the search as *neighbor candidates*. Returned entries are then checked for validity by verifying that their full codes are not distant (in Hamming space) more than *r* bits from the query code.
In detail, each binary code **h** composed of *b* bits is divided into *m* disjoint substrings :math:`\mathbf{h}^{(1)}, ..., \mathbf{h}^{(m)}`, each with length :math:`\lfloor b/m \rfloor` or :math:`\lceil b/m \rceil` bits. Formally, when two codes **h** and **g** differ by at most *r* bits, in at least one of their *m* substrings they differ by at most :math:`\lfloor r/m \rfloor` bits. In particular, when :math:`||\mathbf{h}-\mathbf{g}||_H \le r` (where :math:`||.||_H` is the Hamming norm), there must exist a substring *k* (with :math:`1 \le k \le m`) such that
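.. math::

    ||\mathbf{h}^{(k)} - \mathbf{g}^{(k)}||_H \le \left\lfloor \frac{r}{m} \right\rfloor.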
This holds because, if the Hamming distance between each of the *m* pairs of substrings were strictly greater than :math:`\lfloor r/m \rfloor`, then :math:`||\mathbf{h}-\mathbf{g}||_H` would have to be larger than *r*, which is a contradiction.
If the codes in the dataset are divided into *m* substrings, then *m* tables will be built. Given a query **q** with substrings :math:`\{\mathbf{q}^{(i)}\}^m_{i=1}`, the *i*-th hash table is searched for entries distant at most :math:`\lfloor r/m \rfloor` from :math:`\mathbf{q}^{(i)}`, and a set of candidates :math:`\mathcal{N}_i(\mathbf{q})` is obtained.
The union of the sets, :math:`\mathcal{N}(\mathbf{q}) = \bigcup_i \mathcal{N}_i(\mathbf{q})`, is a superset of the *r*-neighbors of **q**. The last step of the algorithm is then computing the Hamming distance between **q** and each element in :math:`\mathcal{N}(\mathbf{q})`, discarding the codes that are distant more than *r* from **q**.
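A hedged, simplified sketch of this candidate-retrieval scheme (this is not the implementation used inside BinaryDescriptorMatcher; the code length, the number of substrings and the fact that only exact substring collisions are probed are illustrative simplifications):

.. code-block:: cpp

    #include <bitset>
    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    /* illustrative only: Multi-Index Hashing with b = 256-bit codes split into
       m = 8 substrings of 32 bits each */
    const int m = 8;
    typedef std::bitset<256> Code;

    struct MIHIndex
    {
      std::vector<Code> codes;                                    // the dataset
      std::unordered_map<uint32_t, std::vector<int> > tables[m];  // one table per substring

      static uint32_t substring( const Code& c, int i )           // i-th 32-bit chunk
      {
        uint32_t s = 0;
        for( int b = 0; b < 32; b++ )
          s |= (uint32_t) c[i * 32 + b] << b;
        return s;
      }

      void add( const Code& c )
      {
        int id = (int) codes.size();
        codes.push_back( c );
        for( int i = 0; i < m; i++ )
          tables[i][substring( c, i )].push_back( id );           // index the code m times
      }

      /* ids of codes within Hamming distance r of the query; a full MIH search would
         also probe buckets within floor(r/m) bits of each query substring */
      std::vector<int> radiusSearch( const Code& q, int r ) const
      {
        std::vector<int> result;
        std::vector<bool> seen( codes.size(), false );
        for( int i = 0; i < m; i++ )
        {
          std::unordered_map<uint32_t, std::vector<int> >::const_iterator it =
              tables[i].find( substring( q, i ) );
          if( it == tables[i].end() ) continue;
          for( size_t n = 0; n < it->second.size(); n++ )
          {
            int id = it->second[n];
            if( seen[id] ) continue;
            seen[id] = true;
            if( (int) ( codes[id] ^ q ).count() <= r )            // verify full Hamming distance
              result.push_back( id );
          }
        }
        return result;
      }
    };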
BinaryDescriptorMatcher Class
=============================
The BinaryDescriptorMatcher class furnishes all the functionalities for querying a dataset provided by the user or internal to the class (which the user must, in any case, populate), on the model of Feature2d's `DescriptorMatcher <http://docs.opencv.org/modules/features2d/doc/common_interfaces_of_descriptor_matchers.html?highlight=bfmatcher#descriptormatcher>`_.
.. note:: Every time this function is invoked, the current dataset is deleted and the locally stored descriptors are inserted into the dataset. The locally stored copy of the just-inserted descriptors is then removed.
:param trainDescriptors: dataset of descriptors furnished by user
:param matches: vector to host retrieved matches
:param mask: mask to select which input descriptors must be matched to one in dataset
:param masks: vector of masks to select which input descriptors must be matched to one in dataset (the *i*-th mask in vector indicates whether each input query can be matched with descriptors in dataset relative to *i*-th image)
BinaryDescriptorMatcher::knnMatch
---------------------------------
For every input query descriptor, retrieve the best *k* matching ones from a dataset provided by the user or from the one internal to the class
:param trainDescriptors: dataset of descriptors furnished by user
:param matches: vector to host retrieved matches
:param k: number of the closest descriptors to be returned for every input query
:param mask: mask to select which input descriptors must be matched to ones in dataset
:param masks: vector of masks to select which input descriptors must be matched to ones in dataset (the *i*-th mask in vector indicates whether each input query can be matched with descriptors in dataset relative to *i*-th image)
:param compactResult: flag to obtain a compact result (if true, a vector that doesn't contain any matches for a given query is not inserted in final result)
BinaryDescriptorMatcher::radiusMatch
------------------------------------
For every input query descriptor, retrieve, from a dataset provided by the user or from the one internal to the class, all the descriptors that are not farther than *maxDist* from the input query
:param trainDescriptors: dataset of descriptors furnished by user
:param matches: vector to host retrieved matches
:param maxDist: search radius
:param mask: mask to select which input descriptors must be matched to ones in dataset
:param masks: vector of masks to select which input descriptors must be matched to ones in dataset (the *i*-th mask in vector indicates whether each input query can be matched with descriptors in dataset relative to *i*-th image)
:param compactResult: flag to obtain a compact result (if true, a vector that doesn't contain any matches for a given query is not inserted in final result)
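A minimal usage sketch of the three matching modalities described above (the descriptor matrices are assumed to come from BinaryDescriptor::compute; header path and namespace are illustrative and may differ between OpenCV versions):

.. code-block:: cpp

    #include <vector>
    #include <opencv2/core.hpp>
    #include <opencv2/line_descriptor.hpp>

    using namespace cv;

    /* illustrative only: match two sets of binary line descriptors */
    void matchDescriptors( const Mat& queryDescriptors, const Mat& trainDescriptors )
    {
      BinaryDescriptorMatcher matcher;

      /* 1-to-1 matching against a user-provided dataset */
      std::vector<DMatch> matches;
      matcher.match( queryDescriptors, trainDescriptors, matches );

      /* k best matches for every query descriptor */
      std::vector< std::vector<DMatch> > knnMatches;
      matcher.knnMatch( queryDescriptors, trainDescriptors, knnMatches, 3 );

      /* all matches within a given distance from every query descriptor */
      std::vector< std::vector<DMatch> > radiusMatches;
      matcher.radiusMatch( queryDescriptors, trainDescriptors, radiusMatches, 25.0f );
    }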
Related pages
-------------
* `Binary descriptors for lines extracted from an image <line_descriptor.html>`_
* `BinaryDescriptor Class <binary_descriptor.html>`_
* `Drawing Function of Keylines and Matches <drawing_functions.html>`_
References
----------
.. [MIH] Norouzi, Mohammad, Ali Punjani, and David J. Fleet. *Fast search in hamming space with multi-index hashing*, Computer Vision and Pattern Recognition (CVPR), 2012 IEEE Conference on. IEEE, 2012.