Commit a20c5c8d authored by Maksim Shabunin's avatar Maksim Shabunin

Doxygen documentation for all modules

parent 525c4d5e
...@@ -38,3 +38,7 @@ or tort (including negligence or otherwise) arising in any way out of ...@@ -38,3 +38,7 @@ or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage. the use of this software, even if advised of the possibility of such damage.
*/ */
/** @defgroup adas Advanced Driver Assistance
*/
@incollection{KB2001,
title={An improved adaptive background mixture model for real-time tracking with shadow detection},
author={KaewTraKulPong, Pakorn and Bowden, Richard},
booktitle={Video-Based Surveillance Systems},
pages={135--144},
year={2002},
publisher={Springer}
}
@inproceedings{Gold2012,
title={Visual tracking of human visitors under variable-lighting conditions for a responsive audio art installation},
author={Godbehere, Andrew B and Matsukawa, Akihiro and Goldberg, Ken},
booktitle={American Control Conference (ACC), 2012},
pages={4305--4312},
year={2012},
organization={IEEE}
}
...@@ -44,21 +44,21 @@ the use of this software, even if advised of the possibility of such damage. ...@@ -44,21 +44,21 @@ the use of this software, even if advised of the possibility of such damage.
#ifdef __cplusplus #ifdef __cplusplus
/** @defgroup bgsegm Improved Background-Foreground Segmentation Methods
*/
namespace cv namespace cv
{ {
namespace bgsegm namespace bgsegm
{ {
/*! //! @addtogroup bgsegm
Gaussian Mixture-based Backbround/Foreground Segmentation Algorithm //! @{
The class implements the following algorithm: /** @brief Gaussian Mixture-based Background/Foreground Segmentation Algorithm.
"An improved adaptive background mixture model for real-time tracking with shadow detection"
P. KadewTraKuPong and R. Bowden,
Proc. 2nd European Workshp on Advanced Video-Based Surveillance Systems, 2001."
http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf
*/ The class implements the algorithm described in @cite KB2001.
*/
class CV_EXPORTS_W BackgroundSubtractorMOG : public BackgroundSubtractor class CV_EXPORTS_W BackgroundSubtractorMOG : public BackgroundSubtractor
{ {
public: public:
...@@ -75,54 +75,118 @@ public: ...@@ -75,54 +75,118 @@ public:
CV_WRAP virtual void setNoiseSigma(double noiseSigma) = 0; CV_WRAP virtual void setNoiseSigma(double noiseSigma) = 0;
}; };
/** @brief Creates mixture-of-gaussian background subtractor
@param history Length of the history.
@param nmixtures Number of Gaussian mixtures.
@param backgroundRatio Background ratio.
@param noiseSigma Noise strength (standard deviation of the brightness or each color channel). 0
means some automatic value.
*/
CV_EXPORTS_W Ptr<BackgroundSubtractorMOG> CV_EXPORTS_W Ptr<BackgroundSubtractorMOG>
createBackgroundSubtractorMOG(int history=200, int nmixtures=5, createBackgroundSubtractorMOG(int history=200, int nmixtures=5,
double backgroundRatio=0.7, double noiseSigma=0); double backgroundRatio=0.7, double noiseSigma=0);
/**
* Background Subtractor module. Takes a series of images and returns a sequence of mask (8UC1) /** @brief Background Subtractor module based on the algorithm given in @cite Gold2012.
* images of the same size, where 255 indicates Foreground and 0 represents Background.
* This class implements an algorithm described in "Visual Tracking of Human Visitors under Takes a series of images and returns a sequence of mask (8UC1)
* Variable-Lighting Conditions for a Responsive Audio Art Installation," A. Godbehere, images of the same size, where 255 indicates Foreground and 0 represents Background.
* A. Matsukawa, K. Goldberg, American Control Conference, Montreal, June 2012. This class implements an algorithm described in "Visual Tracking of Human Visitors under
Variable-Lighting Conditions for a Responsive Audio Art Installation," A. Godbehere,
A. Matsukawa, K. Goldberg, American Control Conference, Montreal, June 2012.
*/ */
class CV_EXPORTS_W BackgroundSubtractorGMG : public BackgroundSubtractor class CV_EXPORTS_W BackgroundSubtractorGMG : public BackgroundSubtractor
{ {
public: public:
/** @brief Returns total number of distinct colors to maintain in histogram.
*/
CV_WRAP virtual int getMaxFeatures() const = 0; CV_WRAP virtual int getMaxFeatures() const = 0;
/** @brief Sets total number of distinct colors to maintain in histogram.
*/
CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0; CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0;
/** @brief Returns the learning rate of the algorithm.
It lies between 0.0 and 1.0. It determines how quickly features are "forgotten" from
histograms.
*/
CV_WRAP virtual double getDefaultLearningRate() const = 0; CV_WRAP virtual double getDefaultLearningRate() const = 0;
/** @brief Sets the learning rate of the algorithm.
*/
CV_WRAP virtual void setDefaultLearningRate(double lr) = 0; CV_WRAP virtual void setDefaultLearningRate(double lr) = 0;
/** @brief Returns the number of frames used to initialize background model.
*/
CV_WRAP virtual int getNumFrames() const = 0; CV_WRAP virtual int getNumFrames() const = 0;
/** @brief Sets the number of frames used to initialize background model.
*/
CV_WRAP virtual void setNumFrames(int nframes) = 0; CV_WRAP virtual void setNumFrames(int nframes) = 0;
/** @brief Returns the parameter used for quantization of color-space.
It is the number of discrete levels in each channel to be used in histograms.
*/
CV_WRAP virtual int getQuantizationLevels() const = 0; CV_WRAP virtual int getQuantizationLevels() const = 0;
/** @brief Sets the parameter used for quantization of color-space
*/
CV_WRAP virtual void setQuantizationLevels(int nlevels) = 0; CV_WRAP virtual void setQuantizationLevels(int nlevels) = 0;
/** @brief Returns the prior probability that each individual pixel is a background pixel.
*/
CV_WRAP virtual double getBackgroundPrior() const = 0; CV_WRAP virtual double getBackgroundPrior() const = 0;
/** @brief Sets the prior probability that each individual pixel is a background pixel.
*/
CV_WRAP virtual void setBackgroundPrior(double bgprior) = 0; CV_WRAP virtual void setBackgroundPrior(double bgprior) = 0;
/** @brief Returns the kernel radius used for morphological operations
*/
CV_WRAP virtual int getSmoothingRadius() const = 0; CV_WRAP virtual int getSmoothingRadius() const = 0;
/** @brief Sets the kernel radius used for morphological operations
*/
CV_WRAP virtual void setSmoothingRadius(int radius) = 0; CV_WRAP virtual void setSmoothingRadius(int radius) = 0;
/** @brief Returns the value of decision threshold.
Decision value is the value above which pixel is determined to be FG.
*/
CV_WRAP virtual double getDecisionThreshold() const = 0; CV_WRAP virtual double getDecisionThreshold() const = 0;
/** @brief Sets the value of decision threshold.
*/
CV_WRAP virtual void setDecisionThreshold(double thresh) = 0; CV_WRAP virtual void setDecisionThreshold(double thresh) = 0;
/** @brief Returns the status of background model update
*/
CV_WRAP virtual bool getUpdateBackgroundModel() const = 0; CV_WRAP virtual bool getUpdateBackgroundModel() const = 0;
/** @brief Sets the status of background model update
*/
CV_WRAP virtual void setUpdateBackgroundModel(bool update) = 0; CV_WRAP virtual void setUpdateBackgroundModel(bool update) = 0;
/** @brief Returns the minimum value taken on by pixels in image sequence. Usually 0.
*/
CV_WRAP virtual double getMinVal() const = 0; CV_WRAP virtual double getMinVal() const = 0;
/** @brief Sets the minimum value taken on by pixels in image sequence.
*/
CV_WRAP virtual void setMinVal(double val) = 0; CV_WRAP virtual void setMinVal(double val) = 0;
/** @brief Returns the maximum value taken on by pixels in image sequence. e.g. 1.0 or 255.
*/
CV_WRAP virtual double getMaxVal() const = 0; CV_WRAP virtual double getMaxVal() const = 0;
/** @brief Sets the maximum value taken on by pixels in image sequence.
*/
CV_WRAP virtual void setMaxVal(double val) = 0; CV_WRAP virtual void setMaxVal(double val) = 0;
}; };
/** @brief Creates a GMG Background Subtractor
@param initializationFrames number of frames used to initialize the background models.
@param decisionThreshold Threshold value, above which it is marked foreground, else background.
*/
CV_EXPORTS_W Ptr<BackgroundSubtractorGMG> createBackgroundSubtractorGMG(int initializationFrames=120, CV_EXPORTS_W Ptr<BackgroundSubtractorGMG> createBackgroundSubtractorGMG(int initializationFrames=120,
double decisionThreshold=0.8); double decisionThreshold=0.8);
//! @}
} }
} }
......
@article{Benoit2010,
title={Using human visual system modeling for bio-inspired low level image processing},
author={Benoit, Alexandre and Caplier, Alice and Durette, Barth{\'e}l{\'e}my and H{\'e}rault, Jeanny},
journal={Computer vision and Image understanding},
volume={114},
number={7},
pages={758--773},
year={2010},
publisher={Elsevier}
}
@inproceedings{Strat2013,
title={Retina enhanced SIFT descriptors for video indexing},
author={Strat, Sabin Tiberius and Benoit, Alexandre and Lambert, Patrick},
booktitle={Content-Based Multimedia Indexing (CBMI), 2013 11th International Workshop on},
pages={201--206},
year={2013},
organization={IEEE}
}
@book{Herault2010,
title={Vision: Images, Signals and Neural Networks-Models of Neural Processing in Visual Perception},
author={Jeanny, Herault},
year={2010},
publisher={World Scientific}
}
@inproceedings{Chaix2007,
title={Efficient demosaicing through recursive filtering},
author={De Lavar{\`e}ne, Brice Chaix and Alleysson, David and Durette, Barth{\'e}l{\'e}my and H{\'e}rault, Jeanny},
booktitle={Image Processing, 2007. ICIP 2007. IEEE International Conference on},
volume={2},
pages={II--189},
year={2007},
organization={IEEE}
}
@article{Meylan2007,
title={Model of retinal local adaptation for the tone mapping of color filter array images},
author={Meylan, Laurence and Alleysson, David and S{\"u}sstrunk, Sabine},
journal={JOSA A},
volume={24},
number={9},
pages={2807--2816},
year={2007},
publisher={Optical Society of America}
}
...@@ -7,4 +7,4 @@ The module provides biological visual systems models (human visual system and ot ...@@ -7,4 +7,4 @@ The module provides biological visual systems models (human visual system and ot
.. toctree:: .. toctree::
:maxdepth: 2 :maxdepth: 2
Human retina documentation <retina/index> Human retina documentation <retina>
This diff is collapsed.
...@@ -47,4 +47,14 @@ ...@@ -47,4 +47,14 @@
#include "opencv2/bioinspired/retina.hpp" #include "opencv2/bioinspired/retina.hpp"
#include "opencv2/bioinspired/retinafasttonemapping.hpp" #include "opencv2/bioinspired/retinafasttonemapping.hpp"
#include "opencv2/bioinspired/transientareassegmentationmodule.hpp" #include "opencv2/bioinspired/transientareassegmentationmodule.hpp"
/** @defgroup bioinspired Biologically inspired vision models and derived tools
The module provides biological visual systems models (human visual system and others). It also
provides derived objects that take advantage of those bio-inspired models.
@ref bioinspired_retina
*/
#endif #endif
...@@ -67,11 +67,10 @@ ...@@ -67,11 +67,10 @@
#ifndef __OPENCV_BIOINSPIRED_RETINAFASTTONEMAPPING_HPP__ #ifndef __OPENCV_BIOINSPIRED_RETINAFASTTONEMAPPING_HPP__
#define __OPENCV_BIOINSPIRED_RETINAFASTTONEMAPPING_HPP__ #define __OPENCV_BIOINSPIRED_RETINAFASTTONEMAPPING_HPP__
/* /**
* retinafasttonemapping.hpp @file
* @date May 26, 2013
* Created on: May 26, 2013 @author Alexandre Benoit
* Author: Alexandre Benoit
*/ */
#include "opencv2/core.hpp" // for all OpenCV core functionalities access, including cv::Exception support #include "opencv2/core.hpp" // for all OpenCV core functionalities access, including cv::Exception support
...@@ -79,43 +78,61 @@ ...@@ -79,43 +78,61 @@
namespace cv{ namespace cv{
namespace bioinspired{ namespace bioinspired{
/** //! @addtogroup bioinspired
* a wrapper class which allows the tone mapping algorithm of Meylan&al(2007) to be used with OpenCV. //! @{
* This algorithm is already implemented in thre Retina class (retina::applyFastToneMapping) but used it does not require all the retina model to be allocated. This allows a light memory use for low memory devices (smartphones, etc.
* As a summary, these are the model properties: /** @brief a wrapper class which allows the tone mapping algorithm of Meylan&al(2007) to be used with OpenCV.
* => 2 stages of local luminance adaptation with a different local neighborhood for each.
* => first stage models the retina photoreceptors local luminance adaptation This algorithm is already implemented in the Retina class (retina::applyFastToneMapping) but using it does not require all the retina model to be allocated. This allows a light memory use for low memory devices (smartphones, etc.
* => second stage models the ganglion cells local information adaptation As a summary, these are the model properties:
* => compared to the initial publication, this class uses spatio-temporal low pass filters instead of spatial only filters. - 2 stages of local luminance adaptation with a different local neighborhood for each.
* ====> this can help noise robustness and temporal stability for video sequence use cases. - first stage models the retina photoreceptors local luminance adaptation
* for more information, refer to the following papers : - second stage models the ganglion cells local information adaptation
* Meylan L., Alleysson D., and Susstrunk S., A Model of Retinal Local Adaptation for the Tone Mapping of Color Filter Array Images, Journal of Optical Society of America, A, Vol. 24, N 9, September, 1st, 2007, pp. 2807-2816Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011 - compared to the initial publication, this class uses spatio-temporal low pass filters instead of spatial only filters.
* regarding spatio-temporal filter and the bigger retina model : this can help noise robustness and temporal stability for video sequence use cases.
* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891.
*/ for more information, read to the following papers :
Meylan L., Alleysson D., and Susstrunk S., A Model of Retinal Local Adaptation for the Tone Mapping of Color Filter Array Images, Journal of Optical Society of America, A, Vol. 24, N 9, September, 1st, 2007, pp. 2807-2816Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
regarding spatio-temporal filter and the bigger retina model :
Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891.
*/
class CV_EXPORTS_W RetinaFastToneMapping : public Algorithm class CV_EXPORTS_W RetinaFastToneMapping : public Algorithm
{ {
public: public:
/** /** @brief applies a luminance correction (initially High Dynamic Range (HDR) tone mapping)
* method that applies a luminance correction (initially High Dynamic Range (HDR) tone mapping) using only the 2 local adaptation stages of the retina parvocellular channel : photoreceptors level and ganglion cells level. Spatio temporal filtering is applied but limited to temporal smoothing and eventually high frequencies attenuation. This is a lighter method than the one available using the regular retina::run method. It is then faster but it does not include complete temporal filtering nor retina spectral whitening. Then, it can have a more limited effect on images with a very high dynamic range. This is an adaptation of the original still image HDR tone mapping algorithm of David Alleyson, Sabine Susstruck and Laurence Meylan's work, please cite:
* -> Meylan L., Alleysson D., and Susstrunk S., A Model of Retinal Local Adaptation for the Tone Mapping of Color Filter Array Images, Journal of Optical Society of America, A, Vol. 24, N 9, September, 1st, 2007, pp. 2807-2816 using only the 2 local adaptation stages of the retina parvocellular channel : photoreceptors
@param inputImage the input image to process RGB or gray levels level and ganglion cells level. Spatio temporal
@param outputToneMappedImage the output tone mapped image smoothing and eventually high frequencies attenuation. This is a lighter method than the one
*/ available using the regular retina::run method. It is then faster but it does not include
complete temporal filtering nor retina spectral whitening. Then, it can have a more limited
effect on images with a very high dynamic range. This is an adaptation of the original still
image HDR tone mapping algorithm of David Alleyson, Sabine Susstruck and Laurence Meylan's
work, please cite: -> Meylan L., Alleysson D., and Susstrunk S., A Model of Retinal Local
Adaptation for the Tone Mapping of Color Filter Array Images, Journal of Optical Society of
America, A, Vol. 24, N 9, September, 1st, 2007, pp. 2807-2816
@param inputImage the input image to process RGB or gray levels
@param outputToneMappedImage the output tone mapped image
*/
CV_WRAP virtual void applyFastToneMapping(InputArray inputImage, OutputArray outputToneMappedImage)=0; CV_WRAP virtual void applyFastToneMapping(InputArray inputImage, OutputArray outputToneMappedImage)=0;
/** /** @brief updates tone mapping behaviors by adjusting the local luminance computation area
* setup method that updates tone mapping behaviors by adjusting the local luminance computation area
* @param photoreceptorsNeighborhoodRadius the first stage local adaptation area @param photoreceptorsNeighborhoodRadius the first stage local adaptation area
* @param ganglioncellsNeighborhoodRadius the second stage local adaptation area @param ganglioncellsNeighborhoodRadius the second stage local adaptation area
* @param meanLuminanceModulatorK the factor applied to modulate the meanLuminance information (default is 1, see reference paper) @param meanLuminanceModulatorK the factor applied to modulate the meanLuminance information
(default is 1, see reference paper)
*/ */
CV_WRAP virtual void setup(const float photoreceptorsNeighborhoodRadius=3.f, const float ganglioncellsNeighborhoodRadius=1.f, const float meanLuminanceModulatorK=1.f)=0; CV_WRAP virtual void setup(const float photoreceptorsNeighborhoodRadius=3.f, const float ganglioncellsNeighborhoodRadius=1.f, const float meanLuminanceModulatorK=1.f)=0;
}; };
//! @relates bioinspired::RetinaFastToneMapping
CV_EXPORTS_W Ptr<RetinaFastToneMapping> createRetinaFastToneMapping(Size inputSize); CV_EXPORTS_W Ptr<RetinaFastToneMapping> createRetinaFastToneMapping(Size inputSize);
//! @}
} }
} }
#endif /* __OPENCV_BIOINSPIRED_RETINAFASTTONEMAPPING_HPP__ */ #endif /* __OPENCV_BIOINSPIRED_RETINAFASTTONEMAPPING_HPP__ */
...@@ -49,8 +49,14 @@ ...@@ -49,8 +49,14 @@
#include <vector> #include <vector>
/** @defgroup ccalib Custom Calibration Pattern for 3D reconstruction
*/
namespace cv{ namespace ccalib{ namespace cv{ namespace ccalib{
//! @addtogroup ccalib
//! @{
class CV_EXPORTS CustomPattern : public Algorithm class CV_EXPORTS CustomPattern : public Algorithm
{ {
public: public:
...@@ -66,11 +72,11 @@ public: ...@@ -66,11 +72,11 @@ public:
bool isInitialized(); bool isInitialized();
void getPatternPoints(OutputArray original_points); void getPatternPoints(OutputArray original_points);
/* /**<
Returns a vector<Point> of the original points. Returns a vector<Point> of the original points.
*/ */
double getPixelSize(); double getPixelSize();
/* /**<
Get the pixel size of the pattern Get the pixel size of the pattern
*/ */
...@@ -86,7 +92,7 @@ public: ...@@ -86,7 +92,7 @@ public:
Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags = 0, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags = 0,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)); TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON));
/* /**<
Calls the calibrateCamera function with the same inputs. Calls the calibrateCamera function with the same inputs.
*/ */
...@@ -94,7 +100,7 @@ public: ...@@ -94,7 +100,7 @@ public:
OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE); OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE);
bool findRt(InputArray image, InputArray cameraMatrix, InputArray distCoeffs, bool findRt(InputArray image, InputArray cameraMatrix, InputArray distCoeffs,
OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE); OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE);
/* /**<
Uses solvePnP to find the rotation and translation of the pattern Uses solvePnP to find the rotation and translation of the pattern
with respect to the camera frame. with respect to the camera frame.
*/ */
...@@ -105,13 +111,13 @@ public: ...@@ -105,13 +111,13 @@ public:
bool findRtRANSAC(InputArray image, InputArray cameraMatrix, InputArray distCoeffs, bool findRtRANSAC(InputArray image, InputArray cameraMatrix, InputArray distCoeffs,
OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess = false, int iterationsCount = 100, OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess = false, int iterationsCount = 100,
float reprojectionError = 8.0, int minInliersCount = 100, OutputArray inliers = noArray(), int flags = SOLVEPNP_ITERATIVE); float reprojectionError = 8.0, int minInliersCount = 100, OutputArray inliers = noArray(), int flags = SOLVEPNP_ITERATIVE);
/* /**<
Uses solvePnPRansac() Uses solvePnPRansac()
*/ */
void drawOrientation(InputOutputArray image, InputArray tvec, InputArray rvec, InputArray cameraMatrix, void drawOrientation(InputOutputArray image, InputArray tvec, InputArray rvec, InputArray cameraMatrix,
InputArray distCoeffs, double axis_length = 3, int axis_width = 2); InputArray distCoeffs, double axis_length = 3, int axis_width = 2);
/* /**<
pattern_corners -> projected over the image position of the edges of the pattern. pattern_corners -> projected over the image position of the edges of the pattern.
*/ */
...@@ -144,6 +150,8 @@ private: ...@@ -144,6 +150,8 @@ private:
void refineKeypointsPos(const Mat& img, std::vector<KeyPoint>& kp); void refineKeypointsPos(const Mat& img, std::vector<KeyPoint>& kp);
}; };
//! @}
}} // namespace ccalib, cv }} // namespace ccalib, cv
#endif #endif
...@@ -7,5 +7,5 @@ The module provides an interactive GUI to debug and incrementally design compute ...@@ -7,5 +7,5 @@ The module provides an interactive GUI to debug and incrementally design compute
.. toctree:: .. toctree::
:maxdepth: 2 :maxdepth: 2
CVV API Documentation <cvv_api/index> CVV API Documentation <cvv_api>
CVV GUI Documentation <cvv_gui/index> CVV GUI Documentation <cvv_gui>
...@@ -7,6 +7,10 @@ ...@@ -7,6 +7,10 @@
namespace cvv namespace cvv
{ {
//! @addtogroup cvv
//! @{
namespace impl namespace impl
{ {
...@@ -49,6 +53,9 @@ struct CallMetaData ...@@ -49,6 +53,9 @@ struct CallMetaData
const bool isKnown; const bool isKnown;
}; };
} }
//! @}
} // namespaces } // namespaces
#ifdef __GNUC__ #ifdef __GNUC__
......
/**
@defgroup cvv GUI for Interactive Visual Debugging of Computer Vision Programs
Namespace for all functions is **cvv**, i.e. *cvv::showImage()*.
Compilation:
- For development, i.e. for cvv GUI to show up, compile your code using cvv with
*g++ -DCVVISUAL_DEBUGMODE*.
- For release, i.e. cvv calls doing nothing, compile your code without above flag.
See cvv tutorial for a commented example application using cvv.
*/
#include <opencv2/cvv/call_meta_data.hpp> #include <opencv2/cvv/call_meta_data.hpp>
#include <opencv2/cvv/debug_mode.hpp> #include <opencv2/cvv/debug_mode.hpp>
#include <opencv2/cvv/dmatch.hpp> #include <opencv2/cvv/dmatch.hpp>
#include <opencv2/cvv/filter.hpp> #include <opencv2/cvv/filter.hpp>
#include <opencv2/cvv/final_show.hpp> #include <opencv2/cvv/final_show.hpp>
#include <opencv2/cvv/show_image.hpp> #include <opencv2/cvv/show_image.hpp>
...@@ -10,6 +10,9 @@ ...@@ -10,6 +10,9 @@
namespace cvv namespace cvv
{ {
//! @addtogroup cvv
//! @{
namespace impl namespace impl
{ {
...@@ -24,22 +27,25 @@ static inline bool &getDebugFlag() ...@@ -24,22 +27,25 @@ static inline bool &getDebugFlag()
} // namespace impl } // namespace impl
/** /** @brief Returns whether debug-mode is active for this TU and thread.
* @brief Returns whether debug-mode is active for this TU and thread. */
*/
static inline bool debugMode() static inline bool debugMode()
{ {
return impl::getDebugFlag(); return impl::getDebugFlag();
} }
/** /** @brief Enable or disable cvv for current translation unit and thread
* @brief Set the debug-mode for this TU and thread.
(disabled this way has higher - but still low - overhead compared to using the compile flags).
@param active
*/ */
static inline void setDebugFlag(bool active) static inline void setDebugFlag(bool active)
{ {
impl::getDebugFlag() = active; impl::getDebugFlag() = active;
} }
//! @}
} // namespace cvv } // namespace cvv
#endif #endif
...@@ -9,9 +9,16 @@ ...@@ -9,9 +9,16 @@
#include "call_meta_data.hpp" #include "call_meta_data.hpp"
#include "debug_mode.hpp" #include "debug_mode.hpp"
#ifdef CV_DOXYGEN
#define CVVISUAL_DEBUGMODE
#endif
namespace cvv namespace cvv
{ {
//! @addtogroup cvv
//! @{
namespace impl namespace impl
{ {
void debugDMatch(cv::InputArray img1, std::vector<cv::KeyPoint> keypoints1, void debugDMatch(cv::InputArray img1, std::vector<cv::KeyPoint> keypoints1,
...@@ -22,6 +29,22 @@ void debugDMatch(cv::InputArray img1, std::vector<cv::KeyPoint> keypoints1, ...@@ -22,6 +29,22 @@ void debugDMatch(cv::InputArray img1, std::vector<cv::KeyPoint> keypoints1,
} // namespace impl } // namespace impl
#ifdef CVVISUAL_DEBUGMODE #ifdef CVVISUAL_DEBUGMODE
/** @brief Add a filled in DMatch \<dmatch\> to debug GUI.
The matches are visualized for interactive inspection in different GUI views (one similar to an
interactive :draw_matches:drawMatches\<\>).
@param img1 First image used in DMatch \<dmatch\>.
@param keypoints1 Keypoints of first image.
@param img2 Second image used in DMatch.
@param keypoints2 Keypoints of second image.
@param matches
@param data See showImage
@param description See showImage
@param view See showImage
@param useTrainDescriptor Use DMatch \<dmatch\>'s train descriptor index instead of query
descriptor index.
*/
static inline void static inline void
debugDMatch(cv::InputArray img1, std::vector<cv::KeyPoint> keypoints1, debugDMatch(cv::InputArray img1, std::vector<cv::KeyPoint> keypoints1,
cv::InputArray img2, std::vector<cv::KeyPoint> keypoints2, cv::InputArray img2, std::vector<cv::KeyPoint> keypoints2,
...@@ -36,6 +59,7 @@ debugDMatch(cv::InputArray img1, std::vector<cv::KeyPoint> keypoints1, ...@@ -36,6 +59,7 @@ debugDMatch(cv::InputArray img1, std::vector<cv::KeyPoint> keypoints1,
data, description, view, useTrainDescriptor); data, description, view, useTrainDescriptor);
} }
} }
/** @overload */
static inline void static inline void
debugDMatch(cv::InputArray img1, std::vector<cv::KeyPoint> keypoints1, debugDMatch(cv::InputArray img1, std::vector<cv::KeyPoint> keypoints1,
cv::InputArray img2, std::vector<cv::KeyPoint> keypoints2, cv::InputArray img2, std::vector<cv::KeyPoint> keypoints2,
...@@ -52,9 +76,6 @@ debugDMatch(cv::InputArray img1, std::vector<cv::KeyPoint> keypoints1, ...@@ -52,9 +76,6 @@ debugDMatch(cv::InputArray img1, std::vector<cv::KeyPoint> keypoints1,
} }
} }
#else #else
/**
* @brief Debug a set of matches between two images.
*/
static inline void debugDMatch(cv::InputArray, std::vector<cv::KeyPoint>, static inline void debugDMatch(cv::InputArray, std::vector<cv::KeyPoint>,
cv::InputArray, std::vector<cv::KeyPoint>, cv::InputArray, std::vector<cv::KeyPoint>,
std::vector<cv::DMatch>, std::vector<cv::DMatch>,
...@@ -63,9 +84,6 @@ static inline void debugDMatch(cv::InputArray, std::vector<cv::KeyPoint>, ...@@ -63,9 +84,6 @@ static inline void debugDMatch(cv::InputArray, std::vector<cv::KeyPoint>,
bool = true) bool = true)
{ {
} }
/**
* Dito.
*/
static inline void debugDMatch(cv::InputArray, std::vector<cv::KeyPoint>, static inline void debugDMatch(cv::InputArray, std::vector<cv::KeyPoint>,
cv::InputArray, std::vector<cv::KeyPoint>, cv::InputArray, std::vector<cv::KeyPoint>,
std::vector<cv::DMatch>, std::vector<cv::DMatch>,
...@@ -75,6 +93,8 @@ static inline void debugDMatch(cv::InputArray, std::vector<cv::KeyPoint>, ...@@ -75,6 +93,8 @@ static inline void debugDMatch(cv::InputArray, std::vector<cv::KeyPoint>,
} }
#endif #endif
//! @}
} // namespace cvv } // namespace cvv
#endif #endif
...@@ -8,9 +8,16 @@ ...@@ -8,9 +8,16 @@
#include "call_meta_data.hpp" #include "call_meta_data.hpp"
#include "debug_mode.hpp" #include "debug_mode.hpp"
#ifdef CV_DOXYGEN
#define CVVISUAL_DEBUGMODE
#endif
namespace cvv namespace cvv
{ {
//! @addtogroup cvv
//! @{
namespace impl namespace impl
{ {
// implementation outside API // implementation outside API
...@@ -20,6 +27,11 @@ void debugFilter(cv::InputArray original, cv::InputArray result, ...@@ -20,6 +27,11 @@ void debugFilter(cv::InputArray original, cv::InputArray result,
} // namespace impl } // namespace impl
#ifdef CVVISUAL_DEBUGMODE #ifdef CVVISUAL_DEBUGMODE
/**
* @brief Use the debug-framework to compare two images (from which the second
* is intended to be the result of
* a filter applied to the first).
*/
static inline void static inline void
debugFilter(cv::InputArray original, cv::InputArray result, debugFilter(cv::InputArray original, cv::InputArray result,
impl::CallMetaData metaData = impl::CallMetaData(), impl::CallMetaData metaData = impl::CallMetaData(),
...@@ -31,6 +43,7 @@ debugFilter(cv::InputArray original, cv::InputArray result, ...@@ -31,6 +43,7 @@ debugFilter(cv::InputArray original, cv::InputArray result,
view); view);
} }
} }
/** @overload */
static inline void debugFilter(cv::InputArray original, cv::InputArray result, static inline void debugFilter(cv::InputArray original, cv::InputArray result,
impl::CallMetaData metaData, impl::CallMetaData metaData,
const ::std::string &description, const ::std::string &description,
...@@ -43,20 +56,12 @@ static inline void debugFilter(cv::InputArray original, cv::InputArray result, ...@@ -43,20 +56,12 @@ static inline void debugFilter(cv::InputArray original, cv::InputArray result,
} }
} }
#else #else
/**
* @brief Use the debug-framework to compare two images (from which the second
* is intended to be the result of
* a filter applied to the first).
*/
static inline void debugFilter(cv::InputArray, cv::InputArray, static inline void debugFilter(cv::InputArray, cv::InputArray,
impl::CallMetaData = impl::CallMetaData(), impl::CallMetaData = impl::CallMetaData(),
const char * = nullptr, const char * = nullptr) const char * = nullptr, const char * = nullptr)
{ {
} }
/**
* Dito.
*/
static inline void debugFilter(cv::InputArray, cv::InputArray, static inline void debugFilter(cv::InputArray, cv::InputArray,
impl::CallMetaData, const ::std::string &, impl::CallMetaData, const ::std::string &,
const ::std::string &) const ::std::string &)
...@@ -64,6 +69,8 @@ static inline void debugFilter(cv::InputArray, cv::InputArray, ...@@ -64,6 +69,8 @@ static inline void debugFilter(cv::InputArray, cv::InputArray,
} }
#endif #endif
//! @}
} // namespace cvv } // namespace cvv
#endif #endif
...@@ -6,22 +6,18 @@ ...@@ -6,22 +6,18 @@
namespace cvv namespace cvv
{ {
//! @addtogroup cvv
//! @{
namespace impl namespace impl
{ {
void finalShow(); void finalShow();
} }
/** /** @brief Passes the control to the debug-window for a last time.
* @brief Passes the control to the debug-window for a last time.
* This function **must** be called *once* *after* all cvv calls if any. As an alternative create an
* This function must be called once if there was any prior debug-call. After that all debug-data instance of FinalShowCaller, which calls finalShow() in its destructor (RAII-style).
* are freed.
*
* If there was no prior call it may be called once in which case it returns
* without opening a window.
*
* In either case no further debug-calls must be made (undefined behaviour!!).
*
*/ */
inline void finalShow() inline void finalShow()
{ {
...@@ -48,6 +44,8 @@ public: ...@@ -48,6 +44,8 @@ public:
} }
}; };
//! @}
} }
#endif #endif
...@@ -8,9 +8,16 @@ ...@@ -8,9 +8,16 @@
#include "call_meta_data.hpp" #include "call_meta_data.hpp"
#include "debug_mode.hpp" #include "debug_mode.hpp"
#ifdef CV_DOXYGEN
#define CVVISUAL_DEBUGMODE
#endif
namespace cvv namespace cvv
{ {
//! @addtogroup cvv
//! @{
namespace impl namespace impl
{ {
// implementation outside API // implementation outside API
...@@ -19,6 +26,15 @@ void showImage(cv::InputArray img, const CallMetaData &data, ...@@ -19,6 +26,15 @@ void showImage(cv::InputArray img, const CallMetaData &data,
} // namespace impl } // namespace impl
#ifdef CVVISUAL_DEBUGMODE #ifdef CVVISUAL_DEBUGMODE
/** @brief Add a single image to debug GUI (similar to imshow \<\>).
@param img Image to show in debug GUI.
@param metaData Properly initialized CallMetaData struct, i.e. information about file, line and
function name for GUI. Use CVVISUAL_LOCATION macro.
@param description Human readable description to provide context to image.
@param view Preselect view that will be used to visualize this image in GUI. Other views can still
be selected in GUI later on.
*/
static inline void showImage(cv::InputArray img, static inline void showImage(cv::InputArray img,
impl::CallMetaData metaData = impl::CallMetaData(), impl::CallMetaData metaData = impl::CallMetaData(),
const char *description = nullptr, const char *description = nullptr,
...@@ -29,6 +45,7 @@ static inline void showImage(cv::InputArray img, ...@@ -29,6 +45,7 @@ static inline void showImage(cv::InputArray img,
impl::showImage(img, metaData, description, view); impl::showImage(img, metaData, description, view);
} }
} }
/** @overload */
static inline void showImage(cv::InputArray img, impl::CallMetaData metaData, static inline void showImage(cv::InputArray img, impl::CallMetaData metaData,
const ::std::string &description, const ::std::string &description,
const ::std::string &view = "") const ::std::string &view = "")
...@@ -40,23 +57,19 @@ static inline void showImage(cv::InputArray img, impl::CallMetaData metaData, ...@@ -40,23 +57,19 @@ static inline void showImage(cv::InputArray img, impl::CallMetaData metaData,
} }
} }
#else #else
/**
* Use the debug-framework to show a single image.
*/
static inline void showImage(cv::InputArray, static inline void showImage(cv::InputArray,
impl::CallMetaData = impl::CallMetaData(), impl::CallMetaData = impl::CallMetaData(),
const char * = nullptr, const char * = nullptr) const char * = nullptr, const char * = nullptr)
{ {
} }
/**
* Dito.
*/
static inline void showImage(cv::InputArray, impl::CallMetaData, static inline void showImage(cv::InputArray, impl::CallMetaData,
const ::std::string &, const ::std::string &) const ::std::string &, const ::std::string &)
{ {
} }
#endif #endif
//! @}
} // namespace cvv } // namespace cvv
#endif #endif
...@@ -15,105 +15,105 @@ It is planned to have: ...@@ -15,105 +15,105 @@ It is planned to have:
.. toctree:: .. toctree::
:hidden: :hidden:
datasets/ar_hmdb ar_hmdb
datasets/ar_sports ar_sports
datasets/fr_adience fr_adience
datasets/fr_lfw fr_lfw
datasets/gr_chalearn gr_chalearn
datasets/gr_skig gr_skig
datasets/hpe_humaneva hpe_humaneva
datasets/hpe_parse hpe_parse
datasets/ir_affine ir_affine
datasets/ir_robot ir_robot
datasets/is_bsds is_bsds
datasets/is_weizmann is_weizmann
datasets/msm_epfl msm_epfl
datasets/msm_middlebury msm_middlebury
datasets/or_imagenet or_imagenet
datasets/or_mnist or_mnist
datasets/or_sun or_sun
datasets/pd_caltech pd_caltech
datasets/slam_kitti slam_kitti
datasets/slam_tumindoor slam_tumindoor
datasets/tr_chars tr_chars
datasets/tr_svt tr_svt
Action Recognition Action Recognition
------------------ ------------------
:doc:`datasets/ar_hmdb` [#f1]_ :doc:`ar_hmdb` [#f1]_
:doc:`datasets/ar_sports` :doc:`ar_sports`
Face Recognition Face Recognition
---------------- ----------------
:doc:`datasets/fr_adience` :doc:`fr_adience`
:doc:`datasets/fr_lfw` [#f1]_ :doc:`fr_lfw` [#f1]_
Gesture Recognition Gesture Recognition
------------------- -------------------
:doc:`datasets/gr_chalearn` :doc:`gr_chalearn`
:doc:`datasets/gr_skig` :doc:`gr_skig`
Human Pose Estimation Human Pose Estimation
--------------------- ---------------------
:doc:`datasets/hpe_humaneva` :doc:`hpe_humaneva`
:doc:`datasets/hpe_parse` :doc:`hpe_parse`
Image Registration Image Registration
------------------ ------------------
:doc:`datasets/ir_affine` :doc:`ir_affine`
:doc:`datasets/ir_robot` :doc:`ir_robot`
Image Segmentation Image Segmentation
------------------ ------------------
:doc:`datasets/is_bsds` :doc:`is_bsds`
:doc:`datasets/is_weizmann` :doc:`is_weizmann`
Multiview Stereo Matching Multiview Stereo Matching
------------------------- -------------------------
:doc:`datasets/msm_epfl` :doc:`msm_epfl`
:doc:`datasets/msm_middlebury` :doc:`msm_middlebury`
Object Recognition Object Recognition
------------------ ------------------
:doc:`datasets/or_imagenet` :doc:`or_imagenet`
:doc:`datasets/or_mnist` [#f2]_ :doc:`or_mnist` [#f2]_
:doc:`datasets/or_sun` :doc:`or_sun`
Pedestrian Detection Pedestrian Detection
-------------------- --------------------
:doc:`datasets/pd_caltech` [#f2]_ :doc:`pd_caltech` [#f2]_
SLAM SLAM
---- ----
:doc:`datasets/slam_kitti` :doc:`slam_kitti`
:doc:`datasets/slam_tumindoor` :doc:`slam_tumindoor`
Text Recognition Text Recognition
---------------- ----------------
:doc:`datasets/tr_chars` :doc:`tr_chars`
:doc:`datasets/tr_svt` [#f1]_ :doc:`tr_svt` [#f1]_
*Footnotes* *Footnotes*
......
...@@ -54,6 +54,9 @@ namespace cv ...@@ -54,6 +54,9 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets_ar
//! @{
struct AR_hmdbObj : public Object struct AR_hmdbObj : public Object
{ {
int id; int id;
...@@ -69,6 +72,8 @@ public: ...@@ -69,6 +72,8 @@ public:
static Ptr<AR_hmdb> create(); static Ptr<AR_hmdb> create();
}; };
//! @}
} }
} }
......
...@@ -54,6 +54,9 @@ namespace cv ...@@ -54,6 +54,9 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets_ar
//! @{
struct AR_sportsObj : public Object struct AR_sportsObj : public Object
{ {
std::string videoUrl; std::string videoUrl;
...@@ -68,6 +71,8 @@ public: ...@@ -68,6 +71,8 @@ public:
static Ptr<AR_sports> create(); static Ptr<AR_sports> create();
}; };
//! @}
} }
} }
......
...@@ -54,6 +54,9 @@ namespace cv ...@@ -54,6 +54,9 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets_fr
//! @{
enum genderType enum genderType
{ {
male = 0, male = 0,
...@@ -87,6 +90,8 @@ public: ...@@ -87,6 +90,8 @@ public:
std::vector<std::string> paths; std::vector<std::string> paths;
}; };
//! @}
} }
} }
......
...@@ -54,6 +54,9 @@ namespace cv ...@@ -54,6 +54,9 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets_fr
//! @{
struct FR_lfwObj : public Object struct FR_lfwObj : public Object
{ {
std::string image1, image2; std::string image1, image2;
...@@ -68,6 +71,8 @@ public: ...@@ -68,6 +71,8 @@ public:
static Ptr<FR_lfw> create(); static Ptr<FR_lfw> create();
}; };
//! @}
} }
} }
......
...@@ -54,6 +54,9 @@ namespace cv ...@@ -54,6 +54,9 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets_gr
//! @{
struct groundTruth struct groundTruth
{ {
int gestureID, initialFrame, lastFrame; int gestureID, initialFrame, lastFrame;
...@@ -85,6 +88,8 @@ public: ...@@ -85,6 +88,8 @@ public:
static Ptr<GR_chalearn> create(); static Ptr<GR_chalearn> create();
}; };
//! @}
} }
} }
......
...@@ -54,6 +54,9 @@ namespace cv ...@@ -54,6 +54,9 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets_gr
//! @{
enum actionType enum actionType
{ {
circle = 1, circle = 1,
...@@ -107,6 +110,8 @@ public: ...@@ -107,6 +110,8 @@ public:
static Ptr<GR_skig> create(); static Ptr<GR_skig> create();
}; };
//! @}
} }
} }
......
...@@ -54,6 +54,9 @@ namespace cv ...@@ -54,6 +54,9 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets_hpe
//! @{
struct HPE_humanevaObj : public Object struct HPE_humanevaObj : public Object
{ {
char person; // 1..4 char person; // 1..4
...@@ -79,6 +82,8 @@ public: ...@@ -79,6 +82,8 @@ public:
static Ptr<HPE_humaneva> create(int num=humaneva_1); static Ptr<HPE_humaneva> create(int num=humaneva_1);
}; };
//! @}
} }
} }
......
...@@ -54,6 +54,9 @@ namespace cv ...@@ -54,6 +54,9 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets_hpe
//! @{
struct HPE_parseObj : public Object struct HPE_parseObj : public Object
{ {
std::string name; std::string name;
...@@ -67,6 +70,8 @@ public: ...@@ -67,6 +70,8 @@ public:
static Ptr<HPE_parse> create(); static Ptr<HPE_parse> create();
}; };
//! @}
} }
} }
......
...@@ -55,6 +55,9 @@ namespace cv ...@@ -55,6 +55,9 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets_ir
//! @{
struct IR_affineObj : public Object struct IR_affineObj : public Object
{ {
std::string imageName; std::string imageName;
...@@ -69,6 +72,8 @@ public: ...@@ -69,6 +72,8 @@ public:
static Ptr<IR_affine> create(); static Ptr<IR_affine> create();
}; };
//! @}
} }
} }
......
...@@ -54,6 +54,9 @@ namespace cv ...@@ -54,6 +54,9 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets_ir
//! @{
// calibration matrix from calibrationFile.mat // calibration matrix from calibrationFile.mat
// 2.8290e+03 0.0000e+00 8.0279e+02 // 2.8290e+03 0.0000e+00 8.0279e+02
// 0.0000e+00 2.8285e+03 6.1618e+02 // 0.0000e+00 2.8285e+03 6.1618e+02
...@@ -78,6 +81,8 @@ public: ...@@ -78,6 +81,8 @@ public:
static Ptr<IR_robot> create(); static Ptr<IR_robot> create();
}; };
//! @}
} }
} }
......
...@@ -54,6 +54,9 @@ namespace cv ...@@ -54,6 +54,9 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets_is
//! @{
struct IS_bsdsObj : public Object struct IS_bsdsObj : public Object
{ {
std::string name; std::string name;
...@@ -67,6 +70,8 @@ public: ...@@ -67,6 +70,8 @@ public:
static Ptr<IS_bsds> create(); static Ptr<IS_bsds> create();
}; };
//! @}
} }
} }
......
...@@ -54,6 +54,9 @@ namespace cv ...@@ -54,6 +54,9 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets_is
//! @{
struct IS_weizmannObj : public Object struct IS_weizmannObj : public Object
{ {
std::string imageName; std::string imageName;
...@@ -70,6 +73,8 @@ public: ...@@ -70,6 +73,8 @@ public:
static Ptr<IS_weizmann> create(); static Ptr<IS_weizmann> create();
}; };
//! @}
} }
} }
......
...@@ -54,6 +54,9 @@ namespace cv ...@@ -54,6 +54,9 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets_msm
//! @{
struct cameraParam struct cameraParam
{ {
Matx33d mat1; Matx33d mat1;
...@@ -79,6 +82,8 @@ public: ...@@ -79,6 +82,8 @@ public:
static Ptr<MSM_epfl> create(); static Ptr<MSM_epfl> create();
}; };
//! @}
} }
} }
......
...@@ -54,6 +54,9 @@ namespace cv ...@@ -54,6 +54,9 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets_msm
//! @{
struct MSM_middleburyObj : public Object struct MSM_middleburyObj : public Object
{ {
std::string imageName; std::string imageName;
...@@ -70,6 +73,8 @@ public: ...@@ -70,6 +73,8 @@ public:
static Ptr<MSM_middlebury> create(); static Ptr<MSM_middlebury> create();
}; };
//! @}
} }
} }
......
...@@ -54,6 +54,9 @@ namespace cv ...@@ -54,6 +54,9 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets_or
//! @{
struct OR_imagenetObj : public Object struct OR_imagenetObj : public Object
{ {
int id; int id;
...@@ -68,6 +71,8 @@ public: ...@@ -68,6 +71,8 @@ public:
static Ptr<OR_imagenet> create(); static Ptr<OR_imagenet> create();
}; };
//! @}
} }
} }
......
...@@ -54,6 +54,9 @@ namespace cv ...@@ -54,6 +54,9 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets_or
//! @{
struct OR_mnistObj : public Object struct OR_mnistObj : public Object
{ {
char label; // 0..9 char label; // 0..9
...@@ -68,6 +71,8 @@ public: ...@@ -68,6 +71,8 @@ public:
static Ptr<OR_mnist> create(); static Ptr<OR_mnist> create();
}; };
//! @}
} }
} }
......
...@@ -54,6 +54,9 @@ namespace cv ...@@ -54,6 +54,9 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets_or
//! @{
struct OR_sunObj : public Object struct OR_sunObj : public Object
{ {
int label; int label;
...@@ -70,6 +73,8 @@ public: ...@@ -70,6 +73,8 @@ public:
std::vector<std::string> paths; std::vector<std::string> paths;
}; };
//! @}
} }
} }
......
...@@ -54,6 +54,9 @@ namespace cv ...@@ -54,6 +54,9 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets_pd
//! @{
struct PD_caltechObj : public Object struct PD_caltechObj : public Object
{ {
//double groundTrue[][]; //double groundTrue[][];
...@@ -78,6 +81,8 @@ public: ...@@ -78,6 +81,8 @@ public:
static Ptr<PD_caltech> create(); static Ptr<PD_caltech> create();
}; };
//! @}
} }
} }
......
...@@ -54,6 +54,9 @@ namespace cv ...@@ -54,6 +54,9 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets_slam
//! @{
struct pose struct pose
{ {
double elem[12]; double elem[12];
...@@ -76,6 +79,8 @@ public: ...@@ -76,6 +79,8 @@ public:
static Ptr<SLAM_kitti> create(); static Ptr<SLAM_kitti> create();
}; };
//! @}
} }
} }
......
...@@ -54,6 +54,9 @@ namespace cv ...@@ -54,6 +54,9 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets_slam
//! @{
enum imageType enum imageType
{ {
LEFT = 0, LEFT = 0,
...@@ -76,6 +79,8 @@ public: ...@@ -76,6 +79,8 @@ public:
static Ptr<SLAM_tumindoor> create(); static Ptr<SLAM_tumindoor> create();
}; };
//! @}
} }
} }
......
...@@ -54,6 +54,9 @@ namespace cv ...@@ -54,6 +54,9 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets_tr
//! @{
struct TR_charsObj : public Object struct TR_charsObj : public Object
{ {
std::string imgName; std::string imgName;
...@@ -68,6 +71,8 @@ public: ...@@ -68,6 +71,8 @@ public:
static Ptr<TR_chars> create(); static Ptr<TR_chars> create();
}; };
//! @}
} }
} }
......
...@@ -54,6 +54,9 @@ namespace cv ...@@ -54,6 +54,9 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets_tr
//! @{
struct tag struct tag
{ {
std::string value; std::string value;
...@@ -75,6 +78,8 @@ public: ...@@ -75,6 +78,8 @@ public:
static Ptr<TR_svt> create(); static Ptr<TR_svt> create();
}; };
//! @}
} }
} }
......
...@@ -57,12 +57,17 @@ namespace cv ...@@ -57,12 +57,17 @@ namespace cv
namespace datasets namespace datasets
{ {
//! @addtogroup datasets
//! @{
void CV_EXPORTS split(const std::string &s, std::vector<std::string> &elems, char delim); void CV_EXPORTS split(const std::string &s, std::vector<std::string> &elems, char delim);
void CV_EXPORTS createDirectory(const std::string &path); void CV_EXPORTS createDirectory(const std::string &path);
void CV_EXPORTS getDirList(const std::string &dirName, std::vector<std::string> &fileNames); void CV_EXPORTS getDirList(const std::string &dirName, std::vector<std::string> &fileNames);
//! @}
} }
} }
......
Face module changelog {#face_changelog}
=====================
Release 0.05
------------
This library is now included in the official OpenCV distribution (from 2.4 on). The
cv::FaceRecognizer is now an Algorithm, which better fits into the overall OpenCV API.
To reduce the confusion on user side and minimize my work, libfacerec and OpenCV have been
synchronized and are now based on the same interfaces and implementation.
The library now has an extensive documentation:
- The API is explained in detail and with a lot of code examples.
- The face recognition guide I had written for Python and GNU Octave/MATLAB has been adapted to
the new OpenCV C++ cv::FaceRecognizer.
- A tutorial for gender classification with Fisherfaces.
- A tutorial for face recognition in videos (e.g. webcam).
### Release highlights
- There are no single highlights to pick from, this release is a highlight itself.
Release 0.04
------------
This version is fully Windows-compatible and works with OpenCV 2.3.1. Several bugfixes, but none
influenced the recognition rate.
### Release highlights
- A whole lot of exceptions with meaningful error messages.
- A tutorial for Windows users:
[<http://bytefish.de/blog/opencv_visual_studio_and_libfacerec>](http://bytefish.de/blog/opencv_visual_studio_and_libfacerec)
Release 0.03
------------
Reworked the library to provide separate implementations in cpp files, because it's the preferred
way of contributing OpenCV libraries. This means the library is not header-only anymore. Slight API
changes were done, please see the documentation for details.
### Release highlights
- New Unit Tests (for LBP Histograms) make the library more robust.
- Added more documentation.
Release 0.02
------------
Reworked the library to provide separate implementations in cpp files, because it's the preferred
way of contributing OpenCV libraries. This means the library is not header-only anymore. Slight API
changes were done, please see the documentation for details.
### Release highlights
- New Unit Tests (for LBP Histograms) make the library more robust.
- Added a documentation and changelog in reStructuredText.
Release 0.01
------------
Initial release as header-only library.
### Release highlights
- Colormaps for OpenCV to enhance the visualization.
- Face Recognition algorithms implemented:
- Eigenfaces @cite TP91
- Fisherfaces @cite BHK97
- Local Binary Patterns Histograms @cite AHP04
- Added persistence facilities to store the models with a common API.
- Unit Tests (using [gtest](http://code.google.com/p/googletest/)).
- Providing a CMakeLists.txt to enable easy cross-platform building.
@incollection{AHP04,
title={Face recognition with local binary patterns},
author={Ahonen, Timo and Hadid, Abdenour and Pietik{\"a}inen, Matti},
booktitle={Computer vision-eccv 2004},
pages={469--481},
year={2004},
publisher={Springer}
}
@article{BHK97,
title={Eigenfaces vs. fisherfaces: Recognition using class specific linear projection},
author={Belhumeur, Peter N. and Hespanha, Jo{\~a}o P and Kriegman, David},
journal={Pattern Analysis and Machine Intelligence, IEEE Transactions on},
volume={19},
number={7},
pages={711--720},
year={1997},
publisher={IEEE}
}
@inproceedings{Bru92,
title={Face recognition through geometrical features},
author={Brunelli, Roberto and Poggio, Tomaso},
booktitle={Computer Vision—ECCV'92},
pages={792--800},
year={1992},
organization={Springer}
}
@book{Duda01,
title={Pattern classification},
author={Duda, Richard O and Hart, Peter E and Stork, David G},
year={2012},
publisher={John Wiley \& Sons}
}
@article{Fisher36,
title={The use of multiple measurements in taxonomic problems},
author={Fisher, Ronald A},
journal={Annals of eugenics},
volume={7},
number={2},
pages={179--188},
year={1936},
publisher={Wiley Online Library}
}
@article{GBK01,
title={From few to many: Illumination cone models for face recognition under variable lighting and pose},
author={Georghiades, Athinodoros S. and Belhumeur, Peter N. and Kriegman, David},
journal={Pattern Analysis and Machine Intelligence, IEEE Transactions on},
volume={23},
number={6},
pages={643--660},
year={2001},
publisher={IEEE}
}
@article{Kanade73,
title={Picture processing system by computer complex and recognition of human faces},
author={Kanade, Takeo},
year={1974}
}
@article{KM01,
title={Pca versus lda},
author={Mart{\'\i}nez, Aleix M and Kak, Avinash C},
journal={Pattern Analysis and Machine Intelligence, IEEE Transactions on},
volume={23},
number={2},
pages={228--233},
year={2001},
publisher={IEEE}
}
@article{Lee05,
title={Acquiring linear subspaces for face recognition under variable lighting},
author={Lee, Kuang-Chih and Ho, Jeffrey and Kriegman, David},
journal={Pattern Analysis and Machine Intelligence, IEEE Transactions on},
volume={27},
number={5},
pages={684--698},
year={2005},
publisher={IEEE}
}
@incollection{Messer06,
title={Performance characterisation of face recognition algorithms and their sensitivity to severe illumination changes},
author={Messer, Kieron and Kittler, Josef and Short, James and Heusch, Guillaume and Cardinaux, Fabien and Marcel, Sebastien and Rodriguez, Yann and Shan, Shiguang and Su, Yu and Gao, Wen and others},
booktitle={Advances in Biometrics},
pages={1--11},
year={2005},
publisher={Springer}
}
@article{RJ91,
title={Small sample size effects in statistical pattern recognition: Recommendations for practitioners},
author={Raudys, Sarunas J and Jain, Anil K.},
journal={IEEE Transactions on pattern analysis and machine intelligence},
volume={13},
number={3},
pages={252--264},
year={1991},
publisher={IEEE Computer Society}
}
@article{Tan10,
title={Enhanced local texture feature sets for face recognition under difficult lighting conditions},
author={Tan, Xiaoyang and Triggs, Bill},
journal={Image Processing, IEEE Transactions on},
volume={19},
number={6},
pages={1635--1650},
year={2010},
publisher={IEEE}
}
@article{TP91,
title={Eigenfaces for recognition},
author={Turk, Matthew and Pentland, Alex},
journal={Journal of cognitive neuroscience},
volume={3},
number={1},
pages={71--86},
year={1991},
publisher={MIT Press}
}
@article{Tu06,
title={Newborns' face recognition: Role of inner and outer facial features},
author={Turati, Chiara and Macchi Cassia, Viola and Simion, Francesca and Leo, Irene},
journal={Child development},
volume={77},
number={2},
pages={297--311},
year={2006},
publisher={Wiley Online Library}
}
@article{Wiskott97,
title={Face recognition by elastic bunch graph matching},
author={Wiskott, Laurenz and Fellous, J-M and Kuiger, N and Von Der Malsburg, Christoph},
journal={Pattern Analysis and Machine Intelligence, IEEE Transactions on},
volume={19},
number={7},
pages={775--779},
year={1997},
publisher={IEEE}
}
@article{Zhao03,
title={Face recognition: A literature survey},
author={Zhao, Wenyi and Chellappa, Rama and Phillips, P Jonathon and Rosenfeld, Azriel},
journal={Acm Computing Surveys (CSUR)},
volume={35},
number={4},
pages={399--458},
year={2003},
publisher={ACM}
}
...@@ -7,4 +7,4 @@ The module contains some recently added functionality that has not been stabiliz ...@@ -7,4 +7,4 @@ The module contains some recently added functionality that has not been stabiliz
.. toctree:: .. toctree::
:maxdepth: 2 :maxdepth: 2
FaceRecognizer Documentation <facerec/index> FaceRecognizer Documentation <index>
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment