Commit a20c5c8d authored by Maksim Shabunin's avatar Maksim Shabunin

Doxygen documentation for all modules

parent 525c4d5e
......@@ -38,3 +38,7 @@ or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
/** @defgroup adas Advanced Driver Assistance
*/
@incollection{KB2001,
title={An improved adaptive background mixture model for real-time tracking with shadow detection},
author={KaewTraKulPong, Pakorn and Bowden, Richard},
booktitle={Video-Based Surveillance Systems},
pages={135--144},
year={2002},
publisher={Springer}
}
@inproceedings{Gold2012,
title={Visual tracking of human visitors under variable-lighting conditions for a responsive audio art installation},
author={Godbehere, Andrew B and Matsukawa, Akihiro and Goldberg, Ken},
booktitle={American Control Conference (ACC), 2012},
pages={4305--4312},
year={2012},
organization={IEEE}
}
......@@ -44,21 +44,21 @@ the use of this software, even if advised of the possibility of such damage.
#ifdef __cplusplus
/** @defgroup bgsegm Improved Background-Foreground Segmentation Methods
*/
namespace cv
{
namespace bgsegm
{
/*!
Gaussian Mixture-based Backbround/Foreground Segmentation Algorithm
//! @addtogroup bgsegm
//! @{
The class implements the following algorithm:
"An improved adaptive background mixture model for real-time tracking with shadow detection"
P. KadewTraKuPong and R. Bowden,
Proc. 2nd European Workshp on Advanced Video-Based Surveillance Systems, 2001."
http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf
/** @brief Gaussian Mixture-based Background/Foreground Segmentation Algorithm.
*/
The class implements the algorithm described in @cite KB2001.
*/
class CV_EXPORTS_W BackgroundSubtractorMOG : public BackgroundSubtractor
{
public:
......@@ -75,54 +75,118 @@ public:
CV_WRAP virtual void setNoiseSigma(double noiseSigma) = 0;
};
/** @brief Creates mixture-of-gaussian background subtractor
@param history Length of the history.
@param nmixtures Number of Gaussian mixtures.
@param backgroundRatio Background ratio.
@param noiseSigma Noise strength (standard deviation of the brightness or each color channel). 0
means some automatic value.
*/
CV_EXPORTS_W Ptr<BackgroundSubtractorMOG>
createBackgroundSubtractorMOG(int history=200, int nmixtures=5,
double backgroundRatio=0.7, double noiseSigma=0);
/**
* Background Subtractor module. Takes a series of images and returns a sequence of mask (8UC1)
* images of the same size, where 255 indicates Foreground and 0 represents Background.
* This class implements an algorithm described in "Visual Tracking of Human Visitors under
* Variable-Lighting Conditions for a Responsive Audio Art Installation," A. Godbehere,
* A. Matsukawa, K. Goldberg, American Control Conference, Montreal, June 2012.
/** @brief Background Subtractor module based on the algorithm given in @cite Gold2012.
Takes a series of images and returns a sequence of mask (8UC1)
images of the same size, where 255 indicates Foreground and 0 represents Background.
This class implements an algorithm described in "Visual Tracking of Human Visitors under
Variable-Lighting Conditions for a Responsive Audio Art Installation," A. Godbehere,
A. Matsukawa, K. Goldberg, American Control Conference, Montreal, June 2012.
*/
class CV_EXPORTS_W BackgroundSubtractorGMG : public BackgroundSubtractor
{
public:
/** @brief Returns total number of distinct colors to maintain in histogram.
*/
CV_WRAP virtual int getMaxFeatures() const = 0;
/** @brief Sets total number of distinct colors to maintain in histogram.
*/
CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0;
/** @brief Returns the learning rate of the algorithm.
It lies between 0.0 and 1.0. It determines how quickly features are "forgotten" from
histograms.
*/
CV_WRAP virtual double getDefaultLearningRate() const = 0;
/** @brief Sets the learning rate of the algorithm.
*/
CV_WRAP virtual void setDefaultLearningRate(double lr) = 0;
/** @brief Returns the number of frames used to initialize background model.
*/
CV_WRAP virtual int getNumFrames() const = 0;
/** @brief Sets the number of frames used to initialize background model.
*/
CV_WRAP virtual void setNumFrames(int nframes) = 0;
/** @brief Returns the parameter used for quantization of color-space.
It is the number of discrete levels in each channel to be used in histograms.
*/
CV_WRAP virtual int getQuantizationLevels() const = 0;
/** @brief Sets the parameter used for quantization of color-space
*/
CV_WRAP virtual void setQuantizationLevels(int nlevels) = 0;
/** @brief Returns the prior probability that each individual pixel is a background pixel.
*/
CV_WRAP virtual double getBackgroundPrior() const = 0;
/** @brief Sets the prior probability that each individual pixel is a background pixel.
*/
CV_WRAP virtual void setBackgroundPrior(double bgprior) = 0;
/** @brief Returns the kernel radius used for morphological operations
*/
CV_WRAP virtual int getSmoothingRadius() const = 0;
/** @brief Sets the kernel radius used for morphological operations
*/
CV_WRAP virtual void setSmoothingRadius(int radius) = 0;
/** @brief Returns the value of decision threshold.
Decision value is the value above which pixel is determined to be FG.
*/
CV_WRAP virtual double getDecisionThreshold() const = 0;
/** @brief Sets the value of decision threshold.
*/
CV_WRAP virtual void setDecisionThreshold(double thresh) = 0;
/** @brief Returns the status of background model update
*/
CV_WRAP virtual bool getUpdateBackgroundModel() const = 0;
/** @brief Sets the status of background model update
*/
CV_WRAP virtual void setUpdateBackgroundModel(bool update) = 0;
/** @brief Returns the minimum value taken on by pixels in image sequence. Usually 0.
*/
CV_WRAP virtual double getMinVal() const = 0;
/** @brief Sets the minimum value taken on by pixels in image sequence.
*/
CV_WRAP virtual void setMinVal(double val) = 0;
/** @brief Returns the maximum value taken on by pixels in image sequence. e.g. 1.0 or 255.
*/
CV_WRAP virtual double getMaxVal() const = 0;
/** @brief Sets the maximum value taken on by pixels in image sequence.
*/
CV_WRAP virtual void setMaxVal(double val) = 0;
};
/** @brief Creates a GMG Background Subtractor
@param initializationFrames number of frames used to initialize the background models.
@param decisionThreshold Threshold value, above which it is marked foreground, else background.
*/
CV_EXPORTS_W Ptr<BackgroundSubtractorGMG> createBackgroundSubtractorGMG(int initializationFrames=120,
double decisionThreshold=0.8);
//! @}
}
}
......
@article{Benoit2010,
title={Using human visual system modeling for bio-inspired low level image processing},
author={Benoit, Alexandre and Caplier, Alice and Durette, Barth{\'e}l{\'e}my and H{\'e}rault, Jeanny},
journal={Computer vision and Image understanding},
volume={114},
number={7},
pages={758--773},
year={2010},
publisher={Elsevier}
}
@inproceedings{Strat2013,
title={Retina enhanced SIFT descriptors for video indexing},
author={Strat, Sabin Tiberius and Benoit, Alexandre and Lambert, Patrick},
booktitle={Content-Based Multimedia Indexing (CBMI), 2013 11th International Workshop on},
pages={201--206},
year={2013},
organization={IEEE}
}
@book{Herault2010,
title={Vision: Images, Signals and Neural Networks-Models of Neural Processing in Visual Perception},
author={Jeanny, Herault},
year={2010},
publisher={World Scientific}
}
@inproceedings{Chaix2007,
title={Efficient demosaicing through recursive filtering},
author={De Lavar{\`e}ne, Brice Chaix and Alleysson, David and Durette, Barth{\'e}l{\'e}my and H{\'e}rault, Jeanny},
booktitle={Image Processing, 2007. ICIP 2007. IEEE International Conference on},
volume={2},
pages={II--189},
year={2007},
organization={IEEE}
}
@article{Meylan2007,
title={Model of retinal local adaptation for the tone mapping of color filter array images},
author={Meylan, Laurence and Alleysson, David and S{\"u}sstrunk, Sabine},
journal={JOSA A},
volume={24},
number={9},
pages={2807--2816},
year={2007},
publisher={Optical Society of America}
}
......@@ -7,4 +7,4 @@ The module provides biological visual systems models (human visual system and ot
.. toctree::
:maxdepth: 2
Human retina documentation <retina/index>
Human retina documentation <retina>
This diff is collapsed.
......@@ -47,4 +47,14 @@
#include "opencv2/bioinspired/retina.hpp"
#include "opencv2/bioinspired/retinafasttonemapping.hpp"
#include "opencv2/bioinspired/transientareassegmentationmodule.hpp"
/** @defgroup bioinspired Biologically inspired vision models and derivated tools
The module provides biological visual systems models (human visual system and others). It also
provides derivated objects that take advantage of those bio-inspired models.
@ref bioinspired_retina
*/
#endif
......@@ -67,11 +67,10 @@
#ifndef __OPENCV_BIOINSPIRED_RETINAFASTTONEMAPPING_HPP__
#define __OPENCV_BIOINSPIRED_RETINAFASTTONEMAPPING_HPP__
/*
* retinafasttonemapping.hpp
*
* Created on: May 26, 2013
* Author: Alexandre Benoit
/**
@file
@date May 26, 2013
@author Alexandre Benoit
*/
#include "opencv2/core.hpp" // for all OpenCV core functionalities access, including cv::Exception support
......@@ -79,43 +78,61 @@
namespace cv{
namespace bioinspired{
/**
* a wrapper class which allows the tone mapping algorithm of Meylan&al(2007) to be used with OpenCV.
* This algorithm is already implemented in thre Retina class (retina::applyFastToneMapping) but used it does not require all the retina model to be allocated. This allows a light memory use for low memory devices (smartphones, etc.
* As a summary, these are the model properties:
* => 2 stages of local luminance adaptation with a different local neighborhood for each.
* => first stage models the retina photorecetors local luminance adaptation
* => second stage models th ganglion cells local information adaptation
* => compared to the initial publication, this class uses spatio-temporal low pass filters instead of spatial only filters.
* ====> this can help noise robustness and temporal stability for video sequence use cases.
* for more information, read to the following papers :
* Meylan L., Alleysson D., and Susstrunk S., A Model of Retinal Local Adaptation for the Tone Mapping of Color Filter Array Images, Journal of Optical Society of America, A, Vol. 24, N 9, September, 1st, 2007, pp. 2807-2816Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
* regarding spatio-temporal filter and the bigger retina model :
* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891.
*/
//! @addtogroup bioinspired
//! @{
/** @brief a wrapper class which allows the tone mapping algorithm of Meylan&al(2007) to be used with OpenCV.
This algorithm is already implemented in thre Retina class (retina::applyFastToneMapping) but used it does not require all the retina model to be allocated. This allows a light memory use for low memory devices (smartphones, etc.
As a summary, these are the model properties:
- 2 stages of local luminance adaptation with a different local neighborhood for each.
- first stage models the retina photorecetors local luminance adaptation
- second stage models th ganglion cells local information adaptation
- compared to the initial publication, this class uses spatio-temporal low pass filters instead of spatial only filters.
this can help noise robustness and temporal stability for video sequence use cases.
for more information, read to the following papers :
Meylan L., Alleysson D., and Susstrunk S., A Model of Retinal Local Adaptation for the Tone Mapping of Color Filter Array Images, Journal of Optical Society of America, A, Vol. 24, N 9, September, 1st, 2007, pp. 2807-2816Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
regarding spatio-temporal filter and the bigger retina model :
Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891.
*/
class CV_EXPORTS_W RetinaFastToneMapping : public Algorithm
{
public:
/**
* method that applies a luminance correction (initially High Dynamic Range (HDR) tone mapping) using only the 2 local adaptation stages of the retina parvocellular channel : photoreceptors level and ganlion cells level. Spatio temporal filtering is applied but limited to temporal smoothing and eventually high frequencies attenuation. This is a lighter method than the one available using the regular retina::run method. It is then faster but it does not include complete temporal filtering nor retina spectral whitening. Then, it can have a more limited effect on images with a very high dynamic range. This is an adptation of the original still image HDR tone mapping algorithm of David Alleyson, Sabine Susstruck and Laurence Meylan's work, please cite:
* -> Meylan L., Alleysson D., and Susstrunk S., A Model of Retinal Local Adaptation for the Tone Mapping of Color Filter Array Images, Journal of Optical Society of America, A, Vol. 24, N 9, September, 1st, 2007, pp. 2807-2816
@param inputImage the input image to process RGB or gray levels
@param outputToneMappedImage the output tone mapped image
*/
/** @brief applies a luminance correction (initially High Dynamic Range (HDR) tone mapping)
using only the 2 local adaptation stages of the retina parvocellular channel : photoreceptors
level and ganlion cells level. Spatio temporal filtering is applied but limited to temporal
smoothing and eventually high frequencies attenuation. This is a lighter method than the one
available using the regular retina::run method. It is then faster but it does not include
complete temporal filtering nor retina spectral whitening. Then, it can have a more limited
effect on images with a very high dynamic range. This is an adptation of the original still
image HDR tone mapping algorithm of David Alleyson, Sabine Susstruck and Laurence Meylan's
work, please cite: -> Meylan L., Alleysson D., and Susstrunk S., A Model of Retinal Local
Adaptation for the Tone Mapping of Color Filter Array Images, Journal of Optical Society of
America, A, Vol. 24, N 9, September, 1st, 2007, pp. 2807-2816
@param inputImage the input image to process RGB or gray levels
@param outputToneMappedImage the output tone mapped image
*/
CV_WRAP virtual void applyFastToneMapping(InputArray inputImage, OutputArray outputToneMappedImage)=0;
/**
* setup method that updates tone mapping behaviors by adjusing the local luminance computation area
* @param photoreceptorsNeighborhoodRadius the first stage local adaptation area
* @param ganglioncellsNeighborhoodRadius the second stage local adaptation area
* @param meanLuminanceModulatorK the factor applied to modulate the meanLuminance information (default is 1, see reference paper)
/** @brief updates tone mapping behaviors by adjusing the local luminance computation area
@param photoreceptorsNeighborhoodRadius the first stage local adaptation area
@param ganglioncellsNeighborhoodRadius the second stage local adaptation area
@param meanLuminanceModulatorK the factor applied to modulate the meanLuminance information
(default is 1, see reference paper)
*/
CV_WRAP virtual void setup(const float photoreceptorsNeighborhoodRadius=3.f, const float ganglioncellsNeighborhoodRadius=1.f, const float meanLuminanceModulatorK=1.f)=0;
};
//! @relates bioinspired::RetinaFastToneMapping
CV_EXPORTS_W Ptr<RetinaFastToneMapping> createRetinaFastToneMapping(Size inputSize);
//! @}
}
}
#endif /* __OPENCV_BIOINSPIRED_RETINAFASTTONEMAPPING_HPP__ */
......@@ -49,8 +49,14 @@
#include <vector>
/** @defgroup ccalib Custom Calibration Pattern for 3D reconstruction
*/
namespace cv{ namespace ccalib{
//! @addtogroup ccalib
//! @{
class CV_EXPORTS CustomPattern : public Algorithm
{
public:
......@@ -66,11 +72,11 @@ public:
bool isInitialized();
void getPatternPoints(OutputArray original_points);
/*
/**<
Returns a vector<Point> of the original points.
*/
double getPixelSize();
/*
/**<
Get the pixel size of the pattern
*/
......@@ -86,7 +92,7 @@ public:
Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags = 0,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON));
/*
/**<
Calls the calirateCamera function with the same inputs.
*/
......@@ -94,7 +100,7 @@ public:
OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE);
bool findRt(InputArray image, InputArray cameraMatrix, InputArray distCoeffs,
OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE);
/*
/**<
Uses solvePnP to find the rotation and translation of the pattern
with respect to the camera frame.
*/
......@@ -105,13 +111,13 @@ public:
bool findRtRANSAC(InputArray image, InputArray cameraMatrix, InputArray distCoeffs,
OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess = false, int iterationsCount = 100,
float reprojectionError = 8.0, int minInliersCount = 100, OutputArray inliers = noArray(), int flags = SOLVEPNP_ITERATIVE);
/*
/**<
Uses solvePnPRansac()
*/
void drawOrientation(InputOutputArray image, InputArray tvec, InputArray rvec, InputArray cameraMatrix,
InputArray distCoeffs, double axis_length = 3, int axis_width = 2);
/*
/**<
pattern_corners -> projected over the image position of the edges of the pattern.
*/
......@@ -144,6 +150,8 @@ private:
void refineKeypointsPos(const Mat& img, std::vector<KeyPoint>& kp);
};
//! @}
}} // namespace ccalib, cv
#endif
......@@ -7,5 +7,5 @@ The module provides an interactive GUI to debug and incrementally design compute
.. toctree::
:maxdepth: 2
CVV API Documentation <cvv_api/index>
CVV GUI Documentation <cvv_gui/index>
CVV API Documentation <cvv_api>
CVV GUI Documentation <cvv_gui>
......@@ -7,6 +7,10 @@
namespace cvv
{
//! @addtogroup cvv
//! @{
namespace impl
{
......@@ -49,6 +53,9 @@ struct CallMetaData
const bool isKnown;
};
}
//! @}
} // namespaces
#ifdef __GNUC__
......
/**
@defgroup cvv GUI for Interactive Visual Debugging of Computer Vision Programs
Namespace for all functions is **cvv**, i.e. *cvv::showImage()*.
Compilation:
- For development, i.e. for cvv GUI to show up, compile your code using cvv with
*g++ -DCVVISUAL_DEBUGMODE*.
- For release, i.e. cvv calls doing nothing, compile your code without above flag.
See cvv tutorial for a commented example application using cvv.
*/
#include <opencv2/cvv/call_meta_data.hpp>
#include <opencv2/cvv/debug_mode.hpp>
#include <opencv2/cvv/dmatch.hpp>
#include <opencv2/cvv/filter.hpp>
#include <opencv2/cvv/final_show.hpp>
#include <opencv2/cvv/show_image.hpp>
......@@ -10,6 +10,9 @@
namespace cvv
{
//! @addtogroup cvv
//! @{
namespace impl
{
......@@ -24,22 +27,25 @@ static inline bool &getDebugFlag()
} // namespace impl
/**
* @brief Returns whether debug-mode is active for this TU and thread.
*/
/** @brief Returns whether debug-mode is active for this TU and thread.
*/
static inline bool debugMode()
{
return impl::getDebugFlag();
}
/**
* @brief Set the debug-mode for this TU and thread.
/** @brief Enable or disable cvv for current translation unit and thread
(disabled this way has higher - but still low - overhead compared to using the compile flags).
@param active
*/
static inline void setDebugFlag(bool active)
{
impl::getDebugFlag() = active;
}
//! @}
} // namespace cvv
#endif
......@@ -9,9 +9,16 @@
#include "call_meta_data.hpp"
#include "debug_mode.hpp"
#ifdef CV_DOXYGEN
#define CVVISUAL_DEBUGMODE
#endif
namespace cvv
{
//! @addtogroup cvv
//! @{
namespace impl
{
void debugDMatch(cv::InputArray img1, std::vector<cv::KeyPoint> keypoints1,
......@@ -22,6 +29,22 @@ void debugDMatch(cv::InputArray img1, std::vector<cv::KeyPoint> keypoints1,
} // namespace impl
#ifdef CVVISUAL_DEBUGMODE
/** @brief Add a filled in DMatch \<dmatch\> to debug GUI.
The matches can are visualized for interactive inspection in different GUI views (one similar to an
interactive :draw_matches:drawMatches\<\>).
@param img1 First image used in DMatch \<dmatch\>.
@param keypoints1 Keypoints of first image.
@param img2 Second image used in DMatch.
@param keypoints2 Keypoints of second image.
@param matches
@param data See showImage
@param description See showImage
@param view See showImage
@param useTrainDescriptor Use DMatch \<dmatch\>'s train descriptor index instead of query
descriptor index.
*/
static inline void
debugDMatch(cv::InputArray img1, std::vector<cv::KeyPoint> keypoints1,
cv::InputArray img2, std::vector<cv::KeyPoint> keypoints2,
......@@ -36,6 +59,7 @@ debugDMatch(cv::InputArray img1, std::vector<cv::KeyPoint> keypoints1,
data, description, view, useTrainDescriptor);
}
}
/** @overload */
static inline void
debugDMatch(cv::InputArray img1, std::vector<cv::KeyPoint> keypoints1,
cv::InputArray img2, std::vector<cv::KeyPoint> keypoints2,
......@@ -52,9 +76,6 @@ debugDMatch(cv::InputArray img1, std::vector<cv::KeyPoint> keypoints1,
}
}
#else
/**
* @brief Debug a set of matches between two images.
*/
static inline void debugDMatch(cv::InputArray, std::vector<cv::KeyPoint>,
cv::InputArray, std::vector<cv::KeyPoint>,
std::vector<cv::DMatch>,
......@@ -63,9 +84,6 @@ static inline void debugDMatch(cv::InputArray, std::vector<cv::KeyPoint>,
bool = true)
{
}
/**
* Dito.
*/
static inline void debugDMatch(cv::InputArray, std::vector<cv::KeyPoint>,
cv::InputArray, std::vector<cv::KeyPoint>,
std::vector<cv::DMatch>,
......@@ -75,6 +93,8 @@ static inline void debugDMatch(cv::InputArray, std::vector<cv::KeyPoint>,
}
#endif
//! @}
} // namespace cvv
#endif
......@@ -8,9 +8,16 @@
#include "call_meta_data.hpp"
#include "debug_mode.hpp"
#ifdef CV_DOXYGEN
#define CVVISUAL_DEBUGMODE
#endif
namespace cvv
{
//! @addtogroup cvv
//! @{
namespace impl
{
// implementation outside API
......@@ -20,6 +27,11 @@ void debugFilter(cv::InputArray original, cv::InputArray result,
} // namespace impl
#ifdef CVVISUAL_DEBUGMODE
/**
* @brief Use the debug-framework to compare two images (from which the second
* is intended to be the result of
* a filter applied to the first).
*/
static inline void
debugFilter(cv::InputArray original, cv::InputArray result,
impl::CallMetaData metaData = impl::CallMetaData(),
......@@ -31,6 +43,7 @@ debugFilter(cv::InputArray original, cv::InputArray result,
view);
}
}
/** @overload */
static inline void debugFilter(cv::InputArray original, cv::InputArray result,
impl::CallMetaData metaData,
const ::std::string &description,
......@@ -43,20 +56,12 @@ static inline void debugFilter(cv::InputArray original, cv::InputArray result,
}
}
#else
/**
* @brief Use the debug-framework to compare two images (from which the second
* is intended to be the result of
* a filter applied to the first).
*/
static inline void debugFilter(cv::InputArray, cv::InputArray,
impl::CallMetaData = impl::CallMetaData(),
const char * = nullptr, const char * = nullptr)
{
}
/**
* Dito.
*/
static inline void debugFilter(cv::InputArray, cv::InputArray,
impl::CallMetaData, const ::std::string &,
const ::std::string &)
......@@ -64,6 +69,8 @@ static inline void debugFilter(cv::InputArray, cv::InputArray,
}
#endif
//! @}
} // namespace cvv
#endif
......@@ -6,22 +6,18 @@
namespace cvv
{
//! @addtogroup cvv
//! @{
namespace impl
{
void finalShow();
}
/**
* @brief Passes the control to the debug-window for a last time.
*
* This function must be called once if there was any prior debug-call. After that all debug-data
* are freed.
*
* If there was no prior call it may be called once in which case it returns
* without opening a window.
*
* In either case no further debug-calls must be made (undefined behaviour!!).
*
/** @brief Passes the control to the debug-window for a last time.
This function **must** be called *once* *after* all cvv calls if any. As an alternative create an
instance of FinalShowCaller, which calls finalShow() in its destructor (RAII-style).
*/
inline void finalShow()
{
......@@ -48,6 +44,8 @@ public:
}
};
//! @}
}
#endif
......@@ -8,9 +8,16 @@
#include "call_meta_data.hpp"
#include "debug_mode.hpp"
#ifdef CV_DOXYGEN
#define CVVISUAL_DEBUGMODE
#endif
namespace cvv
{
//! @addtogroup cvv
//! @{
namespace impl
{
// implementation outside API
......@@ -19,6 +26,15 @@ void showImage(cv::InputArray img, const CallMetaData &data,
} // namespace impl
#ifdef CVVISUAL_DEBUGMODE
/** @brief Add a single image to debug GUI (similar to imshow \<\>).
@param img Image to show in debug GUI.
@param metaData Properly initialized CallMetaData struct, i.e. information about file, line and
function name for GUI. Use CVVISUAL_LOCATION macro.
@param description Human readable description to provide context to image.
@param view Preselect view that will be used to visualize this image in GUI. Other views can still
be selected in GUI later on.
*/
static inline void showImage(cv::InputArray img,
impl::CallMetaData metaData = impl::CallMetaData(),
const char *description = nullptr,
......@@ -29,6 +45,7 @@ static inline void showImage(cv::InputArray img,
impl::showImage(img, metaData, description, view);
}
}
/** @overload */
static inline void showImage(cv::InputArray img, impl::CallMetaData metaData,
const ::std::string &description,
const ::std::string &view = "")
......@@ -40,23 +57,19 @@ static inline void showImage(cv::InputArray img, impl::CallMetaData metaData,
}
}
#else
/**
* Use the debug-framework to show a single image.
*/
static inline void showImage(cv::InputArray,
impl::CallMetaData = impl::CallMetaData(),
const char * = nullptr, const char * = nullptr)
{
}
/**
* Dito.
*/
static inline void showImage(cv::InputArray, impl::CallMetaData,
const ::std::string &, const ::std::string &)
{
}
#endif
//! @}
} // namespace cvv
#endif
......@@ -15,105 +15,105 @@ It is planned to have:
.. toctree::
:hidden:
datasets/ar_hmdb
datasets/ar_sports
datasets/fr_adience
datasets/fr_lfw
datasets/gr_chalearn
datasets/gr_skig
datasets/hpe_humaneva
datasets/hpe_parse
datasets/ir_affine
datasets/ir_robot
datasets/is_bsds
datasets/is_weizmann
datasets/msm_epfl
datasets/msm_middlebury
datasets/or_imagenet
datasets/or_mnist
datasets/or_sun
datasets/pd_caltech
datasets/slam_kitti
datasets/slam_tumindoor
datasets/tr_chars
datasets/tr_svt
ar_hmdb
ar_sports
fr_adience
fr_lfw
gr_chalearn
gr_skig
hpe_humaneva
hpe_parse
ir_affine
ir_robot
is_bsds
is_weizmann
msm_epfl
msm_middlebury
or_imagenet
or_mnist
or_sun
pd_caltech
slam_kitti
slam_tumindoor
tr_chars
tr_svt
Action Recognition
------------------
:doc:`datasets/ar_hmdb` [#f1]_
:doc:`ar_hmdb` [#f1]_
:doc:`datasets/ar_sports`
:doc:`ar_sports`
Face Recognition
----------------
:doc:`datasets/fr_adience`
:doc:`fr_adience`
:doc:`datasets/fr_lfw` [#f1]_
:doc:`fr_lfw` [#f1]_
Gesture Recognition
-------------------
:doc:`datasets/gr_chalearn`
:doc:`gr_chalearn`
:doc:`datasets/gr_skig`
:doc:`gr_skig`
Human Pose Estimation
---------------------
:doc:`datasets/hpe_humaneva`
:doc:`hpe_humaneva`
:doc:`datasets/hpe_parse`
:doc:`hpe_parse`
Image Registration
------------------
:doc:`datasets/ir_affine`
:doc:`ir_affine`
:doc:`datasets/ir_robot`
:doc:`ir_robot`
Image Segmentation
------------------
:doc:`datasets/is_bsds`
:doc:`is_bsds`
:doc:`datasets/is_weizmann`
:doc:`is_weizmann`
Multiview Stereo Matching
-------------------------
:doc:`datasets/msm_epfl`
:doc:`msm_epfl`
:doc:`datasets/msm_middlebury`
:doc:`msm_middlebury`
Object Recognition
------------------
:doc:`datasets/or_imagenet`
:doc:`or_imagenet`
:doc:`datasets/or_mnist` [#f2]_
:doc:`or_mnist` [#f2]_
:doc:`datasets/or_sun`
:doc:`or_sun`
Pedestrian Detection
--------------------
:doc:`datasets/pd_caltech` [#f2]_
:doc:`pd_caltech` [#f2]_
SLAM
----
:doc:`datasets/slam_kitti`
:doc:`slam_kitti`
:doc:`datasets/slam_tumindoor`
:doc:`slam_tumindoor`
Text Recognition
----------------
:doc:`datasets/tr_chars`
:doc:`tr_chars`
:doc:`datasets/tr_svt` [#f1]_
:doc:`tr_svt` [#f1]_
*Footnotes*
......
......@@ -54,6 +54,9 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets_ar
//! @{
struct AR_hmdbObj : public Object
{
int id;
......@@ -69,6 +72,8 @@ public:
static Ptr<AR_hmdb> create();
};
//! @}
}
}
......
......@@ -54,6 +54,9 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets_ar
//! @{
struct AR_sportsObj : public Object
{
std::string videoUrl;
......@@ -68,6 +71,8 @@ public:
static Ptr<AR_sports> create();
};
//! @}
}
}
......
......@@ -54,6 +54,9 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets_fr
//! @{
enum genderType
{
male = 0,
......@@ -87,6 +90,8 @@ public:
std::vector<std::string> paths;
};
//! @}
}
}
......
......@@ -54,6 +54,9 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets_fr
//! @{
struct FR_lfwObj : public Object
{
std::string image1, image2;
......@@ -68,6 +71,8 @@ public:
static Ptr<FR_lfw> create();
};
//! @}
}
}
......
......@@ -54,6 +54,9 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets_gr
//! @{
struct groundTruth
{
int gestureID, initialFrame, lastFrame;
......@@ -85,6 +88,8 @@ public:
static Ptr<GR_chalearn> create();
};
//! @}
}
}
......
......@@ -54,6 +54,9 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets_gr
//! @{
enum actionType
{
circle = 1,
......@@ -107,6 +110,8 @@ public:
static Ptr<GR_skig> create();
};
//! @}
}
}
......
......@@ -54,6 +54,9 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets_hpe
//! @{
struct HPE_humanevaObj : public Object
{
char person; // 1..4
......@@ -79,6 +82,8 @@ public:
static Ptr<HPE_humaneva> create(int num=humaneva_1);
};
//! @}
}
}
......
......@@ -54,6 +54,9 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets_hpe
//! @{
struct HPE_parseObj : public Object
{
std::string name;
......@@ -67,6 +70,8 @@ public:
static Ptr<HPE_parse> create();
};
//! @}
}
}
......
......@@ -55,6 +55,9 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets_ir
//! @{
struct IR_affineObj : public Object
{
std::string imageName;
......@@ -69,6 +72,8 @@ public:
static Ptr<IR_affine> create();
};
//! @}
}
}
......
......@@ -54,6 +54,9 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets_ir
//! @{
// calibration matrix from calibrationFile.mat
// 2.8290e+03 0.0000e+00 8.0279e+02
// 0.0000e+00 2.8285e+03 6.1618e+02
......@@ -78,6 +81,8 @@ public:
static Ptr<IR_robot> create();
};
//! @}
}
}
......
......@@ -54,6 +54,9 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets_is
//! @{
struct IS_bsdsObj : public Object
{
std::string name;
......@@ -67,6 +70,8 @@ public:
static Ptr<IS_bsds> create();
};
//! @}
}
}
......
......@@ -54,6 +54,9 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets_is
//! @{
struct IS_weizmannObj : public Object
{
std::string imageName;
......@@ -70,6 +73,8 @@ public:
static Ptr<IS_weizmann> create();
};
//! @}
}
}
......
......@@ -54,6 +54,9 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets_msm
//! @{
struct cameraParam
{
Matx33d mat1;
......@@ -79,6 +82,8 @@ public:
static Ptr<MSM_epfl> create();
};
//! @}
}
}
......
......@@ -54,6 +54,9 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets_msm
//! @{
struct MSM_middleburyObj : public Object
{
std::string imageName;
......@@ -70,6 +73,8 @@ public:
static Ptr<MSM_middlebury> create();
};
//! @}
}
}
......
......@@ -54,6 +54,9 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets_or
//! @{
struct OR_imagenetObj : public Object
{
int id;
......@@ -68,6 +71,8 @@ public:
static Ptr<OR_imagenet> create();
};
//! @}
}
}
......
......@@ -54,6 +54,9 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets_or
//! @{
struct OR_mnistObj : public Object
{
char label; // 0..9
......@@ -68,6 +71,8 @@ public:
static Ptr<OR_mnist> create();
};
//! @}
}
}
......
......@@ -54,6 +54,9 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets_or
//! @{
struct OR_sunObj : public Object
{
int label;
......@@ -70,6 +73,8 @@ public:
std::vector<std::string> paths;
};
//! @}
}
}
......
......@@ -54,6 +54,9 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets_pd
//! @{
struct PD_caltechObj : public Object
{
//double groundTrue[][];
......@@ -78,6 +81,8 @@ public:
static Ptr<PD_caltech> create();
};
//! @}
}
}
......
......@@ -54,6 +54,9 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets_slam
//! @{
struct pose
{
double elem[12];
......@@ -76,6 +79,8 @@ public:
static Ptr<SLAM_kitti> create();
};
//! @}
}
}
......
......@@ -54,6 +54,9 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets_slam
//! @{
enum imageType
{
LEFT = 0,
......@@ -76,6 +79,8 @@ public:
static Ptr<SLAM_tumindoor> create();
};
//! @}
}
}
......
......@@ -54,6 +54,9 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets_tr
//! @{
struct TR_charsObj : public Object
{
std::string imgName;
......@@ -68,6 +71,8 @@ public:
static Ptr<TR_chars> create();
};
//! @}
}
}
......
......@@ -54,6 +54,9 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets_tr
//! @{
struct tag
{
std::string value;
......@@ -75,6 +78,8 @@ public:
static Ptr<TR_svt> create();
};
//! @}
}
}
......
......@@ -57,12 +57,17 @@ namespace cv
namespace datasets
{
//! @addtogroup datasets
//! @{
void CV_EXPORTS split(const std::string &s, std::vector<std::string> &elems, char delim);
void CV_EXPORTS createDirectory(const std::string &path);
void CV_EXPORTS getDirList(const std::string &dirName, std::vector<std::string> &fileNames);
//! @}
}
}
......
Face module changelog {#face_changelog}
=====================
Release 0.05
------------
This library is now included in the official OpenCV distribution (from 2.4 on). The
cv::FaceRecognizer is now an Algorithm, which better fits into the overall OpenCV API.
To reduce confusion on the user side and to minimize maintenance work, libfacerec and OpenCV have
been synchronized and are now based on the same interfaces and implementation.
The library now has extensive documentation:
- The API is explained in detail and with a lot of code examples.
- The face recognition guide I had written for Python and GNU Octave/MATLAB has been adapted to
the new OpenCV C++ cv::FaceRecognizer.
- A tutorial for gender classification with Fisherfaces.
- A tutorial for face recognition in videos (e.g. webcam).
### Release highlights
- There are no single highlights to pick from, this release is a highlight itself.
Release 0.04
------------
This version is fully Windows-compatible and works with OpenCV 2.3.1. It includes several bugfixes,
none of which influenced the recognition rate.
### Release highlights
- A whole lot of exceptions with meaningful error messages.
- A tutorial for Windows users:
[<http://bytefish.de/blog/opencv_visual_studio_and_libfacerec>](http://bytefish.de/blog/opencv_visual_studio_and_libfacerec)
Release 0.03
------------
Reworked the library to provide separate implementations in cpp files, because it's the preferred
way of contributing OpenCV libraries. This means the library is not header-only anymore. Slight API
changes were done, please see the documentation for details.
### Release highlights
- New Unit Tests (for LBP Histograms) make the library more robust.
- Added more documentation.
Release 0.02
------------
Reworked the library to provide separate implementations in cpp files, because it's the preferred
way of contributing OpenCV libraries. This means the library is not header-only anymore. Slight API
changes were done, please see the documentation for details.
### Release highlights
- New Unit Tests (for LBP Histograms) make the library more robust.
- Added a documentation and changelog in reStructuredText.
Release 0.01
------------
Initial release as header-only library.
### Release highlights
- Colormaps for OpenCV to enhance the visualization.
- Face Recognition algorithms implemented:
- Eigenfaces @cite TP91
- Fisherfaces @cite BHK97
- Local Binary Patterns Histograms @cite AHP04
- Added persistence facilities to store the models with a common API.
- Unit Tests (using [gtest](http://code.google.com/p/googletest/)).
- Providing a CMakeLists.txt to enable easy cross-platform building.
@incollection{AHP04,
title={Face recognition with local binary patterns},
author={Ahonen, Timo and Hadid, Abdenour and Pietik{\"a}inen, Matti},
booktitle={Computer Vision - ECCV 2004},
pages={469--481},
year={2004},
publisher={Springer}
}
@article{BHK97,
title={Eigenfaces vs. fisherfaces: Recognition using class specific linear projection},
author={Belhumeur, Peter N. and Hespanha, Jo{\~a}o P and Kriegman, David},
journal={Pattern Analysis and Machine Intelligence, IEEE Transactions on},
volume={19},
number={7},
pages={711--720},
year={1997},
publisher={IEEE}
}
@inproceedings{Bru92,
title={Face recognition through geometrical features},
author={Brunelli, Roberto and Poggio, Tomaso},
booktitle={Computer Vision—ECCV'92},
pages={792--800},
year={1992},
organization={Springer}
}
@book{Duda01,
title={Pattern classification},
author={Duda, Richard O and Hart, Peter E and Stork, David G},
year={2012},
publisher={John Wiley \& Sons}
}
@article{Fisher36,
title={The use of multiple measurements in taxonomic problems},
author={Fisher, Ronald A},
journal={Annals of eugenics},
volume={7},
number={2},
pages={179--188},
year={1936},
publisher={Wiley Online Library}
}
@article{GBK01,
title={From few to many: Illumination cone models for face recognition under variable lighting and pose},
author={Georghiades, Athinodoros S. and Belhumeur, Peter N. and Kriegman, David},
journal={Pattern Analysis and Machine Intelligence, IEEE Transactions on},
volume={23},
number={6},
pages={643--660},
year={2001},
publisher={IEEE}
}
@article{Kanade73,
title={Picture processing system by computer complex and recognition of human faces},
author={Kanade, Takeo},
year={1974}
}
@article{KM01,
title={{PCA} versus {LDA}},
author={Mart{\'\i}nez, Aleix M and Kak, Avinash C},
journal={Pattern Analysis and Machine Intelligence, IEEE Transactions on},
volume={23},
number={2},
pages={228--233},
year={2001},
publisher={IEEE}
}
@article{Lee05,
title={Acquiring linear subspaces for face recognition under variable lighting},
author={Lee, Kuang-Chih and Ho, Jeffrey and Kriegman, David},
journal={Pattern Analysis and Machine Intelligence, IEEE Transactions on},
volume={27},
number={5},
pages={684--698},
year={2005},
publisher={IEEE}
}
@incollection{Messer06,
title={Performance characterisation of face recognition algorithms and their sensitivity to severe illumination changes},
author={Messer, Kieron and Kittler, Josef and Short, James and Heusch, Guillaume and Cardinaux, Fabien and Marcel, Sebastien and Rodriguez, Yann and Shan, Shiguang and Su, Yu and Gao, Wen and others},
booktitle={Advances in Biometrics},
pages={1--11},
year={2005},
publisher={Springer}
}
@article{RJ91,
title={Small sample size effects in statistical pattern recognition: Recommendations for practitioners},
author={Raudys, Sarunas J and Jain, Anil K.},
journal={IEEE Transactions on pattern analysis and machine intelligence},
volume={13},
number={3},
pages={252--264},
year={1991},
publisher={IEEE Computer Society}
}
@article{Tan10,
title={Enhanced local texture feature sets for face recognition under difficult lighting conditions},
author={Tan, Xiaoyang and Triggs, Bill},
journal={Image Processing, IEEE Transactions on},
volume={19},
number={6},
pages={1635--1650},
year={2010},
publisher={IEEE}
}
@article{TP91,
title={Eigenfaces for recognition},
author={Turk, Matthew and Pentland, Alex},
journal={Journal of cognitive neuroscience},
volume={3},
number={1},
pages={71--86},
year={1991},
publisher={MIT Press}
}
@article{Tu06,
title={Newborns' face recognition: Role of inner and outer facial features},
author={Turati, Chiara and Macchi Cassia, Viola and Simion, Francesca and Leo, Irene},
journal={Child development},
volume={77},
number={2},
pages={297--311},
year={2006},
publisher={Wiley Online Library}
}
@article{Wiskott97,
title={Face recognition by elastic bunch graph matching},
author={Wiskott, Laurenz and Fellous, J-M and Kuiger, N and Von Der Malsburg, Christoph},
journal={Pattern Analysis and Machine Intelligence, IEEE Transactions on},
volume={19},
number={7},
pages={775--779},
year={1997},
publisher={IEEE}
}
@article{Zhao03,
title={Face recognition: A literature survey},
author={Zhao, Wenyi and Chellappa, Rama and Phillips, P Jonathon and Rosenfeld, Azriel},
journal={Acm Computing Surveys (CSUR)},
volume={35},
number={4},
pages={399--458},
year={2003},
publisher={ACM}
}
......@@ -7,4 +7,4 @@ The module contains some recently added functionality that has not been stabiliz
.. toctree::
:maxdepth: 2
FaceRecognizer Documentation <facerec/index>
FaceRecognizer Documentation <index>
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment